{"package":"smoothr","topic":"densify","snippet":"### Name: densify\n### Title: Densify spatial lines or polygons\n### Aliases: densify\n\n### ** Examples\n\nlibrary(sf)\nl <- jagged_lines$geometry[[2]]\nl_dense <- densify(l, n = 2)\nplot(l, lwd = 5)\nplot(l_dense, col = \"red\", lwd = 2, lty = 2, add = TRUE)\nplot(l_dense %>% st_cast(\"MULTIPOINT\"), col = \"red\", pch = 19,\n add = TRUE)\n\n\n"} {"package":"smoothr","topic":"drop_crumbs","snippet":"### Name: drop_crumbs\n### Title: Remove small polygons or line segments\n### Aliases: drop_crumbs\n\n### ** Examples\n\n# remove polygons smaller than 200km2\np <- jagged_polygons$geometry[7]\narea_thresh <- units::set_units(200, km^2)\np_dropped <- drop_crumbs(p, threshold = area_thresh)\n# plot\npar(mar = c(0, 0, 1, 0), mfrow = c(1, 2))\nplot(p, col = \"black\", main = \"Original\")\nif (length(p_dropped) > 0) {\n plot(p_dropped, col = \"black\", main = \"After drop_crumbs()\")\n}\n\n\n# remove lines less than 25 miles\nl <- jagged_lines$geometry[8]\n# note that any units can be used\n# conversion to units of projection happens automatically\nlength_thresh <- units::set_units(25, miles)\nl_dropped <- drop_crumbs(l, threshold = length_thresh)\n# plot\npar(mar = c(0, 0, 1, 0), mfrow = c(1, 2))\nplot(l, lwd = 5, main = \"Original\")\nif (length(l_dropped)) {\n plot(l_dropped, lwd = 5, main = \"After drop_crumbs()\")\n}\n\n\n\n"} {"package":"smoothr","topic":"fill_holes","snippet":"### Name: fill_holes\n### Title: Fill small holes in polygons\n### Aliases: fill_holes\n\n### ** Examples\n\n# fill holes smaller than 1000km2\np <- jagged_polygons$geometry[5]\narea_thresh <- units::set_units(1000, km^2)\np_dropped <- fill_holes(p, threshold = area_thresh)\n# plot\npar(mar = c(0, 0, 1, 0), mfrow = c(1, 2))\nplot(p, col = \"black\", main = \"Original\")\nplot(p_dropped, col = \"black\", main = \"After fill_holes()\")\n\n\n"} {"package":"smoothr","topic":"smooth","snippet":"### Name: smooth\n### Title: Smooth a spatial feature\n### Aliases: smooth\n\n### ** Examples\n\nlibrary(sf)\n# compare different smoothing methods\n# polygons\npar(mar = c(0, 0, 0, 0), oma = c(4, 0, 0, 0), mfrow = c(3, 3))\np_smooth_chaikin <- smooth(jagged_polygons, method = \"chaikin\")\np_smooth_ksmooth <- smooth(jagged_polygons, method = \"ksmooth\")\np_smooth_spline <- smooth(jagged_polygons, method = \"spline\")\nfor (i in 1:nrow(jagged_polygons)) {\n plot(st_geometry(p_smooth_spline[i, ]), col = NA, border = NA)\n plot(st_geometry(jagged_polygons[i, ]), col = \"grey40\", border = NA, add = TRUE)\n plot(st_geometry(p_smooth_chaikin[i, ]), col = NA, border = \"#E41A1C\", lwd = 2, add = TRUE)\n plot(st_geometry(p_smooth_ksmooth[i, ]), col = NA, border = \"#4DAF4A\", lwd = 2, add = TRUE)\n plot(st_geometry(p_smooth_spline[i, ]), col = NA, border = \"#377EB8\", lwd = 2, add = TRUE)\n}\npar(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), new = TRUE)\nplot(0, 0, type = \"n\", bty = \"n\", xaxt = \"n\", yaxt = \"n\", axes = FALSE)\nlegend(\"bottom\", legend = c(\"chaikin\", \"ksmooth\", \"spline\"),\n col = c(\"#E41A1C\", \"#4DAF4A\", \"#377EB8\"),\n lwd = 2, cex = 2, box.lwd = 0, inset = 0, horiz = TRUE)\n\n# lines\npar(mar = c(0, 0, 0, 0), oma = c(4, 0, 0, 0), mfrow = c(3, 3))\nl_smooth_chaikin <- smooth(jagged_lines, method = \"chaikin\")\nl_smooth_ksmooth <- smooth(jagged_lines, method = \"ksmooth\")\nl_smooth_spline <- smooth(jagged_lines, method = \"spline\")\nfor (i in 1:nrow(jagged_lines)) {\n plot(st_geometry(l_smooth_spline[i, ]), col = NA)\n plot(st_geometry(jagged_lines[i, 
]), col = \"grey20\", lwd = 3, add = TRUE)\n plot(st_geometry(l_smooth_chaikin[i, ]), col = \"#E41A1C\", lwd = 2, lty = 2, add = TRUE)\n plot(st_geometry(l_smooth_ksmooth[i, ]), col = \"#4DAF4A\", lwd = 2, lty = 2, add = TRUE)\n plot(st_geometry(l_smooth_spline[i, ]), col = \"#377EB8\", lwd = 2, lty = 2, add = TRUE)\n}\npar(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), new = TRUE)\nplot(0, 0, type = \"n\", bty = \"n\", xaxt = \"n\", yaxt = \"n\", axes = FALSE)\nlegend(\"bottom\", legend = c(\"chaikin\", \"smooth\", \"spline\"),\n col = c(\"#E41A1C\", \"#4DAF4A\", \"#377EB8\"),\n lwd = 2, cex = 2, box.lwd = 0, inset = 0, horiz = TRUE)\n\n\n"} {"package":"smoothr","topic":"smooth_chaikin","snippet":"### Name: smooth_chaikin\n### Title: Chaikin's corner cutting algorithm\n### Aliases: smooth_chaikin\n\n### ** Examples\n\n# smooth_chaikin works on matrices of coordinates\n# use the matrix of coordinates defining a polygon as an example\nm <- jagged_polygons$geometry[[2]][[1]]\nm_smooth <- smooth_chaikin(m, wrap = TRUE)\nclass(m)\nclass(m_smooth)\nplot(m, type = \"l\", axes = FALSE, xlab = NA, ylab = NA)\nlines(m_smooth, col = \"red\")\n\n# smooth is a wrapper for smooth_chaikin that works on spatial features\nlibrary(sf)\np <- jagged_polygons$geometry[[2]]\np_smooth <- smooth(p, method = \"chaikin\")\nclass(p)\nclass(p_smooth)\nplot(p)\nplot(p_smooth, border = \"red\", add = TRUE)\n\n\n"} {"package":"smoothr","topic":"smooth_densify","snippet":"### Name: smooth_densify\n### Title: Densify lines or polygons\n### Aliases: smooth_densify\n\n### ** Examples\n\n# smooth_densify works on matrices of coordinates\n# use the matrix of coordinates defining a line as an example\nm <- jagged_lines$geometry[[2]][]\nm_dense <- smooth_densify(m, n = 5)\nclass(m)\nclass(m_dense)\nplot(m, type = \"b\", pch = 19, cex = 1.5, axes = FALSE, xlab = NA, ylab = NA)\npoints(m_dense, col = \"red\", pch = 19, cex = 0.5)\n\n# max_distance can be used to ensure vertices are at most a given dist apart\nm_md <- smooth_densify(m, max_distance = 0.05)\nplot(m, type = \"b\", pch = 19, cex = 1.5, axes = FALSE, xlab = NA, ylab = NA)\npoints(m_md, col = \"red\", pch = 19, cex = 0.5)\n\n# smooth is a wrapper for smooth_densify that works on spatial features\nlibrary(sf)\nl <- jagged_lines$geometry[[2]]\nl_dense <- smooth(l, method = \"densify\", n = 2)\nclass(l)\nclass(l_dense)\nplot(l, lwd = 5)\nplot(l_dense, col = \"red\", lwd = 2, lty = 2, add = TRUE)\nplot(l_dense %>% st_cast(\"MULTIPOINT\"), col = \"red\", pch = 19,\n add = TRUE)\n\n\n"} {"package":"smoothr","topic":"smooth_ksmooth","snippet":"### Name: smooth_ksmooth\n### Title: Kernel smooth\n### Aliases: smooth_ksmooth\n\n### ** Examples\n\n# smooth_ksmooth works on matrices of coordinates\n# use the matrix of coordinates defining a polygon as an example\nm <- jagged_polygons$geometry[[2]][[1]]\nm_smooth <- smooth_ksmooth(m, wrap = TRUE)\nclass(m)\nclass(m_smooth)\nplot(m, type = \"l\", col = \"black\", lwd = 3, axes = FALSE, xlab = NA,\n ylab = NA)\nlines(m_smooth, lwd = 3, col = \"red\")\n\n# lines can also be smoothed\nl <- jagged_lines$geometry[[2]][]\nl_smooth <- smooth_ksmooth(l, wrap = FALSE, max_distance = 0.05)\nplot(l, type = \"l\", col = \"black\", lwd = 3, axes = FALSE, xlab = NA,\n ylab = NA)\nlines(l_smooth, lwd = 3, col = \"red\")\n\n# explore different levels of smoothness\np <- jagged_polygons$geometry[[2]][[1]]\nps1 <- smooth_ksmooth(p, wrap = TRUE, max_distance = 0.01, smoothness = 0.5)\nps2 <- smooth_ksmooth(p, wrap = TRUE, max_distance = 0.01, smoothness = 
1)\nps3 <- smooth_ksmooth(p, wrap = TRUE, max_distance = 0.01, smoothness = 2)\n# plot\npar(mar = c(0, 0, 0, 0), oma = c(10, 0, 0, 0))\nplot(p, type = \"l\", col = \"black\", lwd = 3, axes = FALSE, xlab = NA,\n ylab = NA)\nlines(ps1, lwd = 3, col = \"#E41A1C\")\nlines(ps2, lwd = 3, col = \"#4DAF4A\")\nlines(ps3, lwd = 3, col = \"#377EB8\")\npar(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0), new = TRUE)\nplot(0, 0, type = \"n\", bty = \"n\", xaxt = \"n\", yaxt = \"n\", axes = FALSE)\nlegend(\"bottom\", legend = c(\"0.5\", \"1\", \"2\"),\n col = c(\"#E41A1C\", \"#4DAF4A\", \"#377EB8\"),\n lwd = 3, cex = 2, box.lwd = 0, inset = 0, horiz = TRUE)\n\nlibrary(sf)\np <- jagged_polygons$geometry[[2]]\np_smooth <- smooth(p, method = \"ksmooth\")\nclass(p)\nclass(p_smooth)\nplot(p_smooth, border = \"red\")\nplot(p, add = TRUE)\n\n\n"} {"package":"smoothr","topic":"smooth_spline","snippet":"### Name: smooth_spline\n### Title: Spline interpolation\n### Aliases: smooth_spline\n\n### ** Examples\n\n# smooth_spline works on matrices of coordinates\n# use the matrix of coordinates defining a polygon as an example\nm <- jagged_polygons$geometry[[2]][[1]]\nm_smooth <- smooth_spline(m, wrap = TRUE)\nclass(m)\nclass(m_smooth)\nplot(m_smooth, type = \"l\", col = \"red\", axes = FALSE, xlab = NA, ylab = NA)\nlines(m, col = \"black\")\n\n# smooth is a wrapper for smooth_spline that works on spatial features\nlibrary(sf)\np <- jagged_polygons$geometry[[2]]\np_smooth <- smooth(p, method = \"spline\")\nclass(p)\nclass(p_smooth)\nplot(p_smooth, border = \"red\")\nplot(p, add = TRUE)\n\n\n"} {"package":"agriwater","topic":"albedo_modis","snippet":"### Name: albedo_modis\n### Title: Surface Albedo using MODIS images.\n### Aliases: albedo_modis\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.01),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B1.tif\"),filetype = \"GTiff\", overwrite=TRUE)\n\n# creating mask of study area\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\"\nalbedo_modis()\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"albedo_s2","snippet":"### Name: albedo_s2\n### Title: Surface Albedo using Sentinel-2 images.\n### Aliases: albedo_s2\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.07, sd = 0.01), 2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = 
\"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B3.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.03, sd = 0.018),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B4.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B8.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\"\nalbedo_s2()\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"evapo_modis","snippet":"### Name: evapo_modis\n### Title: Actual evapotranspiration (ETa) using MODIS with single\n### agrometeorological data.\n### Aliases: evapo_modis\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B1.tif\"),filetype = \"GTiff\", overwrite=TRUE)\n\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\" - it's the same procedure as the used for\n# evapo_l8(), evapo_l8t(), evapo_modis_grid(), evapo_l8_grid(),\n# evapo_l8t_grid(), evapo_s2() and evapo_s2_grid()\nevapo_modis(doy = 134, RG = 17.6, Ta = 27.9, ET0 = 3.8, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"evapo_s2","snippet":"### Name: evapo_s2\n### Title: Actual evapotranspiration (ETa) using Sentinel-2 images with\n### single agrometeorological data.\n### Aliases: evapo_s2\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.07, sd = 0.01), 2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B3.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.03, sd = 0.018),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat 
+datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B4.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B8.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\"\nevapo_s2(doy = 134, RG = 17.6, Ta = 27.9, ET0 = 3.8, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"kc_modis","snippet":"### Name: kc_modis\n### Title: Crop coefficient (ETa / ET0) using MODIS with single\n### agrometeorological data.\n### Aliases: kc_modis\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate MODIS reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.07, sd = 0.01), 2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"), filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B1.tif\"), filetype = \"GTiff\", overwrite=TRUE)\n\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\"\nkc_modis(doy = 134, RG = 17.6, Ta = 27.9, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"kc_s2","snippet":"### Name: kc_s2\n### Title: Crop coefficient (ETa / ET0) using Sentinel-2 images with single\n### agrometeorological data.\n### Aliases: kc_s2\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.07, sd = 0.01), 2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B3.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.03, sd = 0.018),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B4.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B8.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using 
\"agriwater\"\nkc_s2(doy = 134, RG = 17.6, Ta = 27.9, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"radiation_modis","snippet":"### Name: radiation_modis\n### Title: Energy balance using Landsat-8 images with single\n### agrometeorological data.\n### Aliases: radiation_modis\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B1.tif\"),filetype = \"GTiff\", overwrite=TRUE)\n\n# creating mask of study area\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\" - it's the same procedure as the used for\n# radiation_l8(), radiation_l8t(), radiation_s2(),\n# radiation_l8_grid(), radiation_l8t_grid(),\n# radiation_s2_grid(), radiation_s2() and radiation_modis_grid()\nradiation_modis(doy = 134, RG = 17.6, Ta = 27.9, ET0 = 3.8, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} {"package":"agriwater","topic":"radiation_s2","snippet":"### Name: radiation_s2\n### Title: Energy balance using Sentinel-2 images with single\n### agrometeorological data.\n### Aliases: radiation_s2\n\n### ** Examples\n\nlibrary(agriwater)\n\n# dependencies of package 'agriwater'\nlibrary(terra)\n\n\n# Using a temporary folder to run example\nwd <- tempdir()\ninitial = getwd()\nsetwd(wd)\n\n# creating raster which simulate Sentinel-2 reflectances - for using\n# real data, please download:\n# https://drive.google.com/open?id=14E1wHNLxG7_Dh4I-GqNYakj8YJDgKLzk\n\nxy <- matrix(rnorm(4, mean = 0.07, sd = 0.01), 2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B2.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B3.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.03, sd = 0.018),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B4.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nxy <- matrix(rnorm(4, mean = 0.05, sd = 0.015),2, 2)\nrast <- rast(xy, crs=\"+proj=longlat +datum=WGS84\")\next(rast) <- c(-40.5,-40.45,-9.5,-9.45)\nwriteRaster(rast, file.path(wd, \"B8.tif\"),filetype = \"GTiff\", overwrite=TRUE)\nmask <- as.polygons(rast)\nwriteVector(mask, file.path(getwd(),\"mask.shp\"), overwrite=TRUE)\n\n# using \"agriwater\"\nradiation_s2(doy = 134, RG = 17.6, Ta = 27.9, ET0 = 3.8, a = 1.8, b = -0.008)\n\n#Exiting temporary folder and returning to previous workspace\nsetwd(initial)\n\n\n"} 
{"package":"ICAOD","topic":"FIM_exp_2par","snippet":"### Name: FIM_exp_2par\n### Title: Fisher Information Matrix for the 2-Parameter Exponential Model\n### Aliases: FIM_exp_2par\n\n### ** Examples\n\nFIM_exp_2par(x = c(1, 2), w = c(.5, .5), param = c(3, 4))\n\n\n"} {"package":"ICAOD","topic":"FIM_logistic","snippet":"### Name: FIM_logistic\n### Title: Fisher Information Matrix for the 2-Parameter Logistic (2PL)\n### Model\n### Aliases: FIM_logistic\n\n### ** Examples\n\nFIM_logistic(x = c(1, 2), w = c(.5, .5), param = c(2, 1))\n\n\n"} {"package":"ICAOD","topic":"FIM_logistic_4par","snippet":"### Name: FIM_logistic_4par\n### Title: Fisher Information Matrix for the 4-Parameter Logistic Model\n### Aliases: FIM_logistic_4par\n\n### ** Examples\n\nFIM_logistic_4par(x = c(-6.9, -4.6, -3.9, 6.7 ),\n w = c(0.489, 0.40, 0.061, 0.050),\n param = c(1.563, 1.790, 8.442, 0.137))\n\n\n"} {"package":"ICAOD","topic":"FIM_mixed_inhibition","snippet":"### Name: FIM_mixed_inhibition\n### Title: Fisher Information Matrix for the Mixed Inhibition Model.\n### Aliases: FIM_mixed_inhibition\n\n### ** Examples\n\nFIM_mixed_inhibition(S = c(30, 3.86, 30, 4.60),\n I = c(0, 0, 5.11, 4.16), w = rep(.25, 4),\n param = c(1.5, 5.2, 3.4, 5.6))\n\n\n"} {"package":"ICAOD","topic":"ICA.control","snippet":"### Name: ICA.control\n### Title: Returns ICA Control Optimization Parameters\n### Aliases: ICA.control\n\n### ** Examples\n\nICA.control(ncount = 100)\n\n\n"} {"package":"ICAOD","topic":"bayes","snippet":"### Name: bayes\n### Title: Bayesian D-Optimal Designs\n### Aliases: bayes\n\n### ** Examples\n\n#############################################\n# Two parameter logistic model: uniform prior\n#############################################\n# set the unfirom prior\nuni <- uniform(lower = c(-3, .1), upper = c(3, 2))\n# set the logistic model with formula\nres1 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = \"x\", parvars = c(\"a\", \"b\"),\n family = binomial(), lx = -3, ux = 3,\n k = 5, iter = 1, prior = uni,\n ICA.control = list(rseed = 1366))\n\n## Not run: \n##D res1 <- update(res1, 500)\n##D plot(res1)\n## End(Not run)\n# You can also use your Fisher information matrix (FIM) if you think it is faster!\n## Not run: \n##D bayes(fimfunc = FIM_logistic, lx = -3, ux = 3, k = 5, iter = 500,\n##D prior = uni, ICA.control = list(rseed = 1366))\n## End(Not run)\n\n# with fixed x\n## Not run: \n##D res1.1 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 100, prior = uni,\n##D x = c( -3, -1.5, 0, 1.5, 3),\n##D ICA.control = list(rseed = 1366))\n##D plot(res1.1)\n##D # not optimal\n## End(Not run)\n\n# with quadrature formula\n## Not run: \n##D res1.2 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 1, prior = uni,\n##D crt.bayes.control = list(method = \"quadrature\"),\n##D ICA.control = list(rseed = 1366))\n##D res1.2 <- update(res1.2, 500)\n##D plot(res1.2) # not optimal\n##D # compare it with res1 that was found by automatic integration\n##D plot(res1)\n##D \n##D # we increase the number of quadrature nodes\n##D res1.3 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 1, prior = uni,\n##D crt.bayes.control = list(method = \"quadrature\",\n##D quadrature = list(level = 9)),\n##D ICA.control = 
{"package":"ICAOD","topic":"ICA.control","snippet":"### Name: ICA.control\n### Title: Returns ICA Control Optimization Parameters\n### Aliases: ICA.control\n\n### ** Examples\n\nICA.control(ncount = 100)\n\n\n"} {"package":"ICAOD","topic":"bayes","snippet":"### Name: bayes\n### Title: Bayesian D-Optimal Designs\n### Aliases: bayes\n\n### ** Examples\n\n#############################################\n# Two parameter logistic model: uniform prior\n#############################################\n# set the uniform prior\nuni <- uniform(lower = c(-3, .1), upper = c(3, 2))\n# set the logistic model with formula\nres1 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = \"x\", parvars = c(\"a\", \"b\"),\n family = binomial(), lx = -3, ux = 3,\n k = 5, iter = 1, prior = uni,\n ICA.control = list(rseed = 1366))\n\n## Not run: \n##D res1 <- update(res1, 500)\n##D plot(res1)\n## End(Not run)\n# You can also use your Fisher information matrix (FIM) if you think it is faster!\n## Not run: \n##D bayes(fimfunc = FIM_logistic, lx = -3, ux = 3, k = 5, iter = 500,\n##D prior = uni, ICA.control = list(rseed = 1366))\n## End(Not run)\n\n# with fixed x\n## Not run: \n##D res1.1 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 100, prior = uni,\n##D x = c(-3, -1.5, 0, 1.5, 3),\n##D ICA.control = list(rseed = 1366))\n##D plot(res1.1)\n##D # not optimal\n## End(Not run)\n\n# with quadrature formula\n## Not run: \n##D res1.2 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 1, prior = uni,\n##D crt.bayes.control = list(method = \"quadrature\"),\n##D ICA.control = list(rseed = 1366))\n##D res1.2 <- update(res1.2, 500)\n##D plot(res1.2) # not optimal\n##D # compare it with res1 that was found by automatic integration\n##D plot(res1)\n##D \n##D # we increase the number of quadrature nodes\n##D res1.3 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 5, iter = 1, prior = uni,\n##D crt.bayes.control = list(method = \"quadrature\",\n##D quadrature = list(level = 9)),\n##D ICA.control = list(rseed = 1366))\n##D res1.3 <- update(res1.3, 500)\n##D plot(res1.3)\n##D # by automatic integration (method = \"cubature\"),\n##D # we did not need to worry about the number of nodes.\n## End(Not run)\n###############################################\n# Two parameter logistic model: normal prior #1\n###############################################\n# defining the normal prior #1\nnorm1 <- normal(mu = c(0, 1),\n sigma = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D # initializing\n##D res2 <- bayes(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 4, iter = 1, prior = norm1,\n##D ICA.control = list(rseed = 1366))\n##D res2 <- update(res2, 500)\n##D plot(res2)\n## End(Not run)\n\n###############################################\n# Two parameter logistic model: normal prior #2\n###############################################\n# defining the normal prior #2\nnorm2 <- normal(mu = c(0, 1),\n sigma = matrix(c(1, 0, 0, .5), nrow = 2),\n lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D # initializing\n##D res3 <- bayes(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 4, iter = 1, prior = norm2,\n##D ICA.control = list(rseed = 1366))\n##D \n##D res3 <- update(res3, 700)\n##D plot(res3,\n##D sens.bayes.control = list(cubature = list(maxEval = 3000, tol = 1e-4)),\n##D sens.control = list(optslist = list(maxeval = 3000)))\n## End(Not run)\n\n\n######################################################\n# Two parameter logistic model: skewed normal prior #1\n######################################################\nskew1 <- skewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(1, 0), lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D res4 <- bayes(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 4, iter = 700, prior = skew1,\n##D ICA.control = list(rseed = 1366, ncount = 60))\n##D plot(res4,\n##D sens.bayes.control = list(cubature = list(maxEval = 3000, tol = 1e-4)),\n##D sens.control = list(optslist = list(maxeval = 3000)))\n## End(Not run)\n\n\n######################################################\n# Two parameter logistic model: skewed normal prior #2\n######################################################\nskew2 <- skewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(-1, 0), lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D res5 <- bayes(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 4, iter = 700, prior = skew2,\n##D ICA.control = list(rseed = 1366, ncount = 60))\n##D plot(res5,\n##D sens.bayes.control = list(cubature = list(maxEval = 3000, tol = 1e-4)),\n##D sens.control = list(optslist = list(maxeval = 3000)))\n## End(Not run)\n\n###############################################\n# Two parameter logistic model: Student's t prior\n###############################################\n# set the prior\nstud <- student(mean = c(0, 1), S = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n df = 3, lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D res6 <- bayes(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 5, iter = 500, prior = stud,\n##D ICA.control = list(ncount = 50, rseed = 1366))
\n##D plot(res6)\n## End(Not run)\n# not bad, but to find a very accurate design we increase\n# the ncount to 200 and repeat the optimization\n## Not run: \n##D res6 <- bayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3, k = 5, iter = 1000, prior = stud,\n##D ICA.control = list(ncount = 200, rseed = 1366))\n##D plot(res6)\n## End(Not run)\n\n\n##############################################\n# 4-parameter sigmoid Emax model: uniform prior\n##############################################\nlb <- c(4, 11, 100, 5)\nub <- c(8, 15, 130, 9)\n## Not run: \n##D res7 <- bayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D lx = .001, ux = 500, k = 5, iter = 200, prior = uniform(lb, ub),\n##D ICA.control = list(rseed = 1366, ncount = 60))\n##D plot(res7,\n##D sens.bayes.control = list(cubature = list(maxEval = 500, tol = 1e-3)),\n##D sens.control = list(optslist = list(maxeval = 500)))\n## End(Not run)\n\n#######################################################################\n# 2-parameter Cox Proportional-Hazards Model for type one censored data\n#######################################################################\n# The Fisher information matrix is available here with the name FIM_2par_exp_censor1\n# However, we should reparameterize the function to match the standard of the argument 'fimfunc'\nmyfim <- function(x, w, param)\n FIM_2par_exp_censor1(x = x, w = w, param = param, tcensor = 30)\n## Not run: \n##D res8 <- bayes(fimfunc = myfim, lx = 0, ux = 1, k = 4,\n##D iter = 1, prior = uniform(c(-11, -11), c(11, 11)),\n##D ICA.control = list(rseed = 1366))\n##D \n##D res8 <- update(res8, 200)\n##D plot(res8,\n##D sens.bayes.control = list(cubature = list(maxEval = 500, tol = 1e-3)),\n##D sens.control = list(optslist = list(maxeval = 500)))\n## End(Not run)\n\n\n#######################################################################\n# 2-parameter Cox Proportional-Hazards Model for random censored data\n#######################################################################\n# The Fisher information matrix is available here with the name FIM_2par_exp_censor2\n# However, we should reparameterize the function to match the standard of the argument 'fimfunc'\nmyfim <- function(x, w, param)\n FIM_2par_exp_censor2(x = x, w = w, param = param, tcensor = 30)\n## Not run: \n##D res9 <- bayes(fimfunc = myfim, lx = 0, ux = 1, k = 2,\n##D iter = 200, prior = uniform(c(-11, -11), c(11, 11)),\n##D ICA.control = list(rseed = 1366))\n##D plot(res9,\n##D sens.bayes.control = list(cubature = list(maxEval = 100, tol = 1e-3)),\n##D sens.control = list(optslist = list(maxeval = 100)))\n## End(Not run)\n\n#################################\n# Weibull model: Uniform prior\n################################\n# see Dette, H., & Pepelyshev, A. (2008).
\n# Efficient experimental designs for sigmoidal growth models.\n# Journal of statistical planning and inference, 138(1), 2-17.\n\n## See how we fixed some parameters in Bayesian designs\n## Not run: \n##D res10 <- bayes(formula = ~a - b * exp(-lambda * t ^h),\n##D predvars = c(\"t\"),\n##D parvars = c(\"a=1\", \"b=1\", \"lambda\", \"h=1\"),\n##D lx = .00001, ux = 20,\n##D prior = uniform(.5, 2.5), k = 5, iter = 400,\n##D ICA.control = list(rseed = 1366))\n##D plot(res10)\n## End(Not run)\n\n#################################\n# Weibull model: Normal prior\n################################\nnorm3 <- normal(mu = 1, sigma = .1, lower = .5, upper = 2.5)\nres11 <- bayes(formula = ~a - b * exp(-lambda * t ^h),\n predvars = c(\"t\"),\n parvars = c(\"a=1\", \"b=1\", \"lambda\", \"h=1\"),\n lx = .00001, ux = 20, prior = norm3, k = 4, iter = 1,\n ICA.control = list(rseed = 1366))\n\n## Not run: \n##D res11 <- update(res11, 400)\n##D plot(res11)\n## End(Not run)\n\n#################################\n# Richards model: Normal prior\n#################################\nnorm4 <- normal(mu = c(1, 1), sigma = matrix(c(.2, 0.1, 0.1, .4), 2, 2),\n lower = c(.4, .4), upper = c(1.6, 1.6))\n## Not run: \n##D res12 <- bayes(formula = ~a/(1 + b * exp(-lambda*t))^h,\n##D predvars = c(\"t\"),\n##D parvars = c(\"a=1\", \"b\", \"lambda\", \"h=1\"),\n##D lx = .00001, ux = 10,\n##D prior = norm4,\n##D k = 5, iter = 400,\n##D ICA.control = list(rseed = 1366))\n##D plot(res12,\n##D sens.bayes.control = list(cubature = list(maxEval = 1000, tol = 1e-3)),\n##D sens.control = list(optslist = list(maxeval = 1000)))\n##D ## or we can use the quadrature formula to plot the derivative function\n##D plot(res12,\n##D sens.bayes.control = list(method = \"quadrature\"),\n##D sens.control = list(optslist = list(maxeval = 1000)))\n##D \n## End(Not run)\n\n#################################\n# Exponential model: Uniform prior\n#################################\n## Not run: \n##D res13 <- bayes(formula = ~a + exp(-b*x), predvars = \"x\",\n##D parvars = c(\"a = 1\", \"b\"),\n##D lx = 0.0001, ux = 1,\n##D prior = uniform(lower = 1, upper = 20),\n##D iter = 300, k = 3,\n##D ICA.control = list(rseed = 100))\n##D plot(res13)\n## End(Not run)\n\n#################################\n# Power logistic model\n#################################\n# See Duarte, B. P., & Wong, W. K. (2014).
\n# A Semidefinite Programming based approach for finding\n# Bayesian optimal designs for nonlinear models\nuni1 <- uniform(lower = c(-.3, 6, .5), upper = c(.3, 8, 1))\n## Not run: \n##D res14 <- bayes(formula = ~1/(1 + exp(-b *(x - a)))^s, predvars = \"x\",\n##D parvars = c(\"a\", \"b\", \"s\"),\n##D lx = -1, ux = 1, prior = uni1, k = 5, iter = 1)\n##D res14 <- update(res14, 300)\n##D plot(res14)\n## End(Not run)\n\n############################################################################\n# A two-variable generalized linear model with a gamma-distributed response\n############################################################################\nlb <- c(.5, 0, 0, 0, 0, 0)\nub <- c(2, 1, 1, 1, 1, 1)\nmyformula1 <- ~beta0+beta1*x1+beta2*x2+beta3*x1^2+beta4*x2^2+beta5*x1*x2\n## Not run: \n##D res15 <- bayes(formula = myformula1,\n##D predvars = c(\"x1\", \"x2\"), parvars = paste(\"beta\", 0:5, sep = \"\"),\n##D family = Gamma(),\n##D lx = rep(0, 2), ux = rep(1, 2),\n##D prior = uniform(lower = lb, upper = ub),\n##D k = 7, iter = 1, ICA.control = list(rseed = 1366))\n##D res15 <- update(res15, 500)\n##D plot(res15,\n##D sens.bayes.control = list(cubature = list(maxEval = 5000, tol = 1e-4)),\n##D sens.control = list(optslist = list(maxeval = 3000)))\n## End(Not run)\n\n#################################\n# Three parameter logistic model\n#################################\n## Not run: \n##D sigma1 <- matrix(-0.1, nrow = 3, ncol = 3)\n##D diag(sigma1) <- c(.5, .4, .1)\n##D norm5 <- normal(mu = c(0, 1, .2), sigma = sigma1,\n##D lower = c(-3, .1, 0), upper = c(3, 2, .7))\n##D res16 <- bayes(formula = ~ c + (1-c)/(1 + exp(-b *(x - a))), predvars = \"x\",\n##D parvars = c(\"a\", \"b\", \"c\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D k = 4, iter = 500, prior = norm5,\n##D ICA.control = list(rseed = 1366, ncount = 50),\n##D crt.bayes.control = list(cubature = list(maxEval = 2500, tol = 1e-4)))\n##D plot(res16,\n##D sens.bayes.control = list(cubature = list(maxEval = 3000, tol = 1e-4)),\n##D sens.control = list(optslist = list(maxeval = 3000)))\n##D # took 925 seconds on my system\n## End(Not run)\n\n\n\n\n"} {"package":"ICAOD","topic":"bayescomp","snippet":"### Name: bayescomp\n### Title: Bayesian Compound DP-Optimal Designs\n### Aliases: bayescomp\n\n### ** Examples\n\n##########################################################################\n# DP-optimal design for a logistic model with two predictors: with formula\n##########################################################################\np <- c(1, -2, 1, -1)\nmyprior <- uniform(p - 1.5, p + 1.5)\nmyformula1 <- ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2))\nres1 <- bayescomp(formula = myformula1,\n predvars = c(\"x1\", \"x2\"),\n parvars = c(\"b0\", \"b1\", \"b2\", \"b3\"),\n family = binomial(),\n lx = c(-1, -1), ux = c(1, 1),\n prior = myprior, iter = 1, k = 7,\n prob = ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2)),\n alpha = .5, ICA.control = list(rseed = 1366),\n crt.bayes.control = list(cubature = list(tol = 1e-4, maxEval = 1000)))\n\n\n## Not run: \n##D res1 <- update(res1, 1000)\n##D plot(res1, sens.bayes.control = list(cubature = list(tol = 1e-3, maxEval = 1000)))\n##D # or use the quadrature method\n##D plot(res1, sens.bayes.control = list(method = \"quadrature\"))\n## End(Not run)\n\n##########################################################################\n# DP-optimal design for a logistic model with two predictors: with fimfunc\n##########################################################################
\n# The function of the Fisher information matrix for this model is 'FIM_logistic_2pred'\n# We should reparameterize it to match the standard of the argument 'fimfunc'\n## Not run: \n##D myfim <- function(x, w, param){\n##D npoint <- length(x)/2\n##D x1 <- x[1:npoint]\n##D x2 <- x[(npoint+1):(npoint*2)]\n##D FIM_logistic_2pred(x1 = x1, x2 = x2, w = w, param = param)\n##D }\n##D \n##D ## The following function is equivalent to the function created\n##D # by the formula: ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\n##D # It returns the probability of success given x and param,\n##D # where x = c(x1, x2) and param = c(b0, b1, b2, b3)\n##D \n##D myprob <- function(x, param){\n##D npoint <- length(x)/2\n##D x1 <- x[1:npoint]\n##D x2 <- x[(npoint+1):(npoint*2)]\n##D b0 <- param[1]\n##D b1 <- param[2]\n##D b2 <- param[3]\n##D b3 <- param[4]\n##D out <- 1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\n##D return(out)\n##D }\n##D \n##D res2 <- bayescomp(fimfunc = myfim,\n##D lx = c(-1, -1), ux = c(1, 1),\n##D prior = myprior, iter = 1000, k = 7,\n##D prob = myprob, alpha = .5,\n##D ICA.control = list(rseed = 1366))\n##D plot(res2, sens.bayes.control = list(cubature = list(maxEval = 1000, tol = 1e-4)))\n##D # quadrature with 6 nodes (default)\n##D plot(res2, sens.bayes.control = list(method = \"quadrature\"))\n## End(Not run)\n\n\n\n\n"} {"package":"ICAOD","topic":"beff","snippet":"### Name: beff\n### Title: Calculates Relative Efficiency for Bayesian Optimal Designs\n### Aliases: beff\n\n### ** Examples\n\n#############################\n# 2PL model\n#############################\nformula4.1 <- ~ 1/(1 + exp(-b *(x - a)))\npredvars4.1 <- \"x\"\nparvars4.1 <- c(\"a\", \"b\")\n\n# des4.1 is a list of Bayesian optimal designs with corresponding priors.\n\n\ndes4.1 <- vector(\"list\", 6)\ndes4.1[[1]]$x <- c(-3, -1.20829, 0, 1.20814, 3)\ndes4.1[[1]]$w <- c(.24701, .18305, .13988, .18309, .24702)\ndes4.1[[1]]$prior <- uniform(lower = c(-3, .1), upper = c(3, 2))\n\ndes4.1[[2]]$x <- c(-2.41692, -1.16676, .04386, 1.18506, 2.40631)\ndes4.1[[2]]$w <- c(.26304, .18231, .14205, .16846, .24414)\ndes4.1[[2]]$prior <- student(mean = c(0, 1), S = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n df = 3, lower = c(-3, .1), upper = c(3, 2))\n\ndes4.1[[3]]$x <- c(-2.25540, -.76318, .54628, 2.16045)\ndes4.1[[3]]$w <- c(.31762, .18225, .18159, .31853)\ndes4.1[[3]]$prior <- normal(mu = c(0, 1),\n sigma = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n lower = c(-3, .1), upper = c(3, 2))\n\ndes4.1[[4]]$x <- c(-2.23013, -.66995, .67182, 2.23055)\ndes4.1[[4]]$w <- c(.31420, .18595, .18581, .31404)\ndes4.1[[4]]$prior <- normal(mu = c(0, 1),\n sigma = matrix(c(1, 0, 0, .5), nrow = 2),\n lower = c(-3, .1), upper = c(3, 2))\n\ndes4.1[[5]]$x <- c(-1.51175, .12043, 1.05272, 2.59691)\ndes4.1[[5]]$w <- c(.37679, .14078, .12676, .35567)\ndes4.1[[5]]$prior <- skewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(1, 0), lower = c(-3, .1), upper = c(3, 2))\n\n\ndes4.1[[6]]$x <- c(-2.50914, -1.16780, -.36904, 1.29227)\ndes4.1[[6]]$w <- c(.35767, .11032, .15621, .37580)\ndes4.1[[6]]$prior <- skewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(-1, 0), lower = c(-3, .1), upper = c(3, 2))\n\n## now we want to find the relative efficiency of\n## all Bayesian optimal designs assuming different priors (6 * 6)\neff4.1 <- matrix(NA, 6, 6)\ncolnames(eff4.1) <- c(\"uni\", \"t\", \"norm1\", \"norm2\", \"skew1\", \"skew2\")\nrownames(eff4.1) <- colnames(eff4.1)\n## Not run: \n##D for (i in 1:6)\n##D for (j in 1:6)
\n##D eff4.1[i, j] <- beff(formula = formula4.1,\n##D predvars = predvars4.1,\n##D parvars = parvars4.1,\n##D family = binomial(),\n##D prior = des4.1[[i]]$prior,\n##D x2 = des4.1[[i]]$x,\n##D w2 = des4.1[[i]]$w,\n##D x1 = des4.1[[j]]$x,\n##D w1 = des4.1[[j]]$w)\n##D # For example, the first row represents Bayesian D-efficiencies of the different\n##D # Bayesian optimal designs found assuming different priors with respect to\n##D # the Bayesian D-optimal design found under the uniform prior distribution.\n##D eff4.1\n## End(Not run)\n\n#############################\n# Relative efficiency for the DP-Compound criterion\n#############################\np <- c(1, -2, 1, -1)\nprior4.4 <- uniform(p - 1.5, p + 1.5)\nformula4.4 <- ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2))\nprob4.4 <- ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\npredvars4.4 <- c(\"x1\", \"x2\")\nparvars4.4 <- c(\"b0\", \"b1\", \"b2\", \"b3\")\nlb <- c(-1, -1)\nub <- c(1, 1)\n\n\n\n## des4.4 is a list of DP-optimal designs found using different values for alpha\ndes4.4 <- vector(\"list\", 5)\ndes4.4[[1]]$x <- c(-1, 1)\ndes4.4[[1]]$w <- c(1)\ndes4.4[[1]]$alpha <- 0\n\n\ndes4.4[[2]]$x <- c(1, -.62534, .11405, -1, 1, .28175, -1, -1, 1, -1, -1, 1, 1, .09359)\ndes4.4[[2]]$w <- c(.08503, .43128, .01169, .14546, .05945, .08996, .17713)\ndes4.4[[2]]$alpha <- .25\n\n\ndes4.4[[3]]$x <- c(-1, .30193, 1, 1, .07411, -1, -.31952, -.08251, 1, -1, 1, -1, -1, 1)\ndes4.4[[3]]$w <- c(.09162, .10288, .15615, .13123, .01993, .22366, .27454)\ndes4.4[[3]]$alpha <- .5\n\ndes4.4[[4]]$x <- c(1, -1, .28274, 1, -1, -.19674, .03288, 1, -1, 1, -1, -.16751, 1, -1)\ndes4.4[[4]]$w <- c(.19040, .24015, .10011, .20527, .0388, .20075, .02452)\ndes4.4[[4]]$alpha <- .75\n\ndes4.4[[5]]$x <- c(1, -1, .26606, -.13370, 1, -.00887, -1, 1, -.2052, 1, 1, -1, -1, -1)\ndes4.4[[5]]$w <- c(.23020, .01612, .09546, .16197, .23675, .02701, .2325)\ndes4.4[[5]]$alpha <- 1\n\n# D-efficiency of the DP-optimal designs:\n# des4.4[[5]]$x and des4.4[[5]]$w form the D-optimal design\n\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n x2 = des4.4[[5]]$x,\n w2 = des4.4[[5]]$w,\n x1 = des4.4[[2]]$x,\n w1 = des4.4[[2]]$w)\n\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n x2 = des4.4[[5]]$x,\n w2 = des4.4[[5]]$w,\n x1 = des4.4[[3]]$x,\n w1 = des4.4[[3]]$w)\n\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n x2 = des4.4[[5]]$x,\n w2 = des4.4[[5]]$w,\n x1 = des4.4[[4]]$x,\n w1 = des4.4[[4]]$w)\n\n# must be one!\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n prob = prob4.4,\n type = \"PA\",\n x2 = des4.4[[5]]$x,\n w2 = des4.4[[5]]$w,\n x1 = des4.4[[5]]$x,\n w1 = des4.4[[5]]$w)\n\n## P-efficiency\n# reported in Table 4 as eff_P\n# des4.4[[1]]$x and des4.4[[1]]$w form the P-optimal design\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n prob = prob4.4,\n type = \"PA\",\n x2 = des4.4[[1]]$x,\n w2 = des4.4[[1]]$w,\n x1 = des4.4[[2]]$x,\n w1 = des4.4[[2]]$w)\n\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n prob = prob4.4,\n type = \"PA\",\n x2 = des4.4[[1]]$x,\n w2 = des4.4[[1]]$w,\n x1 = des4.4[[3]]$x,\n w1 = des4.4[[3]]$w)\n
\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n prob = prob4.4,\n type = \"PA\",\n x2 = des4.4[[1]]$x,\n w2 = des4.4[[1]]$w,\n x1 = des4.4[[4]]$x,\n w1 = des4.4[[4]]$w)\n\nbeff(formula = formula4.4,\n predvars = predvars4.4,\n parvars = parvars4.4,\n family = binomial(),\n prior = prior4.4,\n prob = prob4.4,\n type = \"PA\",\n x2 = des4.4[[1]]$x,\n w2 = des4.4[[1]]$w,\n x1 = des4.4[[5]]$x,\n w1 = des4.4[[5]]$w)\n\n\n\n\n\n\n\n"} {"package":"ICAOD","topic":"crt.bayes.control","snippet":"### Name: crt.bayes.control\n### Title: Returns Control Parameters for Approximating Bayesian Criteria\n### Aliases: crt.bayes.control\n\n### ** Examples\n\ncrt.bayes.control()\ncrt.bayes.control(cubature = list(tol = 1e-4))\ncrt.bayes.control(quadrature = list(level = 4))\n\n\n"} {"package":"ICAOD","topic":"crt.minimax.control","snippet":"### Name: crt.minimax.control\n### Title: Returns Control Parameters for Optimizing Minimax Criteria Over\n### The Parameter Space\n### Aliases: crt.minimax.control\n\n### ** Examples\n\ncrt.minimax.control(optslist = list(maxeval = 2000))\n\n\n"} {"package":"ICAOD","topic":"leff","snippet":"### Name: leff\n### Title: Calculates Relative Efficiency for Locally Optimal Designs\n### Aliases: leff\n\n### ** Examples\n\np <- c(1, -2, 1, -1)\nprior4.4 <- uniform(p - 1.5, p + 1.5)\nformula4.4 <- ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2))\nprob4.4 <- ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\npredvars4.4 <- c(\"x1\", \"x2\")\nparvars4.4 <- c(\"b0\", \"b1\", \"b2\", \"b3\")\n\n\n# The locally D-optimal design is as follows:\n## weight and point of D-optimal design\n# Point1 Point2 Point3 Point4\n# /1.00000 \\ /-1.00000\\ /0.06801 \\ /1.00000 \\\n# \\-1.00000/ \\-1.00000/ \\1.00000 / \\1.00000 /\n# Weight1 Weight2 Weight3 Weight4\n# 0.250 0.250 0.250 0.250\n\nxopt_D <- c(1, -1, .0680, 1, -1, -1, 1, 1)\nwopt_D <- rep(.25, 4)\n\n# Let's see: if we use only three of the design points, what is the relative efficiency?\nleff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n x1 = c(1, -1, .0680, -1, -1, 1), w1 = c(.33, .33, .33),\n inipars = p,\n x2 = xopt_D, w2 = wopt_D)\n# Wow, it drops heavily!\n\n\n# The locally P-optimal design has only one support point, (x1, x2) = (-1, 1)\nxopt_P <- c(-1, 1)\nwopt_P <- 1\n\n# What is the relative P-efficiency of the D-optimal design with respect to the P-optimal design?\nleff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n x1 = xopt_D, w1 = wopt_D,\n inipars = p,\n type = \"PA\",\n prob = prob4.4,\n x2 = xopt_P, w2 = wopt_P)\n# .535\n\n\n\n\n"} {"package":"ICAOD","topic":"locally","snippet":"### Name: locally\n### Title: Locally D-Optimal Designs\n### Aliases: locally\n\n### ** Examples\n\n#################################\n# Exponential growth model\n################################\n# See how we set the stopping rule by adjusting 'stop_rule', 'checkfreq' and 'stoptol'\n# It calls the 'senslocally' function every 'checkfreq' iterations to\n# calculate the ELB. If the ELB is greater than stoptol = .95, the algorithm stops.
\n\n# initializing by one iteration\nres1 <- locally(formula = ~a + exp(-b*x), predvars = \"x\", parvars = c(\"a\", \"b\"),\n lx = 0, ux = 1, inipars = c(1, 10),\n iter = 1, k = 2,\n ICA.control = ICA.control(rseed = 100,\n stop_rule = \"equivalence\",\n checkfreq = 20, stoptol = .95))\n## Not run: \n##D # update the algorithm\n##D res1 <- update(res1, 150)\n##D # stops at iteration 21 because the ELB is greater than .95\n## End(Not run)\n\n### fixed x; lx and ux are only required for the equivalence theorem\n## Not run: \n##D res1.1 <- locally(formula = ~a + exp(-b*x), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D lx = 0, ux = 1, inipars = c(1, 10),\n##D iter = 100,\n##D x = c(.25, .5, .75),\n##D ICA.control = ICA.control(rseed = 100))\n##D plot(res1.1)\n##D # we cannot have an optimal design using this x\n## End(Not run)\n\n################################\n## two parameter logistic model\n################################\nres2 <- locally(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = \"x\", parvars = c(\"a\", \"b\"),\n family = binomial(), lx = -3, ux = 3,\n inipars = c(1, 3), iter = 1, k = 2,\n ICA.control = list(rseed = 100, stop_rule = \"equivalence\",\n checkfreq = 50, stoptol = .95))\n## Not run: \n##D res2 <- update(res2, 100)\n##D # stops at iteration 51\n## End(Not run)\n\n\n\n\n################################\n# A model with two predictors\n################################\n# mixed inhibition model\n## Not run: \n##D res3 <- locally(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D family = gaussian(),\n##D lx = c(0, 0), ux = c(30, 60),\n##D k = 4,\n##D iter = 300,\n##D inipars = c(1.5, 5.2, 3.4, 5.6),\n##D ICA.control = list(rseed = 100, stop_rule = \"equivalence\",\n##D checkfreq = 50, stoptol = .95))\n##D # stops at iteration 100\n## End(Not run)\n\n\n## Not run: \n##D # fixed x\n##D res3.1 <- locally(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D family = gaussian(),\n##D lx = c(0, 0), ux = c(30, 60),\n##D iter = 100,\n##D x = c(20, 4, 20, 4, 10, 0, 0, 30, 3, 2),\n##D inipars = c(1.5, 5.2, 3.4, 5.6),\n##D ICA.control = list(rseed = 100))\n## End(Not run)\n\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# A-optimal design for the 2PL model.\n# the criterion function must have arguments x, w, fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nres4 <- locally(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lx = -3, ux = 3, inipars = c(1, 1.25),\n iter = 1, k = 2,\n crtfunc = Aopt,\n sensfunc = Aopt_sens,\n ICA.control = list(checkfreq = Inf))\n## Not run: \n##D res4 <- update(res4, 50)
\n## End(Not run)\n\n# When the FIM of the model is defined directly via the argument 'fimfunc'\n# the criterion function must have arguments x, w, fimfunc and param.\n# use 'fimfunc' as a function of the design points x, design weights w\n# and param whenever needed.\nAopt2 <- function(x, w, param, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, param = param))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens2 <- function(xi_x, x, w, param, fimfunc){\n fim <- fimfunc(x = x, w = w, param = param)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, param = param)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nres4.1 <- locally(fimfunc = FIM_logistic,\n lx = -3, ux = 3, inipars = c(1, 1.25),\n iter = 1, k = 2,\n crtfunc = Aopt2,\n sensfunc = Aopt_sens2,\n ICA.control = list(checkfreq = Inf))\n## Not run: \n##D res4.1 <- update(res4.1, 50)\n##D plot(res4.1)\n## End(Not run)\n\n\n# locally c-optimal design\n# example from Chaloner and Larntz (1989), Figure 3\nc_opt <- function(x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n c <- matrix(c(1, -gam * b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% solve(M)))\n}\n\nc_sens <- function(xi_x, x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(M)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n c <- matrix(c(1, -gam * b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% M_inv %*% M_x %*% M_inv)) - sum(diag(B %*% M_inv))\n}\n\n\nres4.2 <- locally(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lx = -1, ux = 1, inipars = c(0, 7),\n iter = 1, k = 2,\n crtfunc = c_opt, sensfunc = c_sens,\n ICA.control = list(rseed = 1, checkfreq = Inf))\n## Not run: \n##D res4.2 <- update(res4.2, 100)\n## End(Not run)\n\n\n\n"} {"package":"ICAOD","topic":"locallycomp","snippet":"### Name: locallycomp\n### Title: Locally DP-Optimal Designs\n### Aliases: locallycomp\n\n### ** Examples\n\n## Here we produce the results of Table 2 in McGree and Eccleston (2008)\n# For D- and P-efficiency see ?leff and ?peff\n\np <- c(1, -2, 1, -1)\nprior4.4 <- uniform(p - 1.5, p + 1.5)\nformula4.4 <- ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2))\nprob4.4 <- ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\npredvars4.4 <- c(\"x1\", \"x2\")\nparvars4.4 <- c(\"b0\", \"b1\", \"b2\", \"b3\")\nlb <- c(-1, -1)\nub <- c(1, 1)\n\n\n# set checkfreq = Inf to ask for the equivalence theorem at the final step.\nres.0 <- locallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n alpha = 0, k = 1, inipars = p, iter = 10,\n ICA.control = ICA.control(checkfreq = Inf))\n\n## Not run: \n##D res.25 <- locallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .25, k = 4, inipars = p, iter = 350,\n##D ICA.control = ICA.control(checkfreq = Inf))\n##D \n##D res.5 <- locallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .5, k = 4, inipars = p, iter = 350,\n##D ICA.control = ICA.control(checkfreq = Inf))\n##D res.75 <- locallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,
\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .75, k = 4, inipars = p, iter = 350,\n##D ICA.control = ICA.control(checkfreq = Inf))\n##D \n##D res.1 <- locallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = 1, k = 4, inipars = p, iter = 350,\n##D ICA.control = ICA.control(checkfreq = Inf))\n##D \n##D #### computing the D-efficiency\n##D # the locally D-optimal design is the locally DP-optimal design when alpha = 1.\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x1 = res.0$evol[[10]]$x, w1 = res.0$evol[[10]]$w,\n##D inipars = p,\n##D x2 = res.1$evol[[350]]$x, w2 = res.1$evol[[350]]$w)\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x1 = res.25$evol[[350]]$x, w1 = res.25$evol[[350]]$w,\n##D inipars = p,\n##D x2 = res.1$evol[[350]]$x, w2 = res.1$evol[[350]]$w)\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x1 = res.5$evol[[350]]$x, w1 = res.5$evol[[350]]$w,\n##D inipars = p,\n##D x2 = res.1$evol[[350]]$x, w2 = res.1$evol[[350]]$w)\n##D \n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x1 = res.75$evol[[350]]$x, w1 = res.75$evol[[350]]$w,\n##D inipars = p,\n##D x2 = res.1$evol[[350]]$x, w2 = res.1$evol[[350]]$w)\n##D \n##D \n##D \n##D #### computing the P-efficiency\n##D # the locally P-optimal design is the locally DP-optimal design when alpha = 0.\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x2 = res.0$evol[[10]]$x, w2 = res.0$evol[[10]]$w,\n##D prob = prob4.4,\n##D type = \"PA\",\n##D inipars = p,\n##D x1 = res.25$evol[[350]]$x, w1 = res.25$evol[[350]]$w)\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x2 = res.0$evol[[10]]$x, w2 = res.0$evol[[10]]$w,\n##D prob = prob4.4,\n##D inipars = p,\n##D type = \"PA\",\n##D x1 = res.5$evol[[350]]$x, w1 = res.5$evol[[350]]$w)\n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x2 = res.0$evol[[10]]$x, w2 = res.0$evol[[10]]$w,\n##D prob = prob4.4,\n##D inipars = p,\n##D type = \"PA\",\n##D x1 = res.75$evol[[350]]$x, w1 = res.75$evol[[350]]$w)\n##D \n##D \n##D leff(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4, family = binomial(),\n##D x2 = res.0$evol[[10]]$x, w2 = res.0$evol[[10]]$w,\n##D prob = prob4.4,\n##D type = \"PA\",\n##D inipars = p,\n##D x1 = res.1$evol[[350]]$x, w1 = res.1$evol[[350]]$w)\n##D \n##D \n## End(Not run)\n\n\n"} {"package":"ICAOD","topic":"meff","snippet":"### Name: meff\n### Title: Calculates Relative Efficiency for Minimax Optimal Designs\n### Aliases: meff\n\n### ** Examples\n\n# Relative D-efficiency with respect to the minimax criterion\nmeff(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lp = c(-3, .5), up = c(3, 2),\n x2 = c(-3, -1.608782, 0, 1.608782, 3),\n w2 = c(0.22291601, 0.26438449, 0.02539899, 0.26438449, 0.22291601),\n x1 = c(-1, 1), w1 = c(.5, .5))\n\n\n\n# A function to calculate the locally D-optimal design for the 2PL model\nDopt_2pl <- function(a, b){\n x <- c(a + (1/b) * 1.5434046, a - (1/b) * 1.5434046)\n return(list(x = x, w = c(.5, .5)))\n}\n# Relative D-efficiency with respect to the standardized maximin criterion
criterion\nmeff(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lp = c(-3, .5), up = c(3, 2),\n x2 = c(-3, -1.611255, 0, 1.611255, 3),\n w2 = c(0.22167034, 0.26592974, 0.02479984, 0.26592974, 0.22167034),\n x1 = c(0, -1), w1 = c(.5, .5),\n standardized = TRUE,\n localdes = Dopt_2pl)\n\n\n\n\n"} {"package":"ICAOD","topic":"minimax","snippet":"### Name: minimax\n### Title: Minimax and Standardized Maximin D-Optimal Designs\n### Aliases: minimax\n\n### ** Examples\n\n########################################\n# Two-parameter exponential growth model\n########################################\nres1 <- minimax(formula = ~a + exp(-b*x), predvars = \"x\", parvars = c(\"a\", \"b\"),\n lx = 0, ux = 1, lp = c(1, 1), up = c(1, 10),\n iter = 1, k = 4,\n ICA.control= ICA.control(rseed = 100),\n crt.minimax.control = list(optslist = list(maxeval = 100)))\n# The optimal design has 3 points, but we set k = 4 for illustration purposes to\n# show how the algorithm modifies the design by adjusting the weights\n# The value of maxeval is changed to reduce the CPU time\n## Not run: \n##D res1 <- update(res1, 150)\n##D # running the algorithm for 150 more iterations\n## End(Not run)\n\nres1 # print method\nplot(res1) # Verifying the general equivalence theorem\n\n## Not run: \n##D ## fixed x\n##D res1.1 <- minimax(formula = ~a + exp(-b*x), predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D lx = 0, ux = 1, lp = c(1, 1), up = c(1, 10),\n##D x = c(0, .5, 1),\n##D iter = 150, k = 3, ICA.control= ICA.control(rseed = 100))\n##D # not optimal\n## End(Not run)\n\n########################################\n# Two-parameter logistic model.\n########################################\n# A little playing with the tuning parameters\n# The value of maxeval is reduced to 200 to increase the speed\ncont1 <- crt.minimax.control(optslist = list(maxeval = 200))\ncont2 <- ICA.control(rseed = 100, checkfreq = Inf, ncount = 60)\n\n## Not run: \n##D res2 <- minimax(formula = ~1/(1 + exp(-b *(x - a))), predvars = \"x\",\n##D parvars = c(\"a\", \"b\"),\n##D family = binomial(), lx = -3, ux = 3,\n##D lp = c(0, 1), up = c(1, 2.5), iter = 200, k = 3,\n##D ICA.control= cont2, crt.minimax.control = cont1)\n##D print(res2)\n##D plot(res2)\n## End(Not run)\n\n############################################\n# An example of a model with two predictors\n############################################\n# Mixed inhibition model\nlower <- c(1, 4, 2, 4)\nupper <- c(1, 5, 3, 5)\ncont <- crt.minimax.control(optslist = list(maxeval = 100)) # to be faster\n## Not run: \n##D res3 <- minimax(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D lx = c(0, 0), ux = c(30, 60), k = 4,\n##D iter = 100, lp = lower, up = upper,\n##D ICA.control= list(rseed = 100),\n##D crt.minimax.control = cont)\n##D \n##D res3 <- update(res3, 100)\n##D print(res3)\n##D plot(res3) # sensitivity plot\n##D res3$arg$time\n## End(Not run)\n\n# Now consider grid points instead of assuming a continuous parameter space\n# set n.grid to 5\n## Not run: \n##D res4 <- minimax(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D lx = c(0, 0), ux = c(30, 60),\n##D k = 4, iter = 130, n.grid = 5, lp = lower, up = upper,\n##D ICA.control= list(rseed = 100, checkfreq = Inf),\n##D crt.minimax.control = cont)\n##D print(res4)\n##D plot(res4) # sensitivity plot\n## End(Not 
run)\n\n############################################\n# Standardized maximin D-optimal designs\n############################################\n# Assume the purpose is finding STANDARDIZED designs\n# We know from the literature that the locally D-optimal design (LDOD)\n# for this model has an analytical solution.\n# The following function takes the parameters as input and returns\n# the design points and weights of the LDOD.\n# x and w exactly match the arguments of 'fimfunc'.\n# x is a vector that contains the design points 'dimension-wise'.\n# see explanation of the arguments of 'fimfunc' in 'Details'.\n\nLDOD <- function(V, Km, Kic, Kiu){\n # the first dimension is for S and the second one is for I.\n S_min <- 0\n S_max <- 30\n I_min <- 0\n I_max <- 60\n s2 <- max(S_min, S_max*Km*Kiu*(Kic+I_min)/\n (S_max*Kic*I_min+S_max*Kic*Kiu+2*Km*Kiu*I_min+2*Km*Kiu*Kic))\n i3 <- min((2*S_max*Kic*I_min + S_max*Kic*Kiu+2*Km*Kiu*I_min+Km*Kiu*Kic)/\n (Km*Kiu+S_max*Kic), I_max)\n i4 <- min(I_min + (sqrt((Kic+I_min)*(Km*Kic*Kiu+Km*Kiu*I_min+\n S_max*Kic*Kiu+S_max*Kic*I_min)/\n (Km*Kiu+S_max*Kic))), I_max )\n s4 <- max(-Km*Kiu*(Kic+2*I_min-i4)/(Kic*(Kiu+2*I_min-i4)), S_min)\n x <- c(S_max, s2, S_max, s4, I_min, I_min, i3, i4)\n return(list(x = x, w = rep(1/4, 4)))\n\n}\nformalArgs(LDOD)\n## Not run: \n##D minimax(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D lx = c(0, 0), ux = c(30, 60),\n##D k = 4, iter = 300,\n##D lp = lower, up = upper,\n##D ICA.control= list(rseed = 100, checkfreq = Inf),\n##D crt.minimax.control = cont,\n##D standardized = TRUE,\n##D localdes = LDOD)\n## End(Not run)\n\n\n################################################################\n# Not necessary!\n# The rest of the examples here are only for professional use.\n################################################################\n# Imagine you have written your own FIM, say in Rcpp, that is faster than\n# the FIM created by the formula interface above.\n\n###########################################\n# An example of a model with two predictors\n###########################################\n# For example, the cpp FIM function for the mixed inhibition model is named:\nformalArgs(FIM_mixed_inhibition)\n\n# We should reparameterize the arguments to match the standard of the\n# argument 'fimfunc' (see 'Details').\nmyfim <- function(x, w, param){\n npoint <- length(x)/2\n S <- x[1:npoint]\n I <- x[(npoint+1):(npoint*2)]\n out <- FIM_mixed_inhibition(S = S, I = I, w = w, param = param)\n return(out)\n}\nformalArgs(myfim)\n\n# Find the minimax optimal design, exactly as before, but NOT using the\n# formula interface.\n## Not run: \n##D res5 <- minimax(fimfunc = myfim,\n##D lx = c(0, 0), ux = c(30, 60), k = 4,\n##D iter = 100, lp = lower, up = upper,\n##D ICA.control= list(rseed = 100),\n##D crt.minimax.control = cont)\n##D print(res5)\n##D plot(res5) # sensitivity plot\n## End(Not run)\n#########################################\n# Standardized maximin D-optimal designs\n#########################################\n# To match the argument 'localdes' when no formula interface is used,\n# we should reparameterize LDOD.\n# The input must be 'param', the same as the argument of 'fimfunc'\nLDOD2 <- function(param)\n LDOD(V = param[1], Km = param[2], Kic = param[3], Kiu = param[4])\n\n# compare these two:\nformalArgs(LDOD)\nformalArgs(LDOD2)\n## Not run: \n##D res6 <- minimax(fimfunc = myfim,\n##D lx = c(0, 0), ux = c(30, 60), k = 4,\n##D iter = 300, lp = lower, up = 
upper,\n##D ICA.control= list(rseed = 100, checkfreq = Inf),\n##D crt.minimax.control = cont,\n##D standardized = TRUE,\n##D localdes = LDOD2)\n##D res6\n##D plot(res6)\n## End(Not run)\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# A-optimal design for the 2PL model.\n# the criterion function must have arguments x, w, fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n## Not run: \n##D res7 <- minimax(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n##D parvars = c(\"a\", \"b\"), family = \"binomial\",\n##D lx = -2, ux = 2,\n##D lp = c(-2, 1), up = c(2, 1.5),\n##D iter = 400, k = 3,\n##D crtfunc = Aopt,\n##D sensfunc = Aopt_sens,\n##D crt.minimax.control = list(optslist = list(maxeval = 200)),\n##D ICA.control = list(rseed = 1))\n##D plot(res7)\n## End(Not run)\n# with grid points\nres7.1 <- minimax(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lx = -2, ux = 2,\n lp = c(-2, 1), up = c(2, 1.5),\n iter = 1, k = 3,\n crtfunc = Aopt,\n sensfunc = Aopt_sens,\n n.grid = 9,\n ICA.control = list(rseed = 1))\n## Not run: \n##D res7.1 <- update(res7.1, 400)\n##D plot(res7.1)\n## End(Not run)\n\n# When the FIM of the model is defined directly via the argument 'fimfunc'\n# the criterion function must have arguments x, w, fimfunc and param.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# param whenever needed.\nAopt2 <- function(x, w, param, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, param = param))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens2 <- function(xi_x, x, w, param, fimfunc){\n fim <- fimfunc(x = x, w = w, param = param)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, param = param)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n## Not run: \n##D res7.2 <- minimax(fimfunc = FIM_logistic,\n##D lx = -2, ux = 2,\n##D lp = c(-2, 1), up = c(2, 1.5),\n##D iter = 1, k = 3,\n##D crtfunc = Aopt2,\n##D sensfunc = Aopt_sens2,\n##D crt.minimax.control = list(optslist = list(maxeval = 200)),\n##D ICA.control = list(rseed = 1))\n##D res7.2 <- update(res7.2, 200)\n##D plot(res7.2)\n## End(Not run)\n# with grid points\nres7.3 <- minimax(fimfunc = FIM_logistic,\n lx = -2, ux = 2,\n lp = c(-2, 1), up = c(2, 1.5),\n iter = 1, k = 3,\n crtfunc = Aopt2,\n sensfunc = Aopt_sens2,\n n.grid = 9,\n ICA.control = list(rseed = 1))\n## Not run: \n##D res7.3 <- update(res7.3, 200)\n##D plot(res7.3)\n## End(Not run)\n\n\n# robust c-optimal design\n# example from Chaloner and Larntz (1989), Figure 3, but for a robust design\nc_opt <- function(x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n c <- matrix(c(1, -gam * 
b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% solve(M)))\n}\n\nc_sens <- function(xi_x, x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(M)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n c <- matrix(c(1, -gam * b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% M_inv %*% M_x %*% M_inv)) - sum(diag(B %*% M_inv))\n}\n\n## Not run: \n##D res8 <- minimax(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n##D parvars = c(\"a\", \"b\"), family = \"binomial\",\n##D lx = -1, ux = 1,\n##D lp = c(-.3, 6), up = c(.3, 8),\n##D iter = 500, k = 3,\n##D crtfunc = c_opt, sensfunc = c_sens,\n##D ICA.control = list(rseed = 1, ncount = 100),\n##D n.grid = 12)\n##D plot(res8)\n## End(Not run)\n\n\n\n\n\n"} {"package":"ICAOD","topic":"multiple","snippet":"### Name: multiple\n### Title: Locally Multiple Objective Optimal Designs for the 4-Parameter\n### Hill Model\n### Aliases: multiple\n\n### ** Examples\n\n# All the examples are available in Hyun and Wong (2015)\n\n#################################\n# 4-parameter logistic model\n# Example 1, Table 3\n#################################\nlam <- c(0.05, 0.05, .90)\n# Initial estimates are derived from Table 1\n# See how the stopping rules are set via 'stop_rule', 'checkfreq' and 'stoptol'\nTheta1 <- c(1.563, 1.790, 8.442, 0.137)\nres1 <- multiple(minDose = log(.001), maxDose = log(1000),\n inipars = Theta1, k = 4, lambda = lam, delta = -1,\n Hill_par = FALSE,\n iter = 1,\n ICA.control = list(rseed = 1366, ncount = 100,\n stop_rule = \"equivalence\",\n checkfreq = 100, stoptol = .95))\n## Not run: \n##D res1 <- update(res1, 1000)\n##D # stops at iteration 101\n## End(Not run)\n\n#################################\n# 4-parameter Hill model\n#################################\n## initial estimates for the parameters of the Hill model:\na <- 0.008949 # ED50\nb <- -1.79 # Hill constant\nc <- 0.137 # lower limit\nd <- 1.7 # upper limit\n# D belongs to c(.001, 1000) ## dose in mg\n## the vector of Hill parameters is now c(a, b, c, d)\n## Not run: \n##D res2 <- multiple(minDose = .001, maxDose = 1000,\n##D inipars = c(a, b, c, d),\n##D Hill_par = TRUE, k = 4, lambda = lam,\n##D delta = -1, iter = 1000,\n##D ICA.control = list(rseed = 1366, ncount = 100,\n##D stop_rule = \"equivalence\",\n##D checkfreq = 100, stoptol = .95))\n##D # stops at iteration 100\n## End(Not run)\n\n\n\n# use the x argument to provide a fixed number of dose levels.\n# In this case, the optimization is only over weights\n## Not run: \n##D res3 <- multiple(minDose = log(.001), maxDose = log(1000),\n##D inipars = Theta1, k = 4, lambda = lam, delta = -1,\n##D iter = 300,\n##D Hill_par = FALSE,\n##D x = c(-6.90, -4.66, -3.93, 3.61),\n##D ICA.control = list(rseed = 1366))\n##D res3$evol[[300]]$w\n##D # if the user provides the design points via x, there is no guarantee\n##D # that the resulting design is optimal. 
It only provides the optimal weights given\n##D # the x points of the design.\n##D plot(res3)\n## End(Not run)\n\n\n\n"} {"package":"ICAOD","topic":"normal","snippet":"### Name: normal\n### Title: Assumes A Multivariate Normal Prior Distribution for The Model\n### Parameters\n### Aliases: normal\n\n### ** Examples\n\nnormal(mu = c(0, 1), sigma = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n lower = c(-3, .1), upper = c(3, 2))\n\n\n"} {"package":"ICAOD","topic":"robust","snippet":"### Name: robust\n### Title: Robust D-Optimal Designs\n### Aliases: robust\n\n### ** Examples\n\n# Finding a robust design for the two-parameter logistic model\n# See how we set a stopping rule.\n# The ELB is computed every checkfreq = 30 iterations\n# The optimization stops when the ELB is larger than stoptol = .95\nres1 <- robust(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = c(\"x\"), parvars = c(\"a\", \"b\"),\n family = binomial(),\n lx = -5, ux = 5, prob = rep(1/4, 4),\n parset = matrix(c(0.5, 1.5, 0.5, 1.5, 4.0, 4.0, 5.0, 5.0), 4, 2),\n iter = 1, k = 3,\n ICA.control = list(stop_rule = \"equivalence\",\n stoptol = .95, checkfreq = 30))\n\n## Not run: \n##D res1 <- update(res1, 100)\n##D # stops at iteration 51\n## End(Not run)\n\n\n## Not run: \n##D res1.1 <- robust(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = c(\"x\"), parvars = c(\"a\", \"b\"),\n##D family = binomial(),\n##D lx = -5, ux = 5, prob = rep(1/4, 4),\n##D parset = matrix(c(0.5, 1.5, 0.5, 1.5, 4.0, 4.0, 5.0, 5.0), 4, 2),\n##D x = c(-3, 0, 3),\n##D iter = 150, k = 3)\n##D plot(res1.1)\n##D # not optimal\n## End(Not run)\n\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# A-optimal design for the 2PL model.\n# the criterion function must have arguments x, w, fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nres2 <- robust(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lx = -3, ux = 3,\n iter = 1, k = 4,\n crtfunc = Aopt,\n sensfunc = Aopt_sens,\n prob = c(.25, .5, .25),\n parset = matrix(c(-2, 0, 2, 1.25, 1.25, 1.25), 3, 2),\n ICA.control = list(checkfreq = 50, stoptol = .999,\n stop_rule = \"equivalence\",\n rseed = 1))\n## Not run: \n##D res2 <- update(res2, 500)\n## End(Not run)\n\n\n\n\n\n# robust c-optimal design\n# example from Chaloner and Larntz (1989), Figure 3, but for a robust design\nc_opt <- function(x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n c <- matrix(c(1, -gam * b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% solve(M)))\n}\n\nc_sens <- function(xi_x, x, w, a, b, fimfunc){\n gam <- log(.95/(1-.95))\n M <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(M)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n c <- matrix(c(1, -gam * b^(-2)), nrow = 1)\n B <- t(c) %*% c\n sum(diag(B %*% M_inv %*% M_x %*% M_inv)) 
- sum(diag(B %*% M_inv))\n}\n\n\nres3 <- robust(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lx = -1, ux = 1,\n parset = matrix(c(0, 7, .2, 6.5), 2, 2, byrow = TRUE),\n prob = c(.5, .5),\n iter = 1, k = 3,\n crtfunc = c_opt, sensfunc = c_sens,\n ICA.control = list(rseed = 1, checkfreq = Inf))\n\n## Not run: \n##D res3 <- update(res3, 300)\n## End(Not run)\n\n\n\n"} {"package":"ICAOD","topic":"sens.bayes.control","snippet":"### Name: sens.bayes.control\n### Title: Returns Control Parameters for Approximating The Integrals In\n### The Bayesian Sensitivity Functions\n### Aliases: sens.bayes.control\n\n### ** Examples\n\nsens.bayes.control()\nsens.bayes.control(cubature = list(maxEval = 50000))\nsens.bayes.control(quadrature = list(level = 4))\n\n\n"} {"package":"ICAOD","topic":"sens.control","snippet":"### Name: sens.control\n### Title: Returns Control Parameters To Find Maximum of The Sensitivity\n### (Derivative) Function Over The Design Space\n### Aliases: sens.control\n\n### ** Examples\n\nsens.control()\nsens.control(optslist = list(maxeval = 1000))\n\n\n"} {"package":"ICAOD","topic":"sens.minimax.control","snippet":"### Name: sens.minimax.control\n### Title: Returns Control Parameters for Verifying General Equivalence\n### Theorem For Minimax Optimal Designs\n### Aliases: sens.minimax.control\n\n### ** Examples\n\nsens.minimax.control()\nsens.minimax.control(n_seg = 4)\n\n\n"} {"package":"ICAOD","topic":"sensbayes","snippet":"### Name: sensbayes\n### Title: Verifying Optimality of Bayesian D-optimal Designs\n### Aliases: sensbayes\n\n### ** Examples\n\n##################################################################\n# Checking the Bayesian D-optimality of a design for the 2PL model\n##################################################################\nskew2 <- skewnormal(xi = c(0, 1), Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(-1, 0), lower = c(-3, .1), upper = c(3, 2))\n## Not run: \n##D sensbayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(),\n##D x = c(-2.50914, -1.16780, -0.36904, 1.29227),\n##D w = c(0.35767, 0.11032, 0.15621, 0.37580),\n##D lx = -3, ux = 3,\n##D prior = skew2)\n##D # took 29 seconds on my system!\n## End(Not run)\n\n# It took very long.\n# We re-adjust the tuning parameters in sens.bayes.control to be faster\n# See how we drastically reduce the maxEval and increase the tolerance\n## Not run: \n##D sensbayes(formula = ~1/(1 + exp(-b *(x - a))),\n##D predvars = \"x\", parvars = c(\"a\", \"b\"),\n##D family = binomial(),\n##D x = c(-2.50914, -1.16780, -0.36904, 1.29227),\n##D w = c(0.35767, 0.11032, 0.15621, 0.37580),\n##D lx = -3, ux = 3, prior = skew2,\n##D sens.bayes.control = list(cubature = list(tol = 1e-4, maxEval = 300)))\n##D # took 5 seconds on my system!\n## End(Not run)\n\n\n\n# Compare it with the following:\nsensbayes(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = \"x\", parvars = c(\"a\", \"b\"),\n family = binomial(),\n x = c(-2.50914, -1.16780, -0.36904, 1.29227),\n w = c(0.35767, 0.11032, 0.15621, 0.37580),\n lx = -3, ux = 3, prior = skew2,\n sens.bayes.control = list(cubature = list(tol = 1e-4, maxEval = 200)))\n# Look at the plot!\n# took 3 seconds on my system\n\n\n########################################################################################\n# Checking the Bayesian D-optimality of a design for the 4-parameter sigmoid emax 
model\n########################################################################################\nlb <- c(4, 11, 100, 5)\nub <- c(9, 17, 140, 10)\n## Not run: \n##D sensbayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D x = c(0.78990, 95.66297, 118.42964,147.55809, 500),\n##D w = c(0.23426, 0.17071, 0.17684, 0.1827, 0.23549),\n##D lx = .001, ux = 500, prior = uniform(lb, ub))\n##D # took 200 seconds on my system\n## End(Not run)\n\n# Re-adjust the tuning parameters to make it faster\n## Not run: \n##D sensbayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D x = c(0.78990, 95.66297, 118.42964,147.55809, 500),\n##D w = c(0.23426, 0.17071, 0.17684, 0.1827, 0.23549),\n##D lx = .001, ux = 500, prior = uniform(lb, ub),\n##D sens.bayes.control = list(cubature = list(tol = 1e-3, maxEval = 300)))\n##D # took 4 seconds on my system. See how much of a difference it makes\n## End(Not run)\n\n## Not run: \n##D # Now we try it with quadrature. Default is 6 nodes\n##D sensbayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D x = c(0.78990, 95.66297, 118.42964,147.55809, 500),\n##D w = c(0.23426, 0.17071, 0.17684, 0.1827, 0.23549),\n##D sens.bayes.control = list(method = \"quadrature\"),\n##D lx = .001, ux = 500, prior = uniform(lb, ub))\n##D # 166.519 s\n##D \n##D # use a smaller number of nodes to see if we can reduce the CPU time\n##D sensbayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D x = c(0.78990, 95.66297, 118.42964,147.55809, 500),\n##D w = c(0.23426, 0.17071, 0.17684, 0.1827, 0.23549),\n##D sens.bayes.control = list(method = \"quadrature\",\n##D quadrature = list(level = 3)),\n##D lx = .001, ux = 500, prior = uniform(lb, ub))\n##D # we don't have an accurate plot\n##D \n##D # use a smaller number of levels: use 4 nodes\n##D sensbayes(formula = ~ theta1 + (theta2 - theta1)*(x^theta4)/(x^theta4 + theta3^theta4),\n##D predvars = c(\"x\"), parvars = c(\"theta1\", \"theta2\", \"theta3\", \"theta4\"),\n##D x = c(0.78990, 95.66297, 118.42964,147.55809, 500),\n##D w = c(0.23426, 0.17071, 0.17684, 0.1827, 0.23549),\n##D sens.bayes.control = list(method = \"quadrature\",\n##D quadrature = list(level = 4)),\n##D lx = .001, ux = 500, prior = uniform(lb, ub))\n##D \n## End(Not run)\n\n\n"} {"package":"ICAOD","topic":"sensbayescomp","snippet":"### Name: sensbayescomp\n### Title: Verifying Optimality of Bayesian Compound DP-optimal Designs\n### Aliases: sensbayescomp\n\n### ** Examples\n\n##########################################\n# Verifying the DP-optimality of a design\n# The logistic model with two predictors\n##########################################\n\n# The design points and corresponding weights are as follows:\n# Point1 Point2 Point3 Point4 Point5 Point6 Point7\n# 0.07410 -0.31953 -1.00000 1.00000 -1.00000 1.00000 0.30193\n# -1.00000 1.00000 -1.00000 1.00000 -0.08251 -1.00000 1.00000\n# Weight1 Weight2 Weight3 Weight4 Weight5 Weight6 Weight7\n# 0.020 0.275 0.224 0.131 0.092 0.156 0.103\n\n# It should be given to the function as two separate vectors:\nx1 <- c(0.07409639, -0.3195265, -1, 1, -1, 1, 0.3019317, -1, 1, -1, 1, 
-0.08251169, -1, 1)\nw1 <- c(0.01992863, 0.2745394, 0.2236575, 0.1312331, 0.09161503, 0.1561454, 0.1028811)\n\n\np <- c(1, -2, 1, -1)\n\n## Not run: \n##D sensbayescomp(formula = ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2)),\n##D predvars = c(\"x1\", \"x2\"),\n##D parvars = c(\"b0\", \"b1\", \"b2\", \"b3\"),\n##D family = binomial(),\n##D x = x1, w = w1,\n##D lx = c(-1, -1), ux = c(1, 1),\n##D prior = uniform(p -1.5, p + 1.5),\n##D prob = ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2)),\n##D alpha = .5, plot_3d = \"rgl\",\n##D sens.bayes.control = list(cubature = list(tol = 1e-3, maxEval = 1000)))\n## End(Not run)\n\n\n\n\n\n\n\n"} {"package":"ICAOD","topic":"senslocally","snippet":"### Name: senslocally\n### Title: Verifying Optimality of The Locally D-optimal Designs\n### Aliases: senslocally\n\n### ** Examples\n\n############################\n# Exponential growth model\n############################\n# Verifying optimality of a locally D-optimal design\nsenslocally(formula = ~a + exp(-b*x),\n predvars = \"x\", parvars = c(\"a\", \"b\"),\n x = c(.1, 1), w = c(.5, .5),\n lx = 0, ux = 1, inipars = c(1, 10))\n\n\n##############################\n# A model with two predictors\n##############################\nx0 <- c(30, 3.861406, 30, 4.600633, 0, 0, 5.111376, 4.168798)\nw0 <- rep(.25, 4)\nsenslocally(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n predvars = c(\"S\", \"I\"),\n parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n x = x0, w = w0,\n lx = c(0, 0), ux = c(30, 60),\n inipars = c(1.5, 5.2, 3.4, 5.6))\n## Not run: \n##D # using package rgl for 3d plot:\n##D res <- senslocally(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n##D predvars = c(\"S\", \"I\"),\n##D parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n##D x = x0, w = w0,\n##D lx = c(0, 0), ux = c(30, 60),\n##D inipars = c(1.5, 5.2, 3.4, 5.6),\n##D plot_3d = \"rgl\")\n##D \n## End(Not run)\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# Checking the A-optimality for the 2PL model.\n# the criterion function must have arguments x, w, fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nsenslocally(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n inipars = c(0, 1.5),\n crtfunc = Aopt,\n lx = -2, ux = 2,\n sensfunc = Aopt_sens,\n x = c(-1, 1), w = c(.5, .5))\n# not optimal\n\n\n"} {"package":"ICAOD","topic":"senslocallycomp","snippet":"### Name: senslocallycomp\n### Title: Verifying Optimality of The Locally DP-optimal Designs\n### Aliases: senslocallycomp\n\n### ** Examples\n\n\np <- c(1, -2, 1, -1)\nprior4.4 <- uniform(p -1.5, p + 1.5)\nformula4.4 <- ~exp(b0+b1*x1+b2*x2+b3*x1*x2)/(1+exp(b0+b1*x1+b2*x2+b3*x1*x2))\nprob4.4 <- ~1-1/(1+exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))\npredvars4.4 <- c(\"x1\", \"x2\")\nparvars4.4 <- c(\"b0\", \"b1\", 
\"b2\", \"b3\")\nlb <- c(-1, -1)\nub <- c(1, 1)\n\n## That is the optimal design when alpha = .25, see ?locallycomp on how to find it\nxopt <- c(-1, -0.389, 1, 0.802, -1, 1, -1, 1)\nwopt <- c(0.198, 0.618, 0.084, 0.1)\n\n# We want to verfiy the optimality of the optimal design by the general equivalence theorem.\n\nsenslocallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n alpha = .25, inipars = p, x = xopt, w = wopt)\n\n## Not run: \n##D # is this design also optimal when alpha = .3\n##D \n##D senslocallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .3, inipars = p, x = xopt, w = wopt)\n##D \n##D # when alpha = .3\n##D senslocallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .5, inipars = p, x = xopt, w = wopt)\n##D # when alpha = .8\n##D senslocallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .8, inipars = p, x = xopt, w = wopt)\n##D \n##D \n##D # when alpha = .9\n##D senslocallycomp(formula = formula4.4, predvars = predvars4.4, parvars = parvars4.4,\n##D family = binomial(), prob = prob4.4, lx = lb, ux = ub,\n##D alpha = .9, inipars = p, x = xopt, w = wopt)\n##D \n##D ## As can be seen, the design looses efficiency as alpha increases.\n## End(Not run)\n\n\n"} {"package":"ICAOD","topic":"sensminimax","snippet":"### Name: sensminimax\n### Title: Verifying Optimality of The Minimax and Standardized maximin\n### D-optimal Designs\n### Aliases: sensminimax\n\n### ** Examples\n\n##########################\n# Power logistic model\n##########################\n# verifying the minimax D-optimality of a design with points x0 and weights w0\nx0 <- c(-4.5515, 0.2130, 2.8075)\nw0 <- c(0.4100, 0.3723, 0.2177)\n# Power logistic model when s = .2\nsensminimax(formula = ~ (1/(1 + exp(-b * (x-a))))^.2,\n predvars = \"x\",\n parvars = c(\"a\", \"b\"),\n family = binomial(),\n x = x0, w = w0,\n lx = -5, ux = 5,\n lp = c(0, 1), up = c(3, 1.5))\n\n##############################\n# A model with two predictors\n##############################\n# Verifying the minimax D-optimality of a design for a model with two predictors\n# The model is the mixed inhibition model.\n# X0 is the vector of four design points that are:\n# (3.4614, 0) (4.2801, 3.1426) (30, 0) (30, 4.0373)\nx0 <- c(3.4614, 4.2801, 30, 30, 0, 3.1426, 0, 4.0373)\nw0 <- rep(1/4, 4)\nsensminimax(formula = ~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu)),\n predvars = c(\"S\", \"I\"),\n parvars = c(\"V\", \"Km\", \"Kic\", \"Kiu\"),\n family = \"gaussian\",\n x = x0, w = w0,\n lx = c(0, 0), ux = c(30, 60),\n lp = c(1, 4, 2, 4), up = c(1, 5, 3, 5))\n\n##########################################\n# Standardized maximin D-optimal designs\n##########################################\n# Verifying the standardized maximin D-optimality of a design for\n# the loglinear model\n# First we should define the function for 'localdes' argument\n# The function LDOD takes the parameters and returns the points and\n# weights of the locally D-optimal design\nLDOD <- function(theta0, theta1, theta2){\n ## param is the vector of theta = (theta0, theta1, theta2)\n lx <- 0 # lower bound of the design space\n ux <- 150 # upper bound of the design space\n param <- c()\n param[1] <- theta0\n param[2] <- theta1\n 
param[3] <- theta2\n xstar <- (ux+param[3]) * (lx + param[3]) *\n (log(ux + param[3]) - log(lx + param[3]))/(ux - lx) - param[3]\n return(list(x = c(lx, xstar, ux) , w = rep(1/3, 3)))\n}\nx0 <- c(0, 4.2494, 17.0324, 149.9090)\nw0 <- c(0.3204, 0.1207, 0.2293, 0.3296)\n## Not run: \n##D sensminimax(formula = ~theta0 + theta1* log(x + theta2),\n##D predvars = c(\"x\"),\n##D parvars = c(\"theta0\", \"theta1\", \"theta2\"),\n##D x = x0, w = w0,\n##D lx = 0, ux = 150,\n##D lp = c(2, 2, 1), up = c(2, 2, 15),\n##D localdes = LDOD,\n##D standardized = TRUE,\n##D sens.minimax.control = list(n_seg = 10))\n## End(Not run)\n################################################################\n# Not necessary!\n# The rest of the examples here are only for professional use.\n################################################################\n# Imagine you have written your own FIM, say in Rcpp, that is faster than\n# the FIM created by the formula interface here.\n\n##########################\n# Power logistic model\n##########################\n# For example, the cpp FIM function for the power logistic model is named:\nFIM_power_logistic\nargs(FIM_power_logistic)\n# The arguments do not match the standard of the argument 'fimfunc'\n# in 'sensminimax'\n# So we reparameterize it:\nmyfim1 <- function(x, w, param)\n FIM_power_logistic(x = x, w = w, param = param, s = .2)\n\nargs(myfim1)\n## Not run: \n##D # Verify minimax D-optimality of a design\n##D sensminimax(fimfunc = myfim1,\n##D x = c(-4.5515, 0.2130, 2.8075),\n##D w = c(0.4100, 0.3723, 0.2177),\n##D lx = -5, ux = 5,\n##D lp = c(0, 1), up = c(3, 1.5))\n## End(Not run)\n##############################\n# A model with two predictors\n##############################\n# An example of a model with two predictors: the mixed inhibition model\n# Fisher information matrix:\nFIM_mixed_inhibition\nargs(FIM_mixed_inhibition)\n\n# We should first reparameterize the FIM to match the standard of the\n# argument 'fimfunc'\nmyfim2 <- function(x, w, param){\n npoint <- length(x)/2\n S <- x[1:npoint]\n I <- x[(npoint+1):(npoint*2)]\n out <- FIM_mixed_inhibition(S = S, I = I, w = w, param = param)\n return(out)\n}\nargs(myfim2)\n## Not run: \n##D # Verifying minimax D-optimality of a design\n##D sensminimax(fimfunc = myfim2,\n##D x = c(3.4614, 4.2801, 30, 30, 0, 3.1426, 0, 4.0373),\n##D w = rep(1/4, 4),\n##D lx = c(0, 0), ux = c(30, 60),\n##D lp = c(1, 4, 2, 4), up = c(1, 5, 3, 5))\n## End(Not run)\n\n#########################################\n# Standardized maximin D-optimal designs\n#########################################\n# An example of a user-written FIM function:\nhelp(FIM_loglin)\n# An example of verifying standardized maximin D-optimality for a design\n# Look how we re-define the function LDOD above\nLDOD2 <- function(param){\n ## param is the vector of theta = (theta0, theta1, theta2)\n lx <- 0 # lower bound of the design space\n ux <- 150 # upper bound of the design space\n xstar <- (ux + param[3]) * (lx + param[3]) *\n (log(ux + param[3]) - log(lx + param[3]))/(ux - lx) - param[3]\n return(list(x = c(lx, xstar, ux) , w = rep(1/3, 3)))\n}\n\nargs(LDOD2)\n\nsensminimax(fimfunc = FIM_loglin,\n x = x0,\n w = w0,\n lx = 0, ux = 150,\n lp = c(2, 2, 1), up = c(2, 2, 15),\n localdes = LDOD2,\n standardized = TRUE)\n\n\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# Checking the A-optimality for the 2PL model.\n# the criterion function must have arguments x, w, 
fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nsensminimax(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n lp = c(-2, 1), up = c(2, 1.5),\n crtfunc = Aopt,\n lx = -2, ux = 2,\n sensfunc = Aopt_sens,\n x = c(-2, .0033, 2), w = c(.274, .452, .274))\n\n\n\n"} {"package":"ICAOD","topic":"sensmultiple","snippet":"### Name: sensmultiple\n### Title: Verifying Optimality of The Multiple Objective Designs for The\n### 4-Parameter Hill Model\n### Aliases: sensmultiple\n\n### ** Examples\n\n#################################################################\n# Verifying optimality of a design for the 4-parameter Hill model\n#################################################################\n\n## initial estimates for the parameters of the Hill model\na <- 0.008949 # ED50\nb <- -1.79 # Hill constant\nc <- 0.137 # lower limit\nd <- 1.7 # upper limit\n# D belongs to c(.001, 1000) ## dose in mg\n## Hill parameters are c(a, b, c, d)\n# dose, minDose and maxDose vector in mg scale\n\nsensmultiple(dose = c(0.001, 0.009426562, 0.01973041, 999.9974),\n w = c(0.4806477, 0.40815, 0.06114173, 0.05006055),\n minDose = .001, maxDose = 1000,\n Hill_par = TRUE,\n inipars = c(a, b, c, d),\n lambda = c(0.05, 0.05, .90),\n delta = -1)\n\n\n\n\n\n## Don't show: \n\n## examples of using this function for c-optimal designs\n# first row second column: c-optimal design for estimating ED50 of the 4-parameter logistic model\nsensmultiple(dose = c(log(.001), -4.80, log(1000)),\n w = c(.276, .500, .224),\n Hill_par = FALSE,\n minDose = log(.001), maxDose = log(1000),\n inipars = c(d - c, -b, b * log(a), c),\n lambda = c(0, 1, 0),\n delta = -1)\n## criterion value is 1e+24 which will be returned when the variance for estimating ED50 is computationally negative!\n## if we change the tolerance for finding Moore-Penrose Matrix Inverse to .Machine$double.eps\n# we then get 2.201179 for the criterion value\n\n\nsensmultiple(dose = c(-6.907755, -4.664224, -3.925594, 6.907753 ),\n w = c(0.4806477, 0.40815, 0.06114173, 0.05006055 ),\n minDose = log(.001), maxDose = log(1000),\n Hill_par = FALSE,\n inipars = c(d - c, -b, b * log(a), c),\n lambda = c(0.05, 0.05, .90),\n delta = -1)\n## End(Don't show)\n\n\n"} {"package":"ICAOD","topic":"sensrobust","snippet":"### Name: sensrobust\n### Title: Verifying Optimality of The Robust Designs\n### Aliases: sensrobust\n\n### ** Examples\n\n# Verifying a robust design for the two-parameter logistic model\nsensrobust(formula = ~1/(1 + exp(-b *(x - a))),\n predvars = c(\"x\"),\n parvars = c(\"a\", \"b\"),\n family = binomial(),\n prob = rep(1/4, 4),\n parset = matrix(c(0.5, 1.5, 0.5, 1.5, 4.0, 4.0, 5.0, 5.0), 4, 2),\n x = c(0.260, 1, 1.739), w = c(0.275, 0.449, 0.275),\n lx = -5, ux = 5)\n\n\n###################################\n# user-defined optimality criterion\n##################################\n# When the model is defined by the formula interface\n# 
Checking the A-optimality for the 2PL model.\n# the criterion function must have arguments x, w, fimfunc and the parameters defined in 'parvars'.\n# use 'fimfunc' as a function of the design points x, design weights w and\n# the 'parvars' parameters whenever needed.\nAopt <- function(x, w, a, b, fimfunc){\n sum(diag(solve(fimfunc(x = x, w = w, a = a, b = b))))\n}\n## the sensitivity function\n# xi_x is a design that puts all its mass on x in the definition of the sensitivity function\n# x is a vector of design points\nAopt_sens <- function(xi_x, x, w, a, b, fimfunc){\n fim <- fimfunc(x = x, w = w, a = a, b = b)\n M_inv <- solve(fim)\n M_x <- fimfunc(x = xi_x, w = 1, a = a, b = b)\n sum(diag(M_inv %*% M_x %*% M_inv)) - sum(diag(M_inv))\n}\n\nsensrobust(formula = ~1/(1 + exp(-b * (x-a))), predvars = \"x\",\n parvars = c(\"a\", \"b\"), family = \"binomial\",\n crtfunc = Aopt,\n sensfunc = Aopt_sens,\n lx = -3, ux = 3,\n prob = c(.25, .5, .25),\n parset = matrix(c(-2, 0, 2, 1.25, 1.25, 1.25), 3, 2),\n x = c(-2.469, 0, 2.469), w = c(.317, .365, .317))\n# not optimal. The optimal design has four points. See the last example in ?robust\n\n\n"} {"package":"ICAOD","topic":"skewnormal","snippet":"### Name: skewnormal\n### Title: Assumes A Multivariate Skewed Normal Prior Distribution for The\n### Model Parameters\n### Aliases: skewnormal\n\n### ** Examples\n\nskewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(1, 0), lower = c(-3, .1), upper = c(3, 2))\n\n\n"} {"package":"ICAOD","topic":"student","snippet":"### Name: student\n### Title: Multivariate Student's t Prior Distribution for Model Parameters\n### Aliases: student\n\n### ** Examples\n\nskewnormal(xi = c(0, 1),\n Omega = matrix(c(1, -0.17, -0.17, .5), nrow = 2),\n alpha = c(1, 0), lower = c(-3, .1), upper = c(3, 2))\n\n\n"} {"package":"ICAOD","topic":"uniform","snippet":"### Name: uniform\n### Title: Assume A Multivariate Uniform Prior Distribution for The Model\n### Parameters\n### Aliases: uniform\n\n### ** Examples\n\nuniform(lower = c(-3, .1), upper = c(3, 2))\n\n\n"} {"package":"higrad","topic":"higrad","snippet":"### Name: higrad\n### Title: Fitting HiGrad\n### Aliases: higrad\n\n### ** Examples\n\n# fitting linear regression on a simulated dataset\nn <- 1e3\nd <- 10\nsigma <- 0.1\ntheta <- rep(1, d)\nx <- matrix(rnorm(n * d), n, d)\ny <- as.numeric(x %*% theta + rnorm(n, 0, sigma))\nfit <- higrad(x, y, model = \"lm\")\nprint(fit)\n# predict for 10 new samples\nnewx <- matrix(rnorm(10 * d), 10, d)\npred <- predict(fit, newx)\npred\n\n\n\n"} {"package":"ktsolve","topic":"ktsolve","snippet":"### Name: ktsolve\n### Title: Configurable Function for Solving Families of Nonlinear\n### Equations Version: 1.3\n### Aliases: ktsolve\n\n### ** Examples\n\nzfunc <- function(x) {\n\tz <- vector()\nz[1] <- 4*var1 -3*var2 +5*var3\nz[2] <- 8*var1 +5*var2 -2*var3\nz\n}\n\n known=list(var2=5)\n guess=list(var1=2,var3=0)\n solv1 <- ktsolve(zfunc,known=known,guess=guess)\n# Successful convergence.\n# solution is:\n# var1 var3 \n# -1.979167 4.583333 \n# \"known\" inputs were:\n# var2\n# known 5 \n eval(solv1$yfunc)(solv1$results$par)\n\n \n known=list(var1=5)\n guess=list(var2=2,var3=0)\n solv2 <- ktsolve(zfunc,known=known,guess=guess)\n# Successful convergence.\n# solution is:\n# var2 var3 \n# -12.63158 -11.57895 \n# \"known\" inputs were:\n# var1\n# known 5 \neval(solv2$yfunc)(solv2$results$par)\n \n\n"} {"package":"rabhit","topic":"createFullHaplotype","snippet":"### Name: createFullHaplotype\n### Title: Anchor gene haplotype 
inference\n### Aliases: createFullHaplotype\n\n### ** Examples\n\n# Load example data and germlines\ndata(samples_db, HVGERM, HDGERM)\n\n# Selecting a single individual\nclip_db = samples_db[samples_db$subject=='I5', ]\n\n# Inferring haplotype\nhaplo_db = createFullHaplotype(clip_db,toHap_col=c('v_call','d_call'),\nhapBy_col='j_call',hapBy='IGHJ6',toHap_GERM=c(HVGERM,HDGERM))\n\n\n\n\n"} {"package":"rabhit","topic":"deletionHeatmap","snippet":"### Name: deletionHeatmap\n### Title: Graphical output of single chromosome deletions\n### Aliases: deletionHeatmap\n\n### ** Examples\n\n# Plotting single chromosome deletions from haplotype inference\ndeletionHeatmap(samplesHaplotype)\n\n\n"} {"package":"rabhit","topic":"deletionsByBinom","snippet":"### Name: deletionsByBinom\n### Title: Double chromosome deletion by relative gene usage\n### Aliases: deletionsByBinom\n\n### ** Examples\n\n# Load example data and germlines\ndata(samples_db)\n\n# Selecting a single individual\nclip_db = samples_db[samples_db$subject=='I5', ]\n# Inferring deletions\ndel_binom_df = deletionsByBinom(clip_db)\nhead(del_binom_df)\n\n\n\n"} {"package":"rabhit","topic":"deletionsByVpooled","snippet":"### Name: deletionsByVpooled\n### Title: Single chromosomal D or J gene deletions inferred by the V\n### pooled method\n### Aliases: deletionsByVpooled\n\n### ** Examples\n\n## No test: \ndata(samples_db)\n\n# Inferring V pooled deletions\ndel_db <- deletionsByVpooled(samples_db)\nhead(del_db)\n## End(No test)\n\n\n"} {"package":"rabhit","topic":"hapDendo","snippet":"### Name: hapDendo\n### Title: Hierarchical clustering of haplotypes graphical output\n### Aliases: hapDendo\n\n### ** Examples\n\n# Plotting haplotype hierarchical clustering based on the Jaccard distance\n## No test: \nhapDendo(samplesHaplotype)\n## End(No test)\n\n\n\n"} {"package":"rabhit","topic":"hapHeatmap","snippet":"### Name: hapHeatmap\n### Title: Graphical output of alleles division by chromosome\n### Aliases: hapHeatmap\n\n### ** Examples\n\n# Plotting haplotype heatmap\np <- hapHeatmap(samplesHaplotype)\np$p\n\n\n"} {"package":"rabhit","topic":"nonReliableVGenes","snippet":"### Name: nonReliableVGenes\n### Title: Detect non reliable gene assignment\n### Aliases: nonReliableVGenes\n\n### ** Examples\n\n# Example IGHV call data frame\nclip_db <- data.frame(subject=rep('S1',6),\nv_call=c('IGHV1-69*01','IGHV1-69*01','IGHV1-69*01,IGHV1-69*02',\n'IGHV4-59*01,IGHV4-61*01','IGHV4-59*01,IGHV4-31*02','IGHV4-59*01'))\n# Detect non reliable genes\nnonReliableVGenes(clip_db)\n\n\n"} {"package":"rabhit","topic":"plotDeletionsByBinom","snippet":"### Name: plotDeletionsByBinom\n### Title: Graphical output of double chromosome deletions\n### Aliases: plotDeletionsByBinom\n\n### ** Examples\n\n\n# Load example data and germlines\ndata(samples_db)\n\n# Inferring deletions\ndeletions_db = deletionsByBinom(samples_db)\nplotDeletionsByBinom(deletions_db)\n\n\n\n"} {"package":"rabhit","topic":"plotDeletionsByVpooled","snippet":"### Name: plotDeletionsByVpooled\n### Title: Graphical output for single chromosome D or J gene deletions\n### according to V pooled method\n### Aliases: plotDeletionsByVpooled\n\n### ** Examples\n\n## No test: \n# Load example data and germlines\ndata(samples_db)\ndel_db <- deletionsByVpooled(samples_db)\nplotDeletionsByVpooled(del_db)\n## End(No test)\n\n\n"} {"package":"rabhit","topic":"plotHaplotype","snippet":"### Name: plotHaplotype\n### Title: Graphical output of an inferred haplotype\n### Aliases: plotHaplotype\n\n### ** Examples\n\n\n# Selecting a 
single individual from the haplotype samples data\nhaplo_db = samplesHaplotype[samplesHaplotype$subject=='I5', ]\n\n# plot haplotype\nplotHaplotype(haplo_db)\n\n\n\n"} {"package":"gJLS2","topic":"gJLS2","snippet":"### Name: gJLS2\n### Title: A Generalized Joint-Location-Scale (gJLS) Test\n### Aliases: gJLS2\n\n### ** Examples\n\nN <- 1000\ngenDAT <- rbinom(N, 2, 0.3)\nsex <- rbinom(N, 1, 0.5)+1\ny <- rnorm(N)\ncovar <- matrix(rnorm(N*10), ncol=10)\n\ngJLS2(GENO=data.frame(\"SNP1\" = genDAT, \"aSNP1\" = genDAT), SEX=sex, Y=y, COVAR=covar)\n\ngJLS2(GENO=genDAT, SEX=sex, Y=y, COVAR=covar, Xchr=TRUE)\n\n\n\n\n"} {"package":"gJLS2","topic":"gJLS2s","snippet":"### Name: gJLS2s\n### Title: generalized Joint-Location-Scale (gJLS) test with summary\n### statistics\n### Aliases: gJLS2s\n\n### ** Examples\n\ngL <- data.frame(\"SNP\" = paste(\"rs\", 1:100, sep=\"\"), \"gL\"=runif(100))\ngS <- runif(100)\n\ngJLS2s(gL = gL, gS=gS)\n\n\n\n\n"} {"package":"gJLS2","topic":"leveneRegA_per_SNP","snippet":"### Name: leveneRegA_per_SNP\n### Title: The generalized Levene's test via a two-stage regression for\n### variance homogeneity by SNP genotype (autosomes)\n### Aliases: leveneRegA_per_SNP\n\n### ** Examples\n\nN <- 100\ngenDAT <- rbinom(N, 2, 0.3)\nY <- rnorm(N)\ncovar <- matrix(rnorm(N*10), ncol=10)\n\n# vanilla example:\nleveneRegA_per_SNP(geno_one=genDAT, Y=Y, COVAR=covar)\n\n# relatedness samples:\nleveneRegA_per_SNP(geno_one=genDAT, Y=Y, COVAR=covar,\nrelated=TRUE)\nleveneRegA_per_SNP(geno_one=genDAT, Y=Y, COVAR=covar,\nrelated=TRUE, clust = factor(rbinom(N, 2, 0.6)))\n\n\n# dosage genotypes example:\nlibrary(\"MCMCpack\")\ngeno <- rbinom(N, 2, 0.3)\na <- 0.3 ## uncertainty\ngenPP <- rbind(rdirichlet(sum(geno==0),c(a,(1-a)/2,(1-a)/2)),\n rdirichlet(sum(geno==1),c((1-a)/2,a,(1-a)/2)),\n rdirichlet(sum(geno==2),c((1-a)/2,(1-a)/2,a)))\n\nleveneRegA_per_SNP(geno_one=genPP, Y=Y, COVAR=covar)\nleveneRegA_per_SNP(geno_one=genPP, Y=Y, COVAR=covar,\ngenotypic=TRUE)\n\n# dosage and related samples:\nleveneRegA_per_SNP(geno_one=genPP, Y=Y, COVAR=covar,\nrelated=TRUE, clust = factor(rbinom(N, 1, 0.3)))\nleveneRegA_per_SNP(geno_one=genPP, Y=Y, COVAR=covar,\nrelated=TRUE, clust = factor(rbinom(N, 1, 0.3)), genotypic=TRUE)\n\n\n\n\n\n"} {"package":"gJLS2","topic":"leveneRegX_per_SNP","snippet":"### Name: leveneRegX_per_SNP\n### Title: Levene's regression tests for variance homogeneity by SNP\n### genotype (X-chromosome specific)\n### Aliases: leveneRegX_per_SNP\n\n### ** Examples\n\nN <- 1000\nsex <- rbinom(N, 1, 0.5)+1\nY <- rnorm(N)\ngenDAT <- NA\ngenDAT[sex==2] <- rbinom(sum(sex==2), 2, 0.3)\ntable(genDAT, sex)\ngenDAT[sex==1] <- rbinom(sum(sex==1), 1, 0.3)\ntable(genDAT, sex)\n\nleveneRegX_per_SNP(geno_one=genDAT, SEX=sex, Y=Y)\nleveneRegX_per_SNP(geno_one=genDAT, SEX=sex, Y=Y, genotypic=TRUE)\nleveneRegX_per_SNP(geno_one=genDAT, SEX=sex, Y=Y, loc_alg=\"OLS\")\n\n\n\n"} {"package":"gJLS2","topic":"leveneTests_per_SNP","snippet":"### Name: leveneTests_per_SNP\n### Title: Levene's test for variance homogeneity by SNP genotypes\n### (sex-specific p-values)\n### Aliases: leveneTests_per_SNP\n\n### ** Examples\n\nN <- 5000\nsex <- rbinom(N, 1, 0.5)+1\ngenDAT <- rbinom(N, 2, 0.3)\ny <- rnorm(N)\n\ngenDAT[sex==2] <- rbinom(sum(sex==2), 1, 0.3)\ntable(genDAT, sex)\nleveneTests_per_SNP(geno_one=genDAT, SEX=sex, Y=y^2, transform=TRUE)\n\ngenDAT[sex==2] <- rbinom(sum(sex==2), 1, 0.01)\ntable(genDAT, sex)\nleveneTests_per_SNP(geno_one=genDAT, SEX=sex, Y=y^2, 
transform=FALSE)\n\nleveneTests_per_SNP(geno_one=rep(0, N), SEX=sex, Y=y^2, transform=TRUE)\nleveneTests_per_SNP(geno_one=rep(0, N), Y=y^2, transform=TRUE)\n\n\n\n\n"} {"package":"gJLS2","topic":"locReg","snippet":"### Name: locReg\n### Title: Location (mean-based association) test\n### Aliases: locReg\n\n### ** Examples\n\nN <- 100\ngenDAT <- rbinom(N, 2, 0.3)\nsex <- rbinom(N, 1, 0.5)+1\ny <- rnorm(N)\nCOVAR <- matrix(rnorm(N*10), ncol=10)\n\nlocReg(GENO=genDAT, SEX=sex, Y=y, COVAR=COVAR)\n\n# correlated example:\nlibrary(\"MASS\")\nyy <- mvrnorm(1, mu= rep(0, N), Sigma = matrix(0.3, N, N) + diag(0.7, N))\nlocReg(GENO=list(\"SNP1\"= genDAT, \"SNP2\" = genDAT[sample(1:100)]),\nSEX=sex, Y=as.numeric(yy), COVAR=COVAR, related = TRUE,\nclust = rep(1, 100))\n\n# sibpair example:\npairedY <- mvrnorm(N/2,rep(0,2),matrix(c(1,0.2,0.2,1), 2))\nyy <- c(pairedY[,1], pairedY[,2])\nlocReg(GENO=list(\"SNP1\"= genDAT, \"SNP2\" = genDAT[sample(1:100)]),\nSEX=sex, Y=as.numeric(yy), COVAR=COVAR, related = TRUE,\nclust = rep(c(1:50), 2))\n\n# Xchr data example:\ngenDAT1 <- rep(NA, N)\ngenDAT1[sex==1] <- rbinom(sum(sex==1), 1, 0.5)\ngenDAT1[sex==2] <-rbinom(sum(sex==2), 2, 0.5)\nlocReg(GENO=genDAT1, SEX=sex, Y=y, COVAR=COVAR, Xchr=TRUE)\n\n\n\n\n"} {"package":"gJLS2","topic":"scaleReg","snippet":"### Name: scaleReg\n### Title: Scale (variance-based association) test\n### Aliases: scaleReg\n\n### ** Examples\n\nN <- 1000\ngenoDAT <- rbinom(N, 2, 0.3)\nsex <- rbinom(N, 1, 0.5)+1\nY <- rnorm(N)\ncovar <- matrix(rnorm(N*10), ncol=10)\n\n# vanilla example:\nscaleReg(GENO=list(genoDAT, genoDAT), Y=Y, COVAR=covar)\nscaleReg(GENO=list(genoDAT, genoDAT), Y=Y, COVAR=covar, genotypic=TRUE)\nscaleReg(GENO=list(genoDAT, genoDAT), Y=Y, COVAR=covar, origLev = TRUE)\nscaleReg(GENO=list(genoDAT, genoDAT), Y=Y, COVAR=covar, origLev = TRUE, SEX=sex)\n\n\n\n"} {"package":"swipeR","topic":"swipeR","snippet":"### Name: swipeR\n### Title: HTML widget displaying a carousel\n### Aliases: swipeR\n\n### ** Examples\n\nlibrary(swipeR)\nlibrary(htmltools)\n\nwrapper <- swipeRwrapper(\n tags$img(src = \"https://swiperjs.com/demos/images/nature-1.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-2.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-3.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-4.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-5.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-6.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-7.jpg\"),\n tags$img(src = \"https://swiperjs.com/demos/images/nature-8.jpg\")\n)\n\nswipeR(\n wrapper, height = \"400px\", width = \"70%\", thumbs = TRUE, keyboard = TRUE,\n on = list(reachEnd = htmlwidgets::JS(\"function() {alert('the end');}\"))\n)\n\n# Shiny example ####\nlibrary(swipeR)\nlibrary(shiny)\nlibrary(ggplot2)\n\nwrapper <- swipeRwrapper(\n div(\n plotOutput(\"ggplot1\", width = \"500px\", height = \"400px\"),\n align = \"center\"\n ),\n div(\n plotOutput(\"ggplot2\", width = \"500px\", height = \"400px\"),\n align = \"center\"\n ),\n div(\n plotOutput(\"ggplot3\", width = \"500px\", height = \"400px\"),\n align = \"center\"\n ),\n div(\n plotOutput(\"ggplot4\", width = \"500px\", height = \"400px\"),\n align = \"center\"\n )\n)\n\nui <- fluidPage(\n tags$head(\n tags$style(HTML(\n \".shiny-plot-output {border: 2px solid royalblue;}\"\n ))\n ),\n br(),\n fluidRow(\n column(\n 12,\n swipeR(\n wrapper, height = \"450px\", width = \"80%\", effect = \"cube\", speed = 2000,\n 
navigationColor = \"black\", rewind = TRUE, id = \"CAROUSEL\"\n )\n ),\n column(\n 12,\n br(), br(), br(),\n ),\n column(\n 3, align = \"center\",\n actionButton(\n \"btn1\", \"Scatter plot\", class = \"btn-primary\",\n onclick = \"document.getElementById('CAROUSEL').swiper.slideTo(0);\"\n )\n ),\n column(\n 3, align = \"center\",\n actionButton(\n \"btn2\", \"Line chart\", class = \"btn-primary\",\n onclick = \"document.getElementById('CAROUSEL').swiper.slideTo(1);\"\n )\n ),\n column(\n 3, align = \"center\",\n actionButton(\n \"btn3\", \"Bar chart\", class = \"btn-primary\",\n onclick = \"document.getElementById('CAROUSEL').swiper.slideTo(2);\"\n )\n ),\n column(\n 3, align = \"center\",\n actionButton(\n \"btn4\", \"Boxplots\", class = \"btn-primary\",\n onclick = \"document.getElementById('CAROUSEL').swiper.slideTo(3);\"\n )\n )\n )\n)\n\nserver <- function(input, output, session) {\n output[[\"ggplot1\"]] <- renderPlot({\n ggplot(mtcars, aes(wt, mpg)) + geom_point() +\n theme(panel.border = element_rect(fill = NA, color = \"firebrick\"))\n }, width = 500, height = 400)\n output[[\"ggplot2\"]] <- renderPlot({\n ggplot(economics, aes(date, unemploy)) + geom_line()\n }, width = 500, height = 400)\n output[[\"ggplot3\"]] <- renderPlot({\n ggplot(mpg, aes(class)) + geom_bar()\n }, width = 500, height = 400)\n output[[\"ggplot4\"]] <- renderPlot({\n ggplot(mpg, aes(class, hwy)) + geom_boxplot()\n }, width = 500, height = 400)\n}\n\nif(interactive()) shinyApp(ui, server)\n\n\n# other Shiny example ####\nlibrary(swipeR)\nlibrary(shiny)\nlibrary(shinyWidgets)\nlibrary(ggplot2)\nlibrary(ggthemes)\n\nwrapper <- swipeRwrapper(\n div(\n fluidRow(\n column(\n 6,\n awesomeRadio(\n \"theme\", \"Choose a theme\",\n c(\n \"Calc\",\n \"Clean\",\n \"Economist\",\n \"Excel\",\n \"FiveThirtyEight\",\n \"Foundation\",\n \"Google Docs\",\n \"Highcharts\",\n \"Pander\",\n \"Solarized\",\n \"Stata\",\n \"Wall Street\"\n )\n )\n ),\n column(\n 6,\n tags$p(\"The Shiny slider does not work here...\"),\n tags$label(\"Base font size\"),\n tags$input(\n type = \"range\", min = \"10\", max = \"20\", value = \"12\",\n oninput =\n \"this.nextElementSibling.value = this.value;\n Shiny.setInputValue('slider', this.value);\"\n ),\n tags$output(\"12\", style = \"font-weight: bold; color: blue\"),\n br(), hr(), br(),\n materialSwitch(\"facets\", \"Facets?\", status = \"info\"),\n conditionalPanel(\n condition = \"input.facets\",\n awesomeRadio(\n \"direction\", label = NULL, status = \"info\",\n choices = c(\"by row\" = \"row\", \"by column\" = \"column\"),\n )\n ),\n br(), hr(), br(),\n actionButton(\n \"btn\", \"Add slide\", class = \"btn-primary btn-block\",\n onclick = \"document.getElementById('SWIPER').swiper.appendSlide(\n '
<\/div>');\n Shiny.setInputValue('newslide', true, {priority: 'event'});\"\n )\n )\n ),\n style = \"margin-left: 10%; margin-right: 10%; font-size: 2rem;\"\n ),\n div(\n plotOutput(\"ggplot\", width = \"85%\", height = \"400px\"),\n align = \"center\"\n )\n)\n\nui <- fluidPage(\n tags$head(\n tags$style(HTML(\n \".shiny-plot-output {\n border: 2px solid royalblue;\n }\n .shiny-text-output {\n font-size: 30px;\n font-style: italic;\n }\n .recalculating {\n display: none; /* otherwise there's a flash */\n }\n .rlogo {\n width: 100%;\n height: 100%;\n background-image: url(https://www.r-project.org/logo/Rlogo.png);\n background-repeat: no-repeat;\n background-size: contain;\n background-position: center;\n }\"\n ))\n ),\n br(), br(), br(),\n fluidRow(\n column(\n 12,\n swipeR(\n wrapper, id = \"SWIPER\", effect = \"flip\", rewind = TRUE,\n height = \"450px\", width = \"90%\",\n navigationColor = \"black\", paginationColor = \"black\",\n on = list(\n afterInit = htmlwidgets::JS(\n \"function(swiper) {\n setTimeout(function(){ Shiny.setInputValue('index', 1); }, 0);\n }\"\n ),\n slideChange = htmlwidgets::JS(\n \"function(swiper) {\n Shiny.setInputValue('index', swiper.activeIndex + 1);\n }\"\n )\n )\n )\n ),\n column(\n 12,\n textOutput(\"slideIndex\")\n )\n )\n)\n\nserver <- function(input, output, session) {\n\n ggtheme <- reactive({\n size <- input[[\"slider\"]]\n size <- if(is.null(size)) 12 else as.integer(size)\n switch(\n input[[\"theme\"]],\n \"Calc\" = theme_calc(base_size = size),\n \"Clean\" = theme_clean(base_size = size),\n \"Economist\" = theme_economist(base_size = size),\n \"Excel\" = theme_excel_new(base_size = size),\n \"FiveThirtyEight\" = theme_fivethirtyeight(base_size = size),\n \"Foundation\" = theme_foundation(base_size = size),\n \"Google Docs\" = theme_gdocs(base_size = size),\n \"Highcharts\" = theme_hc(base_size = size),\n \"Pander\" = theme_pander(base_size = size),\n \"Solarized\" = theme_solarized(base_size = size),\n \"Stata\" = theme_stata(base_size = size),\n \"Wall Street\" = theme_wsj(base_size = size)\n )\n })\n\n output[[\"ggplot\"]] <- renderPlot({\n gg <- ggplot(iris, aes(x = Sepal.Length, y = Petal.Length, color = Species)) +\n geom_point(size = 6) + ggtheme()\n if(input[[\"facets\"]]) {\n if(input[[\"direction\"]] == \"row\") {\n gg <- gg + facet_grid(rows = vars(Species))\n } else {\n gg <- gg + facet_grid(cols = vars(Species))\n }\n }\n gg\n })\n\n nSlides <- reactiveVal(2)\n observeEvent(input[[\"newslide\"]], {\n nSlides(nSlides() + 1)\n })\n\n output[[\"slideIndex\"]] <- renderText({\n paste0(input[[\"index\"]], \"/\", nSlides())\n })\n\n}\n\nif(interactive()) shinyApp(ui, server)\n\n\n"} {"package":"ggsolvencyii","topic":"geom_sii_riskconnection","snippet":"### Name: geom_sii_riskconnection\n### Title: geom_sii_riskconnection\n### Aliases: geom_sii_riskconnection\n\n### ** Examples\n\nlibrary(ggsolvencyii)\nlibrary(ggplot2)\n\nsii_z_ex3_data[sii_z_ex3_data$description == \"SCR\", ]\n\nggplot() + geom_sii_riskconnection(data = sii_z_ex3_data, mapping = aes(\n comparewithid = comparewithid,\n x = time,\n y = ratio,\n id = id,\n ),\ncolor = \"red\",\nlwd = 0.7,\narrow = arrow()\n)\n\n\n"} {"package":"ggsolvencyii","topic":"geom_sii_riskoutline","snippet":"### Name: geom_sii_riskoutline\n### Title: geom_sii_riskoutline\n### Aliases: geom_sii_riskoutline\n\n### ** Examples\n\nlibrary(ggsolvencyii)\nlibrary(ggplot2)\n\n## see details about id and comparewithid\n# sii_z_ex3_data[sii_z_ex3_data$description == \"SCR\", 
]\n\nggplot()+\ngeom_sii_riskoutline(data = sii_z_ex3_data, mapping = aes(\n # comparewithid = comparewithid,\n x = time,\n y = ratio,\n value = value,\n id = id,\n description = description),\ncolor = \"red\",\nlwd = 0.7\n)\n\n##and with comparewithid in aes()\n\nggplot()+\ngeom_sii_riskoutline(data = sii_z_ex3_data, mapping = aes(\n comparewithid = comparewithid,\n x = time,\n y = ratio,\n value = value,\n id = id,\n description = description),\ncolor = \"red\",\nlwd = 0.7\n)\n\n\n\n\n"} {"package":"ggsolvencyii","topic":"geom_sii_risksurface","snippet":"### Name: geom_sii_risksurface\n### Title: geom_sii_risksurface\n### Aliases: geom_sii_risksurface\n\n### ** Examples\n\n## dataset human readable\nlibrary(ggsolvencyii)\nlibrary(ggplot2)\nt <- tidyr::spread(data = sii_z_ex1_data, key = description, value = value)\nt <- as.data.frame(t)\nt <- t[order(t$id),]\nt <- dplyr::select( t, id, time, comparewithid, ratio, SCR, dplyr::everything())\nt[1:3 ,1:8]\n\nggplot() +\ngeom_sii_risksurface(\n data = sii_z_ex1_data[sii_z_ex1_data$id == 1, ],\nmapping = aes(x = time,\n y = ratio,\n id = id,\n value = value,\n description = description,\n color = description,\n fill = description\n ) ) +\ntheme_bw() +\nscale_fill_manual(name = \"Risks\",values = sii_x_fillcolors_sf16_eng) +\nscale_color_manual(name = \"Risks\",values = sii_x_edgecolors_sf16_eng)\n\nggplot() +\n geom_sii_risksurface(\n data = sii_z_ex2_data,\n mapping = aes(x = time, y = ratio, id = id, value = value,\n description = description,\n # color = description,\n fill = description\n ),\n color = \"black\",\n levelmax = sii_levelmax_sf16_993) +\ntheme_bw() +\nscale_fill_manual(name = \"Risks\",values = sii_x_fillcolors_sf16_eng) # +\n# scale_color_manual(name = \"Risks\",values = sii_x_edgecolors_sf16_eng)\n\n\n\nggplot() +\n geom_sii_risksurface(data = sii_z_ex1_data[sii_z_ex1_data$id == 1, ],\n mapping = ggplot2::aes(x = time,\n y = ratio,\n ## x and y could for example be\n ## longitude and latitude\n ## in combination with plotted map\n value = value,\n id = id,\n description = description,\n fill = description, ## optional\n color = description ## optional\n ),\n ## all parameters are shown here,\n ## the values behind the outcommented are the default values\n ## how and what\n ## structure = sii_structure_sf16_eng,\n ## plotdetails = NULL,\n ## grouping\n # levelmax = 99,\n # aggregatesuffix = \"other\",\n ## scaling\n # maxscrvalue = NULL,\n # scalingx = 1,\n # scalingy = 1,\n ## rotation and squared\n # rotationdegrees = NULL,\n # rotationdescription = NULL,\n # squared = FALSE,\n ## cosmetic\n lwd = 0.25,\n # alpha = 1\n ) +\n theme_bw() +\n scale_fill_manual(name = \"risks\", values = sii_z_ex1_fillcolors) +\n scale_color_manual(name = \"risks\", values = sii_z_ex1_edgecolors)\n\n\n"} {"package":"ggsolvencyii","topic":"sii_levelmax_sf16_993","snippet":"### Name: sii_levelmax_sf16_993\n### Title: sii_levelmax_sf16_993\n### Aliases: sii_levelmax_sf16_993\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_levelmax_sf16_993\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_levelmax_sf16_995","snippet":"### Name: sii_levelmax_sf16_995\n### Title: sii_levelmax_sf16_995\n### Aliases: sii_levelmax_sf16_995\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_levelmax_sf16_995\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_plotdetails_sf16","snippet":"### Name: sii_plotdetails_sf16\n### Title: sii_plotdetails_sf16\n### Aliases: sii_plotdetails_sf16\n### Keywords: 
datasets\n\n### ** Examples\n\ninstalledtable <- sii_plotdetails_sf16\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_structure_sf16_eng","snippet":"### Name: sii_structure_sf16_eng\n### Title: sii_structure_sf16_eng\n### Aliases: sii_structure_sf16_eng\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_structure_sf16_eng\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_structure_sf16_nld","snippet":"### Name: sii_structure_sf16_nld\n### Title: sii_structure_sf16_nld\n### Aliases: sii_structure_sf16_nld\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_structure_sf16_nld\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_x_edgecolors_sf16_eng","snippet":"### Name: sii_x_edgecolors_sf16_eng\n### Title: sii_x_edgecolors_sf16_eng\n### Aliases: sii_x_edgecolors_sf16_eng\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_x_edgecolors_sf16_eng\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_x_edgecolors_sf16_nld","snippet":"### Name: sii_x_edgecolors_sf16_nld\n### Title: sii_x_edgecolors_sf16_nld\n### Aliases: sii_x_edgecolors_sf16_nld\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_x_edgecolors_sf16_nld\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_x_fillcolors_sf16_eng","snippet":"### Name: sii_x_fillcolors_sf16_eng\n### Title: sii_x_fillcolors_sf16_eng\n### Aliases: sii_x_fillcolors_sf16_eng\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_x_fillcolors_sf16_eng\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_x_fillcolors_sf16_nld","snippet":"### Name: sii_x_fillcolors_sf16_nld\n### Title: sii_x_fillcolors_sf16_nld\n### Aliases: sii_x_fillcolors_sf16_nld\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_x_fillcolors_sf16_nld\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_data","snippet":"### Name: sii_z_ex1_data\n### Title: sii_z_ex1_data\n### Aliases: sii_z_ex1_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_edgecolors","snippet":"### Name: sii_z_ex1_edgecolors\n### Title: sii_z_ex1_edgecolors\n### Aliases: sii_z_ex1_edgecolors\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_edgecolors\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_fillcolors","snippet":"### Name: sii_z_ex1_fillcolors\n### Title: sii_z_ex1_fillcolors\n### Aliases: sii_z_ex1_fillcolors\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_fillcolors\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_levelmax","snippet":"### Name: sii_z_ex1_levelmax\n### Title: sii_z_ex1_levelmax\n### Aliases: sii_z_ex1_levelmax\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_levelmax\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_plotdetails","snippet":"### Name: sii_z_ex1_plotdetails\n### Title: sii_z_ex1_plotdetails #' A table for 'geom_sii_risksurface' and\n### 'geom_sii_riskoutline' indicating which outlines of each item should\n### be shown, specified per level and/or description. the latter overrule\n### the former. when defining an item (or the 'squared = TRUE'\n### transformation) 4 lines can be distinguished, a radialline going\n### outwards, a circle segment (clockwise), a radialline going inwards, a\n### circle segment (counterclockwise). 
These are numbered as outline1 to\n### outline4.\n### Aliases: sii_z_ex1_plotdetails\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_plotdetails\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_plotdetails2","snippet":"### Name: sii_z_ex1_plotdetails2\n### Title: sii_z_ex1_plotdetails2 #' A table for 'geom_sii_risksurface' and\n### 'geom_sii_riskoutline' indicating which outlines of each item should\n### be shown, specified per level and/or description. the latter overrule\n### the former. when defining an item (or the 'squared = TRUE'\n### transformation) 4 lines can be distinguished, a radialline going\n### outwards, a circle segment (clockwise), a radialline going inwards, a\n### circle segment (counterclockwise). These are numbered as outline1 to\n### outline4.\n### Aliases: sii_z_ex1_plotdetails2\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_plotdetails2\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex1_structure","snippet":"### Name: sii_z_ex1_structure\n### Title: sii_z_ex1_structure\n### Aliases: sii_z_ex1_structure\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex1_structure\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex2_data","snippet":"### Name: sii_z_ex2_data\n### Title: sii_z_ex2_data\n### Aliases: sii_z_ex2_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex2_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex3_data","snippet":"### Name: sii_z_ex3_data\n### Title: sii_z_ex3_data\n### Aliases: sii_z_ex3_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex3_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex3_plotdetails","snippet":"### Name: sii_z_ex3_plotdetails\n### Title: sii_z_ex3_plotdetails #' A table for 'geom_sii_risksurface' and\n### 'geom_sii_riskoutline' indicating which outlines of each item should\n### be shown, specified per level and/or description. the latter overrule\n### the former. when defining an item (or the 'squared = TRUE'\n### transformation) 4 lines can be distinguished, a radialline going\n### outwards, a circle segment (clockwise), a radialline going inwards, a\n### circle segment (counterclockwise). 
These are numbered as outline1 to\n### outline4.\n### Aliases: sii_z_ex3_plotdetails\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex3_plotdetails\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex4_data","snippet":"### Name: sii_z_ex4_data\n### Title: sii_z_ex4_data\n### Aliases: sii_z_ex4_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex4_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex4_levelmax","snippet":"### Name: sii_z_ex4_levelmax\n### Title: sii_z_ex4_levelmax\n### Aliases: sii_z_ex4_levelmax\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex4_levelmax\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex4_structure","snippet":"### Name: sii_z_ex4_structure\n### Title: sii_z_ex4_structure\n### Aliases: sii_z_ex4_structure\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex4_structure\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_data","snippet":"### Name: sii_z_ex6_data\n### Title: sii_z_ex6_data\n### Aliases: sii_z_ex6_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_data2","snippet":"### Name: sii_z_ex6_data2\n### Title: sii_z_ex6_data2\n### Aliases: sii_z_ex6_data2\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_data2\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_edgecolors","snippet":"### Name: sii_z_ex6_edgecolors\n### Title: sii_z_ex6_edgecolors\n### Aliases: sii_z_ex6_edgecolors\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_edgecolors\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_fillcolors","snippet":"### Name: sii_z_ex6_fillcolors\n### Title: sii_z_ex6_fillcolors\n### Aliases: sii_z_ex6_fillcolors\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_fillcolors\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_levelmax","snippet":"### Name: sii_z_ex6_levelmax\n### Title: sii_z_ex6_levelmax\n### Aliases: sii_z_ex6_levelmax\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_levelmax\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_plotdetails","snippet":"### Name: sii_z_ex6_plotdetails\n### Title: sii_z_ex6_plotdetails #' A table for 'geom_sii_risksurface' and\n### 'geom_sii_riskoutline' indicating which outlines of each item should\n### be shown, specified per level and/or description. the latter overrule\n### the former. when defining an item (or the 'squared = TRUE'\n### transformation) 4 lines can be distinguished, a radialline going\n### outwards, a circle segment (clockwise), a radialline going inwards, a\n### circle segment (counterclockwise). 
These are numbered as outline1 to\n### outline4.\n### Aliases: sii_z_ex6_plotdetails\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_plotdetails\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex6_structure","snippet":"### Name: sii_z_ex6_structure\n### Title: sii_z_ex6_structure\n### Aliases: sii_z_ex6_structure\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex6_structure\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex7_data","snippet":"### Name: sii_z_ex7_data\n### Title: sii_z_ex7_data\n### Aliases: sii_z_ex7_data\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex7_data\ninstalledtable\n\n\n"} {"package":"ggsolvencyii","topic":"sii_z_ex7_plotdetails","snippet":"### Name: sii_z_ex7_plotdetails\n### Title: sii_z_ex7_plotdetails #' A table for 'geom_sii_risksurface' and\n### 'geom_sii_riskoutline' indicating which outlines of each item should\n### be shown, specified per level and/or description. the latter overrule\n### the former. when defining an item (or the 'squared = TRUE'\n### transformation) 4 lines can be distinguished, a radialline going\n### outwards, a circle segment (clockwise), a radialline going inwards, a\n### circle segment (counterclockwise). These are numbered as outline1 to\n### outline4.\n### Aliases: sii_z_ex7_plotdetails\n### Keywords: datasets\n\n### ** Examples\n\ninstalledtable <- sii_z_ex7_plotdetails\ninstalledtable\n\n\n"} {"package":"bioimagetools","topic":"distance2border","snippet":"### Name: distance2border\n### Title: A function to compute the distance from spots to borders of\n### classes\n### Aliases: distance2border\n\n### ** Examples\n\n## Not run: \n##D #simulate random data\n##D randompoints<-data.frame(\"X\"=runif(100,0,3),\"Y\"=runif(100,0,3),\"Z\"=runif(100,0,.5))\n##D # coordinates in microns!\n##D plot(randompoints$X,randompoints$Y,xlim=c(0,3),ylim=c(0,3),pch=19)\n##D \n##D # points in a circle\n##D circlepoints<-read.table(system.file(\"extdata\",\"kreispunkte.table\",\n##D package=\"bioimagetools\"),header=TRUE)\n##D plot(circlepoints$X,circlepoints$Y,xlim=c(0,3),ylim=c(0,3),pch=19)\n##D \n##D # a circle like image\n##D img<-readTIF(system.file(\"extdata\",\"kringel.tif\",package=\"bioimagetools\"))\n##D img<-array(img,dim(img)) # save as array for easier handling\n##D img(img, z=1)\n##D \n##D #and a mask\n##D mask<-readTIF(system.file(\"extdata\",\"amask.tif\",package=\"bioimagetools\"))\n##D img(mask, z=1, col=\"greyinverted\")\n##D \n##D xy.microns <- 3 # size in x and y direction (microns)\n##D z.microns <- 0.5 # size in z direction (microns)\n##D \n##D # distance from points to class \n##D d1<-distance2border(randompoints, img, xy.microns, xy.microns, z.microns, class1=1,hist=TRUE)\n##D d2<-distance2border(circlepoints, img, xy.microns, xy.microns, z.microns, class1=1,hist=FALSE)\n##D plot(density(d2),type=\"l\")\n##D lines(c(0,0),c(0,10),lty=3)\n##D lines(density(d1),col=\"blue\")\n##D \n##D # use mask, should give some small changes\n##D d3<-distance2border(circlepoints, img, xy.microns, xy.microns, z.microns, \n##D class1=1,mask=mask,hist=FALSE)\n##D plot(density(d2),type=\"l\")\n##D lines(c(0,0),c(0,10),lty=3)\n##D lines(density(d3),col=\"blue\")\n##D \n##D # distance from border between classes\n##D anotherimg<-img+mask\n##D image(seq(0,3,length=300),seq(0,3,length=300),anotherimg[,,1])\n##D points(circlepoints,pch=19)\n##D d4<-distance2border(circlepoints, anotherimg, xy.microns, xy.microns, z.microns, \n##D 
class1=1,class2=2)\n##D plot(density(d4),lwd=2)\n##D \n##D # this should give the same answer\n##D d5<-distance2border(circlepoints, anotherimg, xy.microns, xy.microns, z.microns, \n##D class1=2,class2=1)\n##D lines(density(-d5),lty=3,col=\"blue\",lwd=1.5)\n## End(Not run)\n\n\n"} {"package":"bioimagetools","topic":"nearest.neighbour.distribution","snippet":"### Name: nearest.neighbour.distribution\n### Title: Nearest neighbor distribution (D curve)\n### Aliases: nearest.neighbour.distribution\n\n### ** Examples\n\np<-read.csv(system.file(\"extdata\",\"cell.csv\",package=\"bioimagetools\")) \nnearest.neighbour.distribution(p$X,p$Y,p$Z)\n\n\n"} {"package":"bioimagetools","topic":"readBMP","snippet":"### Name: readBMP\n### Title: Read bitmap files\n### Aliases: readBMP\n\n### ** Examples\n\nbi<-readBMP(system.file(\"extdata/V.bmp\",package=\"bioimagetools\"))\nimage(bi,col=grey(seq(1,0,length=100)))\n\n\n"} {"package":"bioimagetools","topic":"readTIF","snippet":"### Name: readTIF\n### Title: Read tif stacks\n### Aliases: readTIF\n\n### ** Examples\n\n## No test: \nkringel <- readTIF(system.file(\"extdata\",\"kringel.tif\",package=\"bioimagetools\"))\nimg(kringel)\n## End(No test)\n\n\n"} {"package":"bioimagetools","topic":"segment","snippet":"### Name: segment\n### Title: Segmentation of 3D images using EM algorithms\n### Aliases: segment\n\n### ** Examples\n\n## Not run: \n##D original<-array(1,c(300,300,50))\n##D for (i in 1:5)original[(i*60)-(0:20),,]<-original[(i*60)-(0:20),,]+1\n##D for (i in 1:10)original[,(i*30)-(0:15),]<-original[,(i*30)-(0:15),]+1\n##D original[,,26:50]<-4-aperm(original[,,26:50],c(2,1,3))\n##D \n##D img<-array(rnorm(300*300*50,original,.2),c(300,300,50))\n##D img<-img-min(img)\n##D img<-img/max(img)\n##D \n##D try1<-segment(img,3,beta=0.5,z.scale=.3)\n##D print(sum(try1$class!=original)/prod(dim(original)))\n##D \n##D beta<-matrix(rep(-.5,9),nrow=3)\n##D beta<-beta+1.5*diag(3)\n##D try2<-segment(img,3,beta,z.scale=.3)\n##D print(sum(try2$class!=original)/prod(dim(original)))\n##D \n##D par(mfrow=c(2,2))\n##D img(original)\n##D img(img)\n##D img(try1$class)\n##D img(try2$class)\n## End(Not run)\n\n\n"} {"package":"bioimagetools","topic":"segment.outside","snippet":"### Name: segment.outside\n### Title: Segmentation of the background of 3D images based on automatic\n### threshold\n### Aliases: segment.outside\n\n### ** Examples\n\n## No test: \nkringel <- readTIF(system.file(\"extdata\",\"kringel.tif\",package=\"bioimagetools\"))\nout <- segment.outside(kringel)\nimg(out, z=1)\n## End(No test)\n\n\n"} {"package":"bioimagetools","topic":"standardize","snippet":"### Name: standardize\n### Title: Standardize images\n### Aliases: standardize\n\n### ** Examples\n\n# simulate data for testing\ntest2<-runif(128*128,0,1)\ntest2<-sort(test2)\ntest2<-array(test2,c(128,128))\nimg(test2)\n# standardize test2 into 32 classes\nstd<-standardize(test2,N=32,sd=4)\n\n\n"} {"package":"bioimagetools","topic":"table.n","snippet":"### Name: table.n\n### Title: Cross Tabulation and Table Creation (including empty classes)\n### Aliases: table.n\n\n### ** Examples\n\nx <- c(1,1,2,2,4,4,4)\ntable.n(x)\n# [1] 2 2 0 3\ntable.n(x, m=5)\n# [1] 2 2 0 3 0\ntable.n(x, weight=c(1,1,1,2,.5,.5,.5))\n# [1] 2.0 3.0 0.0 1.5\n\n\n\n"}
{"package":"psidR","topic":"build.panel","snippet":"### Name: build.panel\n### Title: build.panel: Build PSID panel data set\n### Aliases: build.panel\n\n### ** Examples\n\n## Not run: \n##D # ################################################\n##D # Real-world example: not run because it takes long.\n##D # Build panel with income, wage, age and education\n##D # optionally: add wealth supplements!\n##D # ################################################\n##D \n##D # The package is installed with a list of variables\n##D # Alternatively, search for names with \\code{getNamesPSID}\n##D # This is the body of function build.psid()\n##D # (so why not call build.psid() and see what happens!)\n##D r = system.file(package=\"psidR\")\n##D if (small){\n##D f = fread(file.path(r,\"psid-lists\",\"famvars-small.txt\"))\n##D i = fread(file.path(r,\"psid-lists\",\"indvars-small.txt\"))\n##D } else {\n##D f = fread(file.path(r,\"psid-lists\",\"famvars.txt\"))\n##D i = fread(file.path(r,\"psid-lists\",\"indvars.txt\"))\n##D }\n##D setkey(i,\"name\")\n##D setkey(f,\"name\")\n##D i = dcast(i[,list(year,name,variable)],year~name)\n##D f = dcast(f[,list(year,name,variable)],year~name)\n##D d = build.panel(datadir=\"~/datasets/psid/\",fam.vars=f,\n##D ind.vars=i, \n##D heads.only =TRUE,sample=\"SRC\",\n##D design=\"all\")\n##D save(d,file=\"~/psid.RData\")\n## End(Not run)\n\n# ######################################\n# reproducible example on artificial data. \n# run this with example(build.panel).\n# ######################################\n\n## make reproducible family data sets for 2 years\n## variables are: family income (Money) and age\n\n## Data acquisition step:\n## run build.panel with sascii=TRUE\n\n# testPSID creates artificial PSID data\ntd <- testPSID(N=12,N.attr=0)\nfam1985 <- data.table::copy(td$famvars1985)\nfam1986 <- data.table::copy(td$famvars1986)\nIND2019ER <- data.table::copy(td$IND2019ER)\n\n# create a temporary datadir\nmy.dir <- tempdir()\n# save those in the datadir\n# note that different R formats are admissible\nsave(fam1985,file=paste0(my.dir,\"/FAM1985ER.rda\"))\nsave(fam1986,file=paste0(my.dir,\"/FAM1986ER.RData\"))\nsave(IND2019ER,file=paste0(my.dir,\"/IND2019ER.RData\"))\n\n## end Data acquisition step.\n\n# now define which famvars\nfamvars <- data.frame(year=c(1985,1986),\n money=c(\"Money85\",\"Money86\"),\n age=c(\"age85\",\"age86\"))\n\n# create ind.vars\nindvars <- data.frame(year=c(1985,1986),ind.weight=c(\"ER30497\",\"ER30534\"))\n\n# call the builder\n# data will contain column \"relation.head\" holding the relationship code.\n\nd <- build.panel(datadir=my.dir,fam.vars=famvars,\n ind.vars=indvars,\n heads.only=FALSE)\t\n\n# see what happens if we drop non-heads\n# only the ones who are heads in BOTH years \n# are present (since design='balanced' by default)\nd <- build.panel(datadir=my.dir,fam.vars=famvars,\n ind.vars=indvars,\n heads.only=TRUE)\t\nprint(d[order(pid)],nrow=Inf)\n\n# change sample design to \"all\": \n# we'll keep individuals if they are head in one year,\n# and drop in the other\nd <- build.panel(datadir=my.dir,fam.vars=famvars,\n ind.vars=indvars,heads.only=TRUE,\n design=\"all\")\t\nprint(d[order(pid)],nrow=Inf)\n\nfile.remove(paste0(my.dir,\"/FAM1985ER.rda\"),\n paste0(my.dir,\"/FAM1986ER.RData\"),\n paste0(my.dir,\"/IND2019ER.RData\"))\n\n# END psidR example\n\n# #####################################################################\n# Please go to https://github.com/floswald/psidR for more example usage\n# #####################################################################\n\n\n"} {"package":"psidR","topic":"getNamesPSID","snippet":"### Name: getNamesPSID\n### Title: Get PSID variable names from various years\n### Aliases: getNamesPSID\n\n### ** Examples\n\n# read UMich crosswalk from installed file\nr = 
system.file(package=\"psidR\")\ncwf = openxlsx::read.xlsx(file.path(r,\"psid-lists\",\"psid.xlsx\"))\n\n# or download directly\n# cwf <- read.xlsx(\"http://psidonline.isr.umich.edu/help/xyr/psid.xlsx\")\n\n# then get names with\ngetNamesPSID(\"ER17013\", cwf, years = 2001)\ngetNamesPSID(\"ER17013\", cwf, years = 2003)\ngetNamesPSID(\"ER17013\", cwf, years = NULL)\ngetNamesPSID(\"ER17013\", cwf, years = c(2005, 2007, 2009))\n\n\n"} {"package":"todor","topic":"clean_comments","snippet":"### Name: clean_comments\n### Title: Clean line from comment tags\n### Aliases: clean_comments\n\n### ** Examples\n\n## Not run: \n##D clean_comments(\"#' TODO abc abc\") #\"TODO abc abc\"\n## End(Not run)\n\n\n"} {"package":"M3JF","topic":"M3JF","snippet":"### Name: M3JF\n### Title: Multi-Modal Matrix Joint Factorization\n### Aliases: M3JF\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\nM3JF_res <- M3JF(temp_data,k=4)\n\n\n"} {"package":"M3JF","topic":"RotationCostBestGivenGraph","snippet":"### Name: RotationCostBestGivenGraph\n### Title: Evaluate the cluster number of multiple modality data\n### Aliases: RotationCostBestGivenGraph\n\n### ** Examples\n\nlibrary(InterSIM)\nlibrary(SNFtool)\nsim.data <- InterSIM(n.sample=100, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ndat <- lapply(temp_data, function(dd) {\n dd <- as.matrix(dd)\n dd1 <- dist2(dd,dd)\n W1 <- affinityMatrix(dd1, K = 10, sigma = 0.5)\n})\nW <- SNF(dat, 10, 10)\nclu_eval <- RotationCostBestGivenGraph(W,2:10)\n\n\n"} {"package":"M3JF","topic":"cost","snippet":"### Name: cost\n### Title: Calculate the cost defined by the objective function\n### Aliases: cost\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ninit_list <- initialize_WL(temp_data,k=4)\nupdate_H_list <- update_H(temp_data,init_list)\nlambda <- 0.01\nupdate_E_list <- update_E(temp_data,update_H_list,lambda)\nnew_cost <- cost(temp_data,update_E_list,lambda)\n\n\n"} {"package":"M3JF","topic":"crimmix_data_gen","snippet":"### Name: crimmix_data_gen\n### Title: Generate the simulated dataset with three modalities with the\n### package crimmix\n### Aliases: crimmix_data_gen\n\n### ** Examples\n\ncrimmix_data <- crimmix_data_gen(nclust=4, 
n_byClust=c(10,20,5,25),\nfeature_nums=c(1000,500,5000), noises=c(0.5,0.01,0.3),props=c(0.005,0.01,0.02))\n\n\n"} {"package":"M3JF","topic":"feature_screen_sd","snippet":"### Name: feature_screen_sd\n### Title: Screen the cluster related features via hypergeometric test p\n### value and distribution standard deviation\n### Aliases: feature_screen_sd\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\nM3JF_res <- M3JF(temp_data,k=4)\nfeature_list <- feature_selection(temp_data[[1]],M3JF_res$cluster_res,z_score=TRUE,\nupper_bound=1, lower_bound=-1)\nselected_features <- feature_screen_sd(feature_list,sig_num=20)\n\n\n"} {"package":"M3JF","topic":"feature_selection","snippet":"### Name: feature_selection\n### Title: Select the cluster related features via hypergeometric test\n### Aliases: feature_selection\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\nM3JF_res <- M3JF(temp_data,k=4)\nfeature_list <- feature_selection(temp_data[[1]],M3JF_res$cluster_res,z_score=TRUE,\nupper_bound=1, lower_bound=-1)\n\n\n"} {"package":"M3JF","topic":"iNMF_data_gen","snippet":"### Name: iNMF_data_gen\n### Title: Generate the simulated dataset with three modalities as in the work\n### iNMF\n### Aliases: iNMF_data_gen\n\n### ** Examples\n\niNMF_data <- iNMF_data_gen(Xs_dim_list=list(c(100,100),c(100,100),c(100,100)),\nmod_dim_list=list(matrix(c(20,30,20,30,20,30,20,30),4,2),\nmatrix(c(20,20,30,30,20,30,20,30),4,2),\nmatrix(c(26,24,26,24,20,30,20,30),4,2)),e_u=0.15, e_s=0.9, e_h=0)\n\n\n"} {"package":"M3JF","topic":"initialize_WL","snippet":"### Name: initialize_WL\n### Title: Initialize the shared sub-matrix E and modality specific\n### sub-matrices list Hi\n### Aliases: initialize_WL\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ninit_list <- initialize_WL(temp_data,k=4)\n\n\n"} {"package":"M3JF","topic":"intersim_data_gen","snippet":"### Name: intersim_data_gen\n### Title: Generate the simulated dataset with three modalities with the\n### package InterSIM\n### Aliases: intersim_data_gen\n\n### ** Examples\n\nlibrary(InterSIM)\nintersim_data <- intersim_data_gen(prop=c(0.20,0.30,0.27,0.23), n_sample=500)\n\n\n"}
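The M3JF records above and below (initialize_WL, update_H, update_E, cost, kmeanspp) each show a single call in isolation. The sketch below chains them into the alternating-update loop they imply. Only the individual calls and arguments are taken from those examples; the explicit repeat loop and its 1e-6 convergence tolerance are illustrative assumptions about what the M3JF() wrapper does internally, not documented package behaviour.

library(InterSIM)
library(M3JF)
sim.data <- InterSIM(n.sample = 500, cluster.sample.prop = c(0.20, 0.30, 0.27, 0.23),
                     delta.methyl = 5, delta.expr = 5, delta.protein = 5, p.DMP = 0.2,
                     do.plot = FALSE, sample.cluster = TRUE, feature.cluster = TRUE)
temp_data <- list(sim.data$dat.methyl, sim.data$dat.expr, sim.data$dat.protein)
lambda <- 0.01
res <- initialize_WL(temp_data, k = 4)       # shared E plus modality-specific Hi
old_cost <- Inf
repeat {
  res <- update_H(temp_data, res)            # refresh each modality-specific Hi
  res <- update_E(temp_data, res, lambda)    # refresh the shared sub-matrix E
  new_cost <- cost(temp_data, res, lambda)   # objective value after this sweep
  if (old_cost - new_cost < 1e-6) break      # assumed stopping rule, for illustration
  old_cost <- new_cost
}
cluster_res <- kmeanspp(res[[4]], 4)         # res[[4]] holds E, as in the kmeanspp example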
{"package":"M3JF","topic":"kmeanspp","snippet":"### Name: kmeanspp\n### Title: A new version of kmeans that generates stable cluster result\n### Aliases: kmeanspp\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ninit_list <- initialize_WL(temp_data,k=4)\nlambda <- 0.01\nupdate_E_list <- update_E(temp_data,init_list,lambda)\ncluster_res <- kmeanspp(update_E_list[[4]],4)\n\n\n"} {"package":"M3JF","topic":"simulateY","snippet":"### Name: simulateY\n### Title: Generate the simulated dataset with specified parameters\n### Aliases: simulateY\n\n### ** Examples\n\ntemp_data <- simulateY(nclust = 4, n_byClust = c(10,20,5,25), J=1000,\nprop = 0.01, noise = 0.1,flavor =c(\"normal\", \"beta\", \"binary\"),\nparams = list(c(mean = 1,sd = 1)))\n\n\n"} {"package":"M3JF","topic":"update_E","snippet":"### Name: update_E\n### Title: Update sub-matrix E\n### Aliases: update_E\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ninit_list <- initialize_WL(temp_data,k=4)\nupdate_H_list <- update_H(temp_data,init_list)\nlambda <- 0.01\nupdate_E_list <- update_E(temp_data,update_H_list,lambda)\n\n\n"} {"package":"M3JF","topic":"update_H","snippet":"### Name: update_H\n### Title: Update sub-matrices list Hi\n### Aliases: update_H\n\n### ** Examples\n\nlibrary(InterSIM)\nsim.data <- InterSIM(n.sample=500, cluster.sample.prop = c(0.20,0.30,0.27,0.23),\ndelta.methyl=5, delta.expr=5, delta.protein=5,p.DMP=0.2, p.DEG=NULL,\np.DEP=NULL,sigma.methyl=NULL, sigma.expr=NULL, sigma.protein=NULL,cor.methyl.expr=NULL,\ncor.expr.protein=NULL,do.plot=FALSE, sample.cluster=TRUE, feature.cluster=TRUE)\nsim.methyl <- sim.data$dat.methyl\nsim.expr <- sim.data$dat.expr\nsim.protein <- sim.data$dat.protein\ntemp_data <- list(sim.methyl, sim.expr, sim.protein)\ninit_list <- initialize_WL(temp_data,k=4)\nupdate_H_list <- update_H(temp_data,init_list)\n\n\n"} {"package":"nestr","topic":"amplify","snippet":"### Name: amplify\n### Title: Amplify the data frame with a given structure\n### Aliases: amplify amplify.data.frame\n\n### ** Examples\n\ndf <- data.frame(x = 1:3, y = c(\"a\", \"b\", \"b\"))\namplify(df, z = nest_in(y, \"a\" ~ 5,\n \"b\" ~ 3))\n\n\n\n"} {"package":"nestr","topic":"nest_in","snippet":"### Name: nest_in\n### Title: Create a nested structure\n### Aliases: nest_in\n\n### ** Examples\n\n# Each element in the supplied the vector has 4 child.\nnest_in(1:3, 4)\n\n# prefix and suffix can be added to child labels\n# along with other aesthesitics like leading zeroes\n# with minimum number of digits.\nnest_in(1:3, 10, prefix = \"id-\", suffix = \"xy\", leading0 = 4)\n\n# you can specify unbalanced nested 
structures\nnest_in(2:4,\n 1 ~ 3,\n 2 ~ 4,\n 3 ~ 2)\n\n# A `.` may be used to specify \"otherwise\".\nnest_in(c(\"A\", \"B\", \"C\", \"D\"),\n 2:3 ~ 10,\n . ~ 3)\n\n# The parental level can be referred by its name or vectorised.\nnest_in(c(\"A\", \"B\", \"C\"),\n c(\"A\", \"B\") ~ 10,\n \"C\" ~ 3)\n\n\n\n"} {"package":"STEPCAM","topic":"STEPCAM_ABC","snippet":"### Name: STEPCAM_ABC\n### Title: ABC-SMC inference of the STEPCAM model\n### Aliases: STEPCAM_ABC\n### Keywords: community assembly traits simulation processes functional\n### diversity\n\n### ** Examples\n\n## Not run: \n##D \n##D Artificial.Data <- generate.Artificial.Data(n_species = 40, n_traits = 3,\n##D n_communities = 5, occurence_distribution = 0.2,\n##D average_richness = 0.5, sd_richness = 0.2,\n##D mechanism_random=FALSE)\n##D O <- STEPCAM_ABC(Artificial.Data$abundances, Artificial.Data$traits,\n##D numParticles = 10, n_traits = 3, plot_number = 1, stopRate = 0.8)\n## End(Not run)\n\n\n"} {"package":"STEPCAM","topic":"TernPlot","snippet":"### Name: TernPlot\n### Title: Create a ternary plot from results of the STEPCAM_ABC function\n### Aliases: TernPlot\n\n### ** Examples\n\n## Not run: \n##D Artificial.Data <- generate.Artificial.Data(n_species = 40, n_traits = 3,\n##D n_communities = 5, occurence_distribution = 0.2,\n##D average_richness = 0.5, sd_richness = 0.2,\n##D mechanism_random=FALSE)\n##D O <- STEPCAM_ABC(Artificial.Data$abundances, Artificial.Data$traits,\n##D numParticles = 10, n_traits = 3, plot_number = 1, stopRate = 0.8);\n##D TernPlot(O);\n## End(Not run)\n\n\n"} {"package":"STEPCAM","topic":"generate.Artificial.Data","snippet":"### Name: generate.Artificial.Data\n### Title: Generate Artifical data that can be used by the STEPCAM model\n### Aliases: generate.Artificial.Data\n\n### ** Examples\n\nArtificial.Data <- generate.Artificial.Data(n_species=40, n_traits=3, n_communities = 5,\n occurence_distribution = 0.2, average_richness = 0.5, sd_richness = 0.2,\n mechanism_random = FALSE)\n\n\n"} {"package":"STEPCAM","topic":"plotSMC","snippet":"### Name: plotSMC\n### Title: Plot the progression of the SMC algorithm.\n### Aliases: plotSMC\n### Keywords: SMC ABC community assembly\n\n### ** Examples\n\n## Not run: \n##D Artificial.Data <- generate.Artificial.Data(n_species = 40, n_traits = 3,\n##D n_communities = 5, occurence_distribution = 0.2,\n##D average_richness = 0.5, sd_richness = 0.2,\n##D mechanism_random=FALSE)\n##D O <- STEPCAM_ABC(Artificial.Data$abundances, Artificial.Data$traits,\n##D numParticles = 10, n_traits = 3, plot_number = 1, stopRate = 0.8)\n##D currentDir <- getwd();\n##D plotSMC(paste(currentDir,\"/\",sep=\"\"));\n## End(Not run)\n\n\n"} {"package":"STEPCAM","topic":"plotSTEPCAM","snippet":"### Name: plotSTEPCAM\n### Title: Plot the output generated by the STEPCAM_ABC function\n### Aliases: plotSTEPCAM\n\n### ** Examples\n\n## Not run: \n##D Artificial.Data <- generate.Artificial.Data(n_species = 40, n_traits = 3,\n##D n_communities = 5, occurence_distribution = 0.2,\n##D average_richness = 0.5, sd_richness = 0.2,\n##D mechanism_random=FALSE)\n##D O <- STEPCAM_ABC(Artificial.Data$abundances,Artificial.Data$traits,\n##D numParticles=10,n_traits=3,plot_number=1,stopRate=0.8);\n##D plotSTEPCAM(O);\n## End(Not run)\n\n\n"} {"package":"mixedClust","topic":"mixedCoclust","snippet":"### Name: mixedCoclust\n### Title: Function to perform a co-clustering\n### Aliases: mixedCoclust\n### Keywords: \"mixed-type-data\" \"co-clustering\"\n\n### ** Examples\n\n \n data(M1)\n nbSEM=30\n nbSEMburn=20\n 
nbindmini=1\n init = \"random\"\n\n kr=2\n kc=c(2,2,2)\n m=c(6,3)\n d.list <- c(1,41,81)\n distributions <- c(\"Multinomial\",\"Gaussian\",\"Bos\")\n res <- mixedCoclust(x = M1, idx_list = d.list,distrib_names = distributions,\n kr = kr, kc = kc, m = m, init = init,nbSEM = nbSEM,\n nbSEMburn = nbSEMburn, nbindmini = nbindmini)\n \n \n\n\n"} {"package":"eltr","topic":"create_elt","snippet":"### Name: create_elt\n### Title: Create parameters for ELT simulation\n### Aliases: create_elt\n\n### ** Examples\n\ncreate_elt (eltr::example_elt, ann_rate=\"rate\", mu=\"mean\", \n sdev_i = \"sdevi\" , sdev_c = \"sdevc\", expval = \"exp\")\n\n\n\n"} {"package":"eltr","topic":"create_oep_curve","snippet":"### Name: create_oep_curve\n### Title: OEP Curve\n### Aliases: create_oep_curve\n\n### ** Examples\n\ncreate_oep_curve(data.table::data.table(\"Year\" = c(1,2,3,4,5) , \n \"Loss\" =c(1 , 20 , 500 , 100 , 10000)) , y= \"Year\", z=\"Loss\")\n\n\n"} {"package":"eltr","topic":"create_ylt","snippet":"### Name: create_ylt\n### Title: Create a YLT from ELT via monte carlo simulation\n### Aliases: create_ylt\n\n### ** Examples\n\ncreate_ylt(create_elt(eltr::example_elt, ann_rate=\"rate\", mu=\"mean\", \n sdev_i = \"sdevi\" , sdev_c = \"sdevc\", expval=\"exp\"),\n sims=10,ann_rate = \"rate\" ,event_id = \"id\",expval = \"exp\",mu =\"mean\")\n\n\n"} {"package":"eltr","topic":"layer_loss","snippet":"### Name: layer_loss\n### Title: Limited loss to the layer\n### Aliases: layer_loss\n\n### ** Examples\n\nlayer_loss(5,2,6)\nlayer_loss(5,10,6)\n\n\n\n"} {"package":"bakeoff","topic":"bakeoff_colors","snippet":"### Name: bakeoff_colors\n### Title: Extract named *bakeoff* colors as hex codes\n### Aliases: bakeoff_colors\n\n### ** Examples\n\nbakeoff_colors()\nbakeoff_colors(\"riptide\")\nbakeoff_colors(\"baltic\", \"yellow\")\nnames(bakeoff_colors())\n\nif (require('scales')) {\n scales::show_col(bakeoff_colors(), label = FALSE)\n}\n\n\n\n"} {"package":"bakeoff","topic":"bakeoff_palette","snippet":"### Name: bakeoff_palette\n### Title: A *bakeoff* palette generator\n### Aliases: bakeoff_palette\n\n### ** Examples\n\nbakeoff_palette(\"showstopper\")\n\nif (require('scales')) {\n show_col(bakeoff_palette(\"finale\"))\n }\n\nif (require('ggplot2')) {\nline_plot <- ggplot(ratings, aes(x = episode, y = viewers_7day,\ncolor = as.factor(series), group = series)) + facet_wrap(~series) + geom_line(lwd = 2)\nline_plot + scale_color_manual(values = bakeoff_palette(), guide = \"none\")\n\nggplot(episodes, aes(episode, bakers_appeared, fill = as.factor(series))) +\ngeom_col() + facet_wrap(~series) +\nscale_fill_manual(values = bakeoff_palette(\"signature\"), guide = \"none\") +\nscale_x_continuous(breaks = scales::pretty_breaks())\n}\n\n# If you need more colors than normally found in a palette, you\n# can use a continuous palette to interpolate between existing\n# colours\npal <- bakeoff_palette(palette = \"finale\", n = 20, type = \"continuous\")\nif (require('scales')) {\nshow_col(pal)\n}\n\n\n\n"} {"package":"bakeoff","topic":"bakeoff_palette_names","snippet":"### Name: bakeoff_palette_names\n### Title: Print names of all *bakeoff* color palettes\n### Aliases: bakeoff_palette_names\n\n### ** Examples\n\nbakeoff_palette_names()\n\n\n\n"} {"package":"bakeoff","topic":"bakers","snippet":"### Name: bakers\n### Title: Bakers\n### Aliases: bakers\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n bakers\n }\nhead(bakers)\n\n\n"} {"package":"bakeoff","topic":"bakers_raw","snippet":"### Name: bakers_raw\n### 
Title: Bakers (raw)\n### Aliases: bakers_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n bakers_raw\n }\nhead(bakers_raw)\n\n\n"} {"package":"bakeoff","topic":"bakes_raw","snippet":"### Name: bakes_raw\n### Title: Bakes (raw)\n### Aliases: bakes_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n bakes_raw\n }\nhead(bakes_raw)\n\n\n"} {"package":"bakeoff","topic":"challenges","snippet":"### Name: challenges\n### Title: Challenges\n### Aliases: challenges\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n challenges\n }\nhead(challenges)\n\n\n"} {"package":"bakeoff","topic":"episodes","snippet":"### Name: episodes\n### Title: Episodes\n### Aliases: episodes\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n episodes\n }\nhead(episodes)\n\n\n"} {"package":"bakeoff","topic":"episodes_raw","snippet":"### Name: episodes_raw\n### Title: Each episodes' challenges (raw)\n### Aliases: episodes_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n episodes_raw\n }\nhead(episodes_raw)\n\n\n"} {"package":"bakeoff","topic":"ratings","snippet":"### Name: ratings\n### Title: Ratings\n### Aliases: ratings\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n ratings\n }\nhead(ratings)\n\n\n"} {"package":"bakeoff","topic":"ratings_raw","snippet":"### Name: ratings_raw\n### Title: Each episode's ratings (raw)\n### Aliases: ratings_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n ratings_raw\n }\nhead(ratings_raw)\n\n\n"} {"package":"bakeoff","topic":"results_raw","snippet":"### Name: results_raw\n### Title: Each baker's results by episode (raw)\n### Aliases: results_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n results_raw\n }\n\n\n"} {"package":"bakeoff","topic":"scale_bakeoff","snippet":"### Name: scale_bakeoff\n### Title: Color scale constructor for *bakeoff* colors\n### Aliases: scale_bakeoff scale_color_bakeoff scale_fill_bakeoff\n\n### ** Examples\n\nif (require('ggplot2')) {\n\n ggplot(ratings, aes(x = episode, y = viewers_7day, group = series, color = as.factor(series))) +\n geom_line(lwd = 3) +\n theme_minimal() +\n scale_color_bakeoff(\"finale\", guide = \"none\")\n}\n\n\n\n"} {"package":"bakeoff","topic":"seasons_raw","snippet":"### Name: seasons_raw\n### Title: Data about each season aired in the US (raw)\n### Aliases: seasons_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n seasons_raw\n }\nhead(seasons_raw)\n\n\n"} {"package":"bakeoff","topic":"series_raw","snippet":"### Name: series_raw\n### Title: Data about each series aired in the UK (raw)\n### Aliases: series_raw\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n series_raw\n }\nhead(series_raw)\n\n\n"} {"package":"bakeoff","topic":"spice_test_wide","snippet":"### Name: spice_test_wide\n### Title: Spice Test\n### Aliases: spice_test_wide\n### Keywords: datasets\n\n### ** Examples\n\nif (require('tibble')) {\n spice_test_wide\n }\nhead(spice_test_wide)\n\n\n"} {"package":"iai","topic":"acquire_license","snippet":"### Name: acquire_license\n### Title: Acquire an IAI license for the current session.\n### Aliases: acquire_license\n\n### ** Examples\n\n## Not run: iai::acquire_license()\n\n\n\n"} {"package":"iai","topic":"add_julia_processes","snippet":"### Name: add_julia_processes\n### Title: Add additional Julia worker processes to parallelize workloads\n### Aliases: add_julia_processes\n\n### ** 
Examples\n\n## Not run: iai::add_julia_processes(3)\n\n\n\n"} {"package":"iai","topic":"all_treatment_combinations","snippet":"### Name: all_treatment_combinations\n### Title: Return a dataframe containing all treatment combinations of one\n### or more treatment vectors, ready for use as treatment candidates in\n### 'fit_predict!' or 'predict'\n### Aliases: all_treatment_combinations\n\n### ** Examples\n\n## Not run: iai::all_treatment_combinations(c(1, 2, 3))\n\n\n\n"} {"package":"iai","topic":"apply","snippet":"### Name: apply\n### Title: Return the leaf index in a tree model into which each point in\n### the features falls\n### Aliases: apply\n\n### ** Examples\n\n## Not run: iai::apply(lnr, X)\n\n\n\n"} {"package":"iai","topic":"apply_nodes","snippet":"### Name: apply_nodes\n### Title: Return the indices of the points in the features that fall into\n### each node of a trained tree model\n### Aliases: apply_nodes\n\n### ** Examples\n\n## Not run: iai::apply_nodes(lnr, X)\n\n\n\n"} {"package":"iai","topic":"as.mixeddata","snippet":"### Name: as.mixeddata\n### Title: Convert a vector of values to IAI mixed data format\n### Aliases: as.mixeddata\n\n### ** Examples\n\n## Not run: \n##D df <- iris\n##D set.seed(1)\n##D df$mixed <- rnorm(150)\n##D df$mixed[1:5] <- NA # Insert some missing values\n##D df$mixed[6:10] <- \"Not graded\"\n##D df$mixed <- iai::as.mixeddata(df$mixed, c(\"Not graded\"))\n## End(Not run)\n\n\n"} {"package":"iai","topic":"autoplot.grid_search","snippet":"### Name: autoplot.grid_search\n### Title: Construct a 'ggplot2::ggplot' object plotting grid search\n### results for Optimal Feature Selection learners\n### Aliases: autoplot.grid_search\n\n### ** Examples\n\n## Not run: ggplot2::autoplot(grid)\n\n\n\n"} {"package":"iai","topic":"autoplot.roc_curve","snippet":"### Name: autoplot.roc_curve\n### Title: Construct a 'ggplot2::ggplot' object plotting the ROC curve\n### Aliases: autoplot.roc_curve\n\n### ** Examples\n\n## Not run: ggplot2::autoplot(roc)\n\n\n\n"} {"package":"iai","topic":"autoplot.similarity_comparison","snippet":"### Name: autoplot.similarity_comparison\n### Title: Construct a 'ggplot2::ggplot' object plotting the results of the\n### similarity comparison\n### Aliases: autoplot.similarity_comparison\n\n### ** Examples\n\n## Not run: ggplot2::autoplot(similarity)\n\n\n\n"} {"package":"iai","topic":"autoplot.stability_analysis","snippet":"### Name: autoplot.stability_analysis\n### Title: Construct a 'ggplot2::ggplot' object plotting the results of the\n### stability analysis\n### Aliases: autoplot.stability_analysis\n\n### ** Examples\n\n## Not run: ggplot2::autoplot(stability)\n\n\n\n"} {"package":"iai","topic":"categorical_classification_reward_estimator","snippet":"### Name: categorical_classification_reward_estimator\n### Title: Learner for conducting reward estimation with categorical\n### treatments and classification outcomes\n### Aliases: categorical_classification_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::categorical_classification_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"categorical_regression_reward_estimator","snippet":"### Name: categorical_regression_reward_estimator\n### Title: Learner for conducting reward estimation with categorical\n### treatments and regression outcomes\n### Aliases: categorical_regression_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::categorical_regression_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"categorical_reward_estimator","snippet":"### Name: 
categorical_reward_estimator\n### Title: Learner for conducting reward estimation with categorical\n### treatments\n### Aliases: categorical_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::categorical_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"categorical_survival_reward_estimator","snippet":"### Name: categorical_survival_reward_estimator\n### Title: Learner for conducting reward estimation with categorical\n### treatments and survival outcomes\n### Aliases: categorical_survival_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::categorical_survival_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"cleanup_installation","snippet":"### Name: cleanup_installation\n### Title: Remove all traces of automatic Julia/IAI installation\n### Aliases: cleanup_installation\n\n### ** Examples\n\n## Not run: iai::cleanup_installation()\n\n\n\n"} {"package":"iai","topic":"clone","snippet":"### Name: clone\n### Title: Return an unfitted copy of a learner with the same parameters\n### Aliases: clone\n\n### ** Examples\n\n## Not run: new_lnr <- iai::clone(lnr)\n\n\n\n"} {"package":"iai","topic":"convert_treatments_to_numeric","snippet":"### Name: convert_treatments_to_numeric\n### Title: Convert 'treatments' from symbol/string format into numeric\n### values.\n### Aliases: convert_treatments_to_numeric\n\n### ** Examples\n\n## Not run: iai::convert_treatments_to_numeric(c(\"1\", \"2\", \"3\"))\n\n\n\n"} {"package":"iai","topic":"copy_splits_and_refit_leaves","snippet":"### Name: copy_splits_and_refit_leaves\n### Title: Copy the tree split structure from one learner into another and\n### refit the models in each leaf of the tree using the supplied data\n### Aliases: copy_splits_and_refit_leaves\n\n### ** Examples\n\n## Not run: iai::copy_splits_and_refit_leaves(new_lnr, orig_lnr, ...)\n\n\n\n"} {"package":"iai","topic":"decision_path","snippet":"### Name: decision_path\n### Title: Return a matrix where entry '(i, j)' is true if the 'i'th point\n### in the features passes through the 'j'th node in a trained tree\n### model.\n### Aliases: decision_path\n\n### ** Examples\n\n## Not run: iai::decision_path(lnr, X)\n\n\n\n"} {"package":"iai","topic":"delete_rich_output_param","snippet":"### Name: delete_rich_output_param\n### Title: Delete a global rich output parameter\n### Aliases: delete_rich_output_param\n\n### ** Examples\n\n## Not run: iai::delete_rich_output_param(\"simple_layout\")\n\n\n\n"} {"package":"iai","topic":"equal_propensity_estimator","snippet":"### Name: equal_propensity_estimator\n### Title: Learner that estimates equal propensity for all treatments.\n### Aliases: equal_propensity_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::equal_propensity_estimator()\n\n\n\n"} {"package":"iai","topic":"fit.grid_search","snippet":"### Name: fit.grid_search\n### Title: Fits a 'grid_search' to the training data\n### Aliases: fit.grid_search\n\n### ** Examples\n\n## Not run: \n##D X <- iris[, 1:4]\n##D y <- iris$Species\n##D grid <- iai::grid_search(\n##D iai::optimal_tree_classifier(max_depth = 1),\n##D )\n##D iai::fit(grid, X, y)\n## End(Not run)\n\n\n"} {"package":"iai","topic":"fit.imputation_learner","snippet":"### Name: fit.imputation_learner\n### Title: Fits an imputation learner to the training data.\n### Aliases: fit.imputation_learner\n\n### ** Examples\n\n## Not run: iai::fit(lnr, X)\n\n\n\n"} {"package":"iai","topic":"fit.learner","snippet":"### Name: fit.learner\n### Title: Fits a model to the training data\n### Aliases: fit.learner\n\n### ** 
Examples\n\n## Not run: iai::fit(lnr, X, y)\n\n\n\n"} {"package":"iai","topic":"fit.optimal_feature_selection_learner","snippet":"### Name: fit.optimal_feature_selection_learner\n### Title: Fits an Optimal Feature Selection learner to the training data\n### Aliases: fit.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::fit(lnr, X)\n\n\n\n"} {"package":"iai","topic":"fit_and_expand","snippet":"### Name: fit_and_expand\n### Title: Fit an imputation learner with training features and create\n### adaptive indicator features to encode the missing pattern\n### Aliases: fit_and_expand\n\n### ** Examples\n\n## Not run: lnr <- iai::fit_and_expand(lnr, X, type = \"finite\")\n\n\n\n"} {"package":"iai","topic":"fit_cv","snippet":"### Name: fit_cv\n### Title: Fits a grid search to the training data with cross-validation\n### Aliases: fit_cv\n\n### ** Examples\n\n## Not run: \n##D X <- iris[, 1:4]\n##D y <- iris$Species\n##D grid <- iai::grid_search(\n##D iai::optimal_tree_classifier(max_depth = 1),\n##D )\n##D iai::fit_cv(grid, X, y)\n## End(Not run)\n\n\n"} {"package":"iai","topic":"fit_predict.categorical_reward_estimator","snippet":"### Name: fit_predict.categorical_reward_estimator\n### Title: Fit a categorical reward estimator on features, treatments and\n### outcomes and return predicted counterfactual rewards for each\n### observation, under each treatment observed in the data, as well as\n### the scores of the internal estimators.\n### Aliases: fit_predict.categorical_reward_estimator\n\n### ** Examples\n\n## Not run: iai::fit_predict(obj, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"fit_predict.numeric_reward_estimator","snippet":"### Name: fit_predict.numeric_reward_estimator\n### Title: Fit a numeric reward estimator on features, treatments and\n### outcomes and return predicted counterfactual rewards for each\n### observation, under each treatment candidate, as well as the scores of\n### the internal estimators.\n### Aliases: fit_predict.numeric_reward_estimator\n\n### ** Examples\n\n## Not run: iai::fit_predict(obj, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"fit_transform","snippet":"### Name: fit_transform\n### Title: Fit an imputation model using the given features and impute the\n### missing values in these features\n### Aliases: fit_transform\n\n### ** Examples\n\n## Not run: \n##D X <- iris\n##D X[1, 1] <- NA\n##D grid <- iai::grid_search(\n##D iai::imputation_learner(),\n##D method = c(\"opt_knn\", \"opt_tree\"),\n##D )\n##D iai::fit_transform(grid, X)\n## End(Not run)\n\n\n"} {"package":"iai","topic":"fit_transform_cv","snippet":"### Name: fit_transform_cv\n### Title: Train a grid using cross-validation with features and impute all\n### missing values in these features\n### Aliases: fit_transform_cv\n\n### ** Examples\n\n## Not run: \n##D X <- iris\n##D X[1, 1] <- NA\n##D grid <- iai::grid_search(\n##D iai::imputation_learner(),\n##D method = c(\"opt_knn\", \"opt_tree\"),\n##D )\n##D iai::fit_transform_cv(grid, X)\n## End(Not run)\n\n\n"} {"package":"iai","topic":"get_best_params","snippet":"### Name: get_best_params\n### Title: Return the best parameter combination from a grid\n### Aliases: get_best_params\n\n### ** Examples\n\n## Not run: iai::get_best_params(grid)\n\n\n\n"} {"package":"iai","topic":"get_classification_label.classification_tree_learner","snippet":"### Name: get_classification_label.classification_tree_learner\n### Title: Return the predicted label at a node of a tree\n### Aliases: 
get_classification_label.classification_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_classification_label(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_classification_label.classification_tree_multi_learner","snippet":"### Name: get_classification_label.classification_tree_multi_learner\n### Title: Return the predicted label at a node of a multi-task tree\n### Aliases: get_classification_label.classification_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_classification_label(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_classification_proba.classification_tree_learner","snippet":"### Name: get_classification_proba.classification_tree_learner\n### Title: Return the predicted probabilities of class membership at a node\n### of a tree\n### Aliases: get_classification_proba.classification_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_classification_proba(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_classification_proba.classification_tree_multi_learner","snippet":"### Name: get_classification_proba.classification_tree_multi_learner\n### Title: Return the predicted probabilities of class membership at a node\n### of a multi-task tree\n### Aliases: get_classification_proba.classification_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_classification_proba(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_cluster_assignments","snippet":"### Name: get_cluster_assignments\n### Title: Return the indices of the trees assigned to each cluster, under\n### the clustering of a given number of trees\n### Aliases: get_cluster_assignments\n\n### ** Examples\n\n## Not run: iai::get_cluster_assignments(stability, num_trees)\n\n\n\n"} {"package":"iai","topic":"get_cluster_details","snippet":"### Name: get_cluster_details\n### Title: Return the centroid information for each cluster, under the\n### clustering of a given number of trees\n### Aliases: get_cluster_details\n\n### ** Examples\n\n## Not run: iai::get_cluster_details(stability, num_trees)\n\n\n\n"} {"package":"iai","topic":"get_cluster_distances","snippet":"### Name: get_cluster_distances\n### Title: Return the distances between the centroids of each pair of\n### clusters, under the clustering of a given number of trees\n### Aliases: get_cluster_distances\n\n### ** Examples\n\n## Not run: iai::get_cluster_distances(stability, num_trees)\n\n\n\n"} {"package":"iai","topic":"get_depth","snippet":"### Name: get_depth\n### Title: Get the depth of a node of a tree\n### Aliases: get_depth\n\n### ** Examples\n\n## Not run: iai::get_depth(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_estimation_densities","snippet":"### Name: get_estimation_densities\n### Title: Return the total kernel density surrounding each treatment\n### candidate for the propensity/outcome estimation problems in a fitted\n### learner.\n### Aliases: get_estimation_densities\n\n### ** Examples\n\n## Not run: iai::get_estimation_densities(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"get_features_used","snippet":"### Name: get_features_used\n### Title: Return the names of the features used by the learner\n### Aliases: get_features_used\n\n### ** Examples\n\n## Not run: iai::get_features_used(lnr)\n\n\n\n"} {"package":"iai","topic":"get_grid_result_details","snippet":"### Name: get_grid_result_details\n### Title: Return a vector of lists detailing the results of the grid\n### search\n### Aliases: get_grid_result_details\n\n### ** Examples\n\n## Not run: iai::get_grid_result_details(grid)\n\n\n\n"} 
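The iai grid-search records in this stretch (fit.grid_search, get_best_params, get_learner, get_grid_result_summary, apply) likewise document one accessor per snippet. Read together they compose into the end-to-end sketch below, which reuses only calls shown in these records and keeps the package's own ## Not run: guard, since fitting requires a licensed IAI installation; the variable names are illustrative.

## Not run: 
##D X <- iris[, 1:4]
##D y <- iris$Species
##D grid <- iai::grid_search(iai::optimal_tree_classifier(max_depth = 1))
##D iai::fit(grid, X, y)                # train across the parameter grid
##D iai::get_grid_result_summary(grid)  # summary of each parameter combination
##D best <- iai::get_best_params(grid)  # best parameter combination found
##D lnr <- iai::get_learner(grid)       # fitted learner using those parameters
##D leaves <- iai::apply(lnr, X)        # leaf index for each point in X
## End(Not run)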
{"package":"iai","topic":"get_grid_result_summary","snippet":"### Name: get_grid_result_summary\n### Title: Return a summary of the results from the grid search\n### Aliases: get_grid_result_summary\n\n### ** Examples\n\n## Not run: iai::get_grid_result_summary(grid)\n\n\n\n"} {"package":"iai","topic":"get_grid_results","snippet":"### Name: get_grid_results\n### Title: Return a summary of the results from the grid search\n### Aliases: get_grid_results\n\n### ** Examples\n\n## Not run: iai::get_grid_results(grid)\n\n\n\n"} {"package":"iai","topic":"get_learner","snippet":"### Name: get_learner\n### Title: Return the fitted learner using the best parameter combination\n### from a grid\n### Aliases: get_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::get_learner(grid)\n\n\n\n"} {"package":"iai","topic":"get_lower_child","snippet":"### Name: get_lower_child\n### Title: Get the index of the lower child at a split node of a tree\n### Aliases: get_lower_child\n\n### ** Examples\n\n## Not run: iai::get_lower_child(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_machine_id","snippet":"### Name: get_machine_id\n### Title: Return the machine ID for the current computer.\n### Aliases: get_machine_id\n\n### ** Examples\n\n## Not run: iai::get_machine_id()\n\n\n\n"} {"package":"iai","topic":"get_num_fits.glmnetcv_learner","snippet":"### Name: get_num_fits.glmnetcv_learner\n### Title: Return the number of fits along the path in a trained GLMNet\n### learner\n### Aliases: get_num_fits.glmnetcv_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::get_num_fits(lnr)\n\n\n\n"} {"package":"iai","topic":"get_num_fits.optimal_feature_selection_learner","snippet":"### Name: get_num_fits.optimal_feature_selection_learner\n### Title: Return the number of fits along the path in a trained Optimal\n### Feature Selection learner\n### Aliases: get_num_fits.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::get_num_fits(lnr)\n\n\n\n"} {"package":"iai","topic":"get_num_nodes","snippet":"### Name: get_num_nodes\n### Title: Return the number of nodes in a trained learner\n### Aliases: get_num_nodes\n\n### ** Examples\n\n## Not run: iai::get_num_nodes(lnr)\n\n\n\n"} {"package":"iai","topic":"get_num_samples","snippet":"### Name: get_num_samples\n### Title: Get the number of training points contained in a node of a tree\n### Aliases: get_num_samples\n\n### ** Examples\n\n## Not run: iai::get_num_samples(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_params","snippet":"### Name: get_params\n### Title: Return the value of all parameters on a learner\n### Aliases: get_params\n\n### ** Examples\n\n## Not run: iai::get_params(lnr)\n\n\n\n"} {"package":"iai","topic":"get_parent","snippet":"### Name: get_parent\n### Title: Get the index of the parent node at a node of a tree\n### Aliases: get_parent\n\n### ** Examples\n\n## Not run: iai::get_parent(lnr, 2)\n\n\n\n"} {"package":"iai","topic":"get_policy_treatment_outcome","snippet":"### Name: get_policy_treatment_outcome\n### Title: Return the quality of the treatments at a node of a tree\n### Aliases: get_policy_treatment_outcome\n\n### ** Examples\n\n## Not run: iai::get_policy_treatment_outcome(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_policy_treatment_outcome_standard_error","snippet":"### Name: get_policy_treatment_outcome_standard_error\n### Title: Return the standard error for the quality of the treatments at a\n### node of a tree\n### Aliases: get_policy_treatment_outcome_standard_error\n\n### ** Examples\n\n## Not run: 
iai::get_policy_treatment_outcome_standard_error(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_policy_treatment_rank","snippet":"### Name: get_policy_treatment_rank\n### Title: Return the treatments ordered from most effective to least\n### effective at a node of a tree\n### Aliases: get_policy_treatment_rank\n\n### ** Examples\n\n## Not run: iai::get_policy_treatment_rank(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_prediction_constant.glmnetcv_learner","snippet":"### Name: get_prediction_constant.glmnetcv_learner\n### Title: Return the constant term in the prediction in a trained GLMNet\n### learner\n### Aliases: get_prediction_constant.glmnetcv_learner\n\n### ** Examples\n\n## Not run: iai::get_prediction_constant(lnr)\n\n\n\n"} {"package":"iai","topic":"get_prediction_constant.optimal_feature_selection_learner","snippet":"### Name: get_prediction_constant.optimal_feature_selection_learner\n### Title: Return the constant term in the prediction in a trained Optimal\n### Feature Selection learner\n### Aliases: get_prediction_constant.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::get_prediction_constant(lnr)\n\n\n\n"} {"package":"iai","topic":"get_prediction_weights.glmnetcv_learner","snippet":"### Name: get_prediction_weights.glmnetcv_learner\n### Title: Return the weights for numeric and categoric features used for\n### prediction in a trained GLMNet learner\n### Aliases: get_prediction_weights.glmnetcv_learner\n\n### ** Examples\n\n## Not run: iai::get_prediction_weights(lnr)\n\n\n\n"} {"package":"iai","topic":"get_prediction_weights.optimal_feature_selection_learner","snippet":"### Name: get_prediction_weights.optimal_feature_selection_learner\n### Title: Return the weights for numeric and categoric features used for\n### prediction in a trained Optimal Feature Selection learner\n### Aliases: get_prediction_weights.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::get_prediction_weights(lnr)\n\n\n\n"} {"package":"iai","topic":"get_prescription_treatment_rank","snippet":"### Name: get_prescription_treatment_rank\n### Title: Return the treatments ordered from most effective to least\n### effective at a node of a tree\n### Aliases: get_prescription_treatment_rank\n\n### ** Examples\n\n## Not run: iai::get_prescription_treatment_rank(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.classification_tree_learner","snippet":"### Name: get_regression_constant.classification_tree_learner\n### Title: Return the constant term in the logistic regression prediction\n### at a node of a classification tree\n### Aliases: get_regression_constant.classification_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_constant(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.classification_tree_multi_learner","snippet":"### Name: get_regression_constant.classification_tree_multi_learner\n### Title: Return the constant term in the logistic regression prediction\n### at a node of a multi-task classification tree\n### Aliases: get_regression_constant.classification_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_constant(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.prescription_tree_learner","snippet":"### Name: get_regression_constant.prescription_tree_learner\n### Title: Return the constant term in the linear regression prediction at\n### a node of a prescription tree\n### Aliases: get_regression_constant.prescription_tree_learner\n\n### ** Examples\n\n## 
Not run: iai::get_regression_constant(lnr, 1, \"A\")\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.regression_tree_learner","snippet":"### Name: get_regression_constant.regression_tree_learner\n### Title: Return the constant term in the linear regression prediction at\n### a node of a regression tree\n### Aliases: get_regression_constant.regression_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_constant(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.regression_tree_multi_learner","snippet":"### Name: get_regression_constant.regression_tree_multi_learner\n### Title: Return the constant term in the linear regression prediction at\n### a node of a multi-task regression tree\n### Aliases: get_regression_constant.regression_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_constant(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_constant.survival_tree_learner","snippet":"### Name: get_regression_constant.survival_tree_learner\n### Title: Return the constant term in the cox regression prediction at a\n### node of a survival tree\n### Aliases: get_regression_constant.survival_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_constant(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.classification_tree_learner","snippet":"### Name: get_regression_weights.classification_tree_learner\n### Title: Return the weights for each feature in the logistic regression\n### prediction at a node of a classification tree\n### Aliases: get_regression_weights.classification_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.classification_tree_multi_learner","snippet":"### Name: get_regression_weights.classification_tree_multi_learner\n### Title: Return the weights for each feature in the logistic regression\n### prediction at a node of a multi-task classification tree\n### Aliases: get_regression_weights.classification_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.prescription_tree_learner","snippet":"### Name: get_regression_weights.prescription_tree_learner\n### Title: Return the weights for each feature in the linear regression\n### prediction at a node of a prescription tree\n### Aliases: get_regression_weights.prescription_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1, \"A\")\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.regression_tree_learner","snippet":"### Name: get_regression_weights.regression_tree_learner\n### Title: Return the weights for each feature in the linear regression\n### prediction at a node of a regression tree\n### Aliases: get_regression_weights.regression_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.regression_tree_multi_learner","snippet":"### Name: get_regression_weights.regression_tree_multi_learner\n### Title: Return the weights for each feature in the linear regression\n### prediction at a node of a multi-task regression tree\n### Aliases: get_regression_weights.regression_tree_multi_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_regression_weights.survival_tree_learner","snippet":"### Name: get_regression_weights.survival_tree_learner\n### Title: Return the 
weights for each feature in the cox regression\n### prediction at a node of a survival tree\n### Aliases: get_regression_weights.survival_tree_learner\n\n### ** Examples\n\n## Not run: iai::get_regression_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_rich_output_params","snippet":"### Name: get_rich_output_params\n### Title: Return the current global rich output parameter settings\n### Aliases: get_rich_output_params\n\n### ** Examples\n\n## Not run: iai::get_rich_output_params()\n\n\n\n"} {"package":"iai","topic":"get_roc_curve_data","snippet":"### Name: get_roc_curve_data\n### Title: Extract the underlying data from an ROC curve\n### Aliases: get_roc_curve_data\n\n### ** Examples\n\n## Not run: iai::get_roc_curve_data(curve)\n\n\n\n"} {"package":"iai","topic":"get_split_categories","snippet":"### Name: get_split_categories\n### Title: Return the categoric/ordinal information used in the split at a\n### node of a tree\n### Aliases: get_split_categories\n\n### ** Examples\n\n## Not run: iai::get_split_categories(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_split_feature","snippet":"### Name: get_split_feature\n### Title: Return the feature used in the split at a node of a tree\n### Aliases: get_split_feature\n\n### ** Examples\n\n## Not run: iai::get_split_feature(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_split_threshold","snippet":"### Name: get_split_threshold\n### Title: Return the threshold used in the split at a node of a tree\n### Aliases: get_split_threshold\n\n### ** Examples\n\n## Not run: iai::get_split_threshold(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_split_weights","snippet":"### Name: get_split_weights\n### Title: Return the weights for numeric and categoric features used in\n### the hyperplane split at a node of a tree\n### Aliases: get_split_weights\n\n### ** Examples\n\n## Not run: iai::get_split_weights(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_stability_results","snippet":"### Name: get_stability_results\n### Title: Return the trained trees in order of increasing objective value,\n### along with their variable importance scores for each feature\n### Aliases: get_stability_results\n\n### ** Examples\n\n## Not run: iai::get_stability_results(stability)\n\n\n\n"} {"package":"iai","topic":"get_survival_curve","snippet":"### Name: get_survival_curve\n### Title: Return the survival curve at a node of a tree\n### Aliases: get_survival_curve\n\n### ** Examples\n\n## Not run: iai::get_survival_curve(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_survival_curve_data","snippet":"### Name: get_survival_curve_data\n### Title: Extract the underlying data from a survival curve (as returned\n### by 'predict.survival_learner' or 'get_survival_curve')\n### Aliases: get_survival_curve_data\n\n### ** Examples\n\n## Not run: iai::get_survival_curve_data(curve)\n\n\n\n"} {"package":"iai","topic":"get_survival_expected_time","snippet":"### Name: get_survival_expected_time\n### Title: Return the predicted expected survival time at a node of a tree\n### Aliases: get_survival_expected_time\n\n### ** Examples\n\n## Not run: iai::get_survival_expected_time(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_survival_hazard","snippet":"### Name: get_survival_hazard\n### Title: Return the predicted hazard ratio at a node of a tree\n### Aliases: get_survival_hazard\n\n### ** Examples\n\n## Not run: iai::get_survival_hazard(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"get_train_errors","snippet":"### Name: get_train_errors\n### Title: Extract the training objective value for 
each candidate tree in\n### the comparison, where a lower value indicates a better solution\n### Aliases: get_train_errors\n\n### ** Examples\n\n## Not run: iai::get_train_errors(similarity)\n\n\n\n"} {"package":"iai","topic":"get_tree","snippet":"### Name: get_tree\n### Title: Return a copy of the learner that uses a specific tree rather\n### than the tree with the best training objective.\n### Aliases: get_tree\n\n### ** Examples\n\n## Not run: iai::get_tree(lnr, index)\n\n\n\n"} {"package":"iai","topic":"get_upper_child","snippet":"### Name: get_upper_child\n### Title: Get the index of the upper child at a split node of a tree\n### Aliases: get_upper_child\n\n### ** Examples\n\n## Not run: iai::get_upper_child(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"glmnetcv_classifier","snippet":"### Name: glmnetcv_classifier\n### Title: Learner for training GLMNet models for classification problems\n### with cross-validation\n### Aliases: glmnetcv_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::glmnetcv_classifier()\n\n\n\n"} {"package":"iai","topic":"glmnetcv_regressor","snippet":"### Name: glmnetcv_regressor\n### Title: Learner for training GLMNet models for regression problems with\n### cross-validation\n### Aliases: glmnetcv_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::glmnetcv_regressor()\n\n\n\n"} {"package":"iai","topic":"glmnetcv_survival_learner","snippet":"### Name: glmnetcv_survival_learner\n### Title: Learner for training GLMNet models for survival problems with\n### cross-validation\n### Aliases: glmnetcv_survival_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::glmnetcv_survival_learner()\n\n\n\n"} {"package":"iai","topic":"grid_search","snippet":"### Name: grid_search\n### Title: Controls grid search over parameter combinations\n### Aliases: grid_search\n\n### ** Examples\n\n## Not run: \n##D grid <- iai::grid_search(\n##D iai::optimal_tree_classifier(\n##D random_seed = 1,\n##D ),\n##D max_depth = 1:5,\n##D )\n## End(Not run)\n\n\n"} {"package":"iai","topic":"iai_setup","snippet":"### Name: iai_setup\n### Title: Initialize Julia and the IAI package.\n### Aliases: iai_setup\n\n### ** Examples\n\n## Not run: iai::iai_setup()\n\n\n\n"} {"package":"iai","topic":"imputation_learner","snippet":"### Name: imputation_learner\n### Title: Generic learner for imputing missing values\n### Aliases: imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::imputation_learner(method = \"opt_tree\")\n\n\n\n"} {"package":"iai","topic":"impute","snippet":"### Name: impute\n### Title: Impute missing values using either a specified method or through\n### validation\n### Aliases: impute\n\n### ** Examples\n\n## Not run: \n##D X <- iris\n##D X[1, 1] <- NA\n##D iai::impute(X)\n## End(Not run)\n\n\n"} {"package":"iai","topic":"impute_cv","snippet":"### Name: impute_cv\n### Title: Impute missing values using cross validation\n### Aliases: impute_cv\n\n### ** Examples\n\n## Not run: \n##D X <- iris\n##D X[1, 1] <- NA\n##D iai::impute_cv(X, list(method = c(\"opt_knn\", \"opt_tree\")))\n## End(Not run)\n\n\n"} {"package":"iai","topic":"install_julia","snippet":"### Name: install_julia\n### Title: Download and install Julia automatically.\n### Aliases: install_julia\n\n### ** Examples\n\n## Not run: iai::install_julia()\n\n\n\n"} {"package":"iai","topic":"install_system_image","snippet":"### Name: install_system_image\n### Title: Download and install the IAI system image automatically.\n### Aliases: install_system_image\n\n### ** Examples\n\n## Not run: 
iai::install_system_image()\n\n\n\n"} {"package":"iai","topic":"is_categoric_split","snippet":"### Name: is_categoric_split\n### Title: Check if a node of a tree applies a categoric split\n### Aliases: is_categoric_split\n\n### ** Examples\n\n## Not run: iai::is_categoric_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_hyperplane_split","snippet":"### Name: is_hyperplane_split\n### Title: Check if a node of a tree applies a hyperplane split\n### Aliases: is_hyperplane_split\n\n### ** Examples\n\n## Not run: iai::is_hyperplane_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_leaf","snippet":"### Name: is_leaf\n### Title: Check if a node of a tree is a leaf\n### Aliases: is_leaf\n\n### ** Examples\n\n## Not run: iai::is_leaf(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_mixed_ordinal_split","snippet":"### Name: is_mixed_ordinal_split\n### Title: Check if a node of a tree applies a mixed ordinal/categoric\n### split\n### Aliases: is_mixed_ordinal_split\n\n### ** Examples\n\n## Not run: iai::is_mixed_ordinal_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_mixed_parallel_split","snippet":"### Name: is_mixed_parallel_split\n### Title: Check if a node of a tree applies a mixed parallel/categoric\n### split\n### Aliases: is_mixed_parallel_split\n\n### ** Examples\n\n## Not run: iai::is_mixed_parallel_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_ordinal_split","snippet":"### Name: is_ordinal_split\n### Title: Check if a node of a tree applies an ordinal split\n### Aliases: is_ordinal_split\n\n### ** Examples\n\n## Not run: iai::is_ordinal_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"is_parallel_split","snippet":"### Name: is_parallel_split\n### Title: Check if a node of a tree applies a parallel split\n### Aliases: is_parallel_split\n\n### ** Examples\n\n## Not run: iai::is_parallel_split(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"load_graphviz","snippet":"### Name: load_graphviz\n### Title: Loads the Julia Graphviz library to permit certain\n### visualizations.\n### Aliases: load_graphviz\n\n### ** Examples\n\n## Not run: iai::load_graphviz()\n\n\n\n"} {"package":"iai","topic":"mean_imputation_learner","snippet":"### Name: mean_imputation_learner\n### Title: Learner for conducting mean imputation\n### Aliases: mean_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::mean_imputation_learner()\n\n\n\n"} {"package":"iai","topic":"missing_goes_lower","snippet":"### Name: missing_goes_lower\n### Title: Check if points with missing values go to the lower child at a\n### split node of a tree\n### Aliases: missing_goes_lower\n\n### ** Examples\n\n## Not run: iai::missing_goes_lower(lnr, 1)\n\n\n\n"} {"package":"iai","topic":"multi_questionnaire.default","snippet":"### Name: multi_questionnaire.default\n### Title: Construct an interactive questionnaire from multiple specified\n### learners\n### Aliases: multi_questionnaire.default\n\n### ** Examples\n\n## Not run: \n##D iai::multi_questionnaire(list(\"Questionnaire for\" = list(\n##D \"first learner\" = lnr1,\n##D \"second learner\" = lnr2\n##D )))\n## End(Not run)\n\n\n\n"} {"package":"iai","topic":"multi_questionnaire.grid_search","snippet":"### Name: multi_questionnaire.grid_search\n### Title: Construct an interactive tree questionnaire using multiple\n### learners from the results of a grid search\n### Aliases: multi_questionnaire.grid_search\n\n### ** Examples\n\n## Not run: iai::multi_questionnaire(grid)\n\n\n\n"} {"package":"iai","topic":"multi_tree_plot.default","snippet":"### Name: 
multi_tree_plot.default\n### Title: Construct an interactive tree visualization of multiple tree\n### learners as specified by questions\n### Aliases: multi_tree_plot.default\n\n### ** Examples\n\n## Not run: \n##D iai::multi_tree_plot(list(\"Visualizing\" = list(\n##D \"first learner\" = lnr1,\n##D \"second learner\" = lnr2\n##D )))\n## End(Not run)\n\n\n\n"} {"package":"iai","topic":"multi_tree_plot.grid_search","snippet":"### Name: multi_tree_plot.grid_search\n### Title: Construct an interactive tree visualization of multiple tree\n### learners from the results of a grid search\n### Aliases: multi_tree_plot.grid_search\n\n### ** Examples\n\n## Not run: iai::multi_tree_plot(grid)\n\n\n\n"} {"package":"iai","topic":"numeric_classification_reward_estimator","snippet":"### Name: numeric_classification_reward_estimator\n### Title: Learner for conducting reward estimation with numeric treatments\n### and classification outcomes\n### Aliases: numeric_classification_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::numeric_classification_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"numeric_regression_reward_estimator","snippet":"### Name: numeric_regression_reward_estimator\n### Title: Learner for conducting reward estimation with numeric treatments\n### and regression outcomes\n### Aliases: numeric_regression_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::numeric_regression_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"numeric_reward_estimator","snippet":"### Name: numeric_reward_estimator\n### Title: Learner for conducting reward estimation with numeric treatments\n### Aliases: numeric_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::numeric_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"numeric_survival_reward_estimator","snippet":"### Name: numeric_survival_reward_estimator\n### Title: Learner for conducting reward estimation with numeric treatments\n### and survival outcomes\n### Aliases: numeric_survival_reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::numeric_survival_reward_estimator()\n\n\n\n"} {"package":"iai","topic":"opt_knn_imputation_learner","snippet":"### Name: opt_knn_imputation_learner\n### Title: Learner for conducting optimal k-NN imputation\n### Aliases: opt_knn_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::opt_knn_imputation_learner()\n\n\n\n"} {"package":"iai","topic":"opt_svm_imputation_learner","snippet":"### Name: opt_svm_imputation_learner\n### Title: Learner for conducting optimal SVM imputation\n### Aliases: opt_svm_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::opt_svm_imputation_learner()\n\n\n\n"} {"package":"iai","topic":"opt_tree_imputation_learner","snippet":"### Name: opt_tree_imputation_learner\n### Title: Learner for conducting optimal tree-based imputation\n### Aliases: opt_tree_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::opt_tree_imputation_learner()\n\n\n\n"} {"package":"iai","topic":"optimal_feature_selection_classifier","snippet":"### Name: optimal_feature_selection_classifier\n### Title: Learner for conducting Optimal Feature Selection on\n### classification problems\n### Aliases: optimal_feature_selection_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_feature_selection_classifier()\n\n\n\n"} {"package":"iai","topic":"optimal_feature_selection_regressor","snippet":"### Name: optimal_feature_selection_regressor\n### Title: Learner for conducting Optimal Feature Selection on regression\n### 
problems\n### Aliases: optimal_feature_selection_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_feature_selection_regressor()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_classifier","snippet":"### Name: optimal_tree_classifier\n### Title: Learner for training Optimal Classification Trees\n### Aliases: optimal_tree_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_classifier()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_multi_classifier","snippet":"### Name: optimal_tree_multi_classifier\n### Title: Learner for training multi-task Optimal Classification Trees\n### Aliases: optimal_tree_multi_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_multi_classifier()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_multi_regressor","snippet":"### Name: optimal_tree_multi_regressor\n### Title: Learner for training multi-task Optimal Regression Trees\n### Aliases: optimal_tree_multi_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_multi_regressor()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_policy_maximizer","snippet":"### Name: optimal_tree_policy_maximizer\n### Title: Learner for training Optimal Policy Trees where the policy\n### should aim to maximize outcomes\n### Aliases: optimal_tree_policy_maximizer\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_policy_maximizer()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_policy_minimizer","snippet":"### Name: optimal_tree_policy_minimizer\n### Title: Learner for training Optimal Policy Trees where the policy\n### should aim to minimize outcomes\n### Aliases: optimal_tree_policy_minimizer\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_policy_minimizer()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_prescription_maximizer","snippet":"### Name: optimal_tree_prescription_maximizer\n### Title: Learner for training Optimal Prescriptive Trees where the\n### prescriptions should aim to maximize outcomes\n### Aliases: optimal_tree_prescription_maximizer\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_prescription_maximizer()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_prescription_minimizer","snippet":"### Name: optimal_tree_prescription_minimizer\n### Title: Learner for training Optimal Prescriptive Trees where the\n### prescriptions should aim to minimize outcomes\n### Aliases: optimal_tree_prescription_minimizer\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_prescription_minimizer()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_regressor","snippet":"### Name: optimal_tree_regressor\n### Title: Learner for training Optimal Regression Trees\n### Aliases: optimal_tree_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_regressor()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_survival_learner","snippet":"### Name: optimal_tree_survival_learner\n### Title: Learner for training Optimal Survival Trees\n### Aliases: optimal_tree_survival_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_survival_learner()\n\n\n\n"} {"package":"iai","topic":"optimal_tree_survivor","snippet":"### Name: optimal_tree_survivor\n### Title: Learner for training Optimal Survival Trees\n### Aliases: optimal_tree_survivor\n\n### ** Examples\n\n## Not run: lnr <- iai::optimal_tree_survivor()\n\n\n\n"} {"package":"iai","topic":"plot.grid_search","snippet":"### Name: plot.grid_search\n### Title: Plot grid search results for Optimal Feature Selection\n### learners\n### Aliases: plot.grid_search\n\n### ** 
Examples\n\n## Not run: plot(grid)\n\n\n\n"} {"package":"iai","topic":"plot.roc_curve","snippet":"### Name: plot.roc_curve\n### Title: Plot an ROC curve\n### Aliases: plot.roc_curve\n\n### ** Examples\n\n## Not run: plot(roc)\n\n\n\n"} {"package":"iai","topic":"plot.similarity_comparison","snippet":"### Name: plot.similarity_comparison\n### Title: Plot a similarity comparison\n### Aliases: plot.similarity_comparison\n\n### ** Examples\n\n## Not run: plot(similarity)\n\n\n\n"} {"package":"iai","topic":"plot.stability_analysis","snippet":"### Name: plot.stability_analysis\n### Title: Plot a stability analysis\n### Aliases: plot.stability_analysis\n\n### ** Examples\n\n## Not run: plot(stability)\n\n\n\n"} {"package":"iai","topic":"predict.categorical_reward_estimator","snippet":"### Name: predict.categorical_reward_estimator\n### Title: Return counterfactual rewards estimated by a categorical reward\n### estimator for each observation in the supplied data\n### Aliases: predict.categorical_reward_estimator\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"predict.glmnetcv_learner","snippet":"### Name: predict.glmnetcv_learner\n### Title: Return the predictions made by a GLMNet learner for each point\n### in the features\n### Aliases: predict.glmnetcv_learner\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict.numeric_reward_estimator","snippet":"### Name: predict.numeric_reward_estimator\n### Title: Return counterfactual rewards estimated by a numeric reward\n### estimator for each observation in the supplied data\n### Aliases: predict.numeric_reward_estimator\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"predict.optimal_feature_selection_learner","snippet":"### Name: predict.optimal_feature_selection_learner\n### Title: Return the predictions made by an Optimal Feature Selection\n### learner for each point in the features\n### Aliases: predict.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict.supervised_learner","snippet":"### Name: predict.supervised_learner\n### Title: Return the predictions made by a supervised learner for each\n### point in the features\n### Aliases: predict.supervised_learner\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict.supervised_multi_learner","snippet":"### Name: predict.supervised_multi_learner\n### Title: Return the predictions made by a multi-task supervised learner\n### for each point in the features\n### Aliases: predict.supervised_multi_learner\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict.survival_learner","snippet":"### Name: predict.survival_learner\n### Title: Return the predictions made by a survival learner for each point\n### in the features\n### Aliases: predict.survival_learner\n\n### ** Examples\n\n## Not run: iai::predict(lnr, X, t = 10)\n\n\n\n"} {"package":"iai","topic":"predict_expected_survival_time.glmnetcv_survival_learner","snippet":"### Name: predict_expected_survival_time.glmnetcv_survival_learner\n### Title: Return the expected survival time estimate made by a\n### 'glmnetcv_survival_learner' for each point in the features.\n### Aliases: predict_expected_survival_time.glmnetcv_survival_learner\n\n### ** Examples\n\n## Not run: iai::predict_expected_survival_time(lnr, X)\n\n\n\n"} 
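The survival prediction helpers in the nearby records chain naturally on one fitted learner. A minimal sketch, assuming lnr is an already-fitted survival learner (e.g. from optimal_tree_survival_learner) and X its features:
## Not run: 
##D probs <- iai::predict(lnr, X, t = 10)                  # survival probability at time 10
##D times <- iai::predict_expected_survival_time(lnr, X)   # expected survival times
##D hazards <- iai::predict_hazard(lnr, X)                 # fitted hazard coefficients
## End(Not run)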
{"package":"iai","topic":"predict_expected_survival_time.survival_curve","snippet":"### Name: predict_expected_survival_time.survival_curve\n### Title: Return the expected survival time estimate made by a survival\n### curve (as returned by 'predict.survival_learner' or\n### 'get_survival_curve')\n### Aliases: predict_expected_survival_time.survival_curve\n\n### ** Examples\n\n## Not run: iai::predict_expected_survival_time(curve)\n\n\n\n"} {"package":"iai","topic":"predict_expected_survival_time.survival_learner","snippet":"### Name: predict_expected_survival_time.survival_learner\n### Title: Return the expected survival time estimate made by a survival\n### learner for each point in the features.\n### Aliases: predict_expected_survival_time.survival_learner\n\n### ** Examples\n\n## Not run: iai::predict_expected_survival_time(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_hazard.glmnetcv_survival_learner","snippet":"### Name: predict_hazard.glmnetcv_survival_learner\n### Title: Return the fitted hazard coefficient estimate made by a\n### 'glmnetcv_survival_learner' for each point in the features.\n### Aliases: predict_hazard.glmnetcv_survival_learner\n\n### ** Examples\n\n## Not run: iai::predict_hazard(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_hazard.survival_learner","snippet":"### Name: predict_hazard.survival_learner\n### Title: Return the fitted hazard coefficient estimate made by a survival\n### learner for each point in the features.\n### Aliases: predict_hazard.survival_learner\n\n### ** Examples\n\n## Not run: iai::predict_hazard(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_outcomes.policy_learner","snippet":"### Name: predict_outcomes.policy_learner\n### Title: Return the predicted outcome for each treatment made by a policy\n### learner for each point in the features\n### Aliases: predict_outcomes.policy_learner\n\n### ** Examples\n\n## Not run: iai::predict_outcomes(lnr, X, rewards)\n\n\n\n"} {"package":"iai","topic":"predict_outcomes.prescription_learner","snippet":"### Name: predict_outcomes.prescription_learner\n### Title: Return the predicted outcome for each treatment made by a\n### prescription learner for each point in the features\n### Aliases: predict_outcomes.prescription_learner\n\n### ** Examples\n\n## Not run: iai::predict_outcomes(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_proba.classification_learner","snippet":"### Name: predict_proba.classification_learner\n### Title: Return the probabilities of class membership predicted by a\n### classification learner for each point in the features\n### Aliases: predict_proba.classification_learner\n\n### ** Examples\n\n## Not run: iai::predict_proba(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_proba.classification_multi_learner","snippet":"### Name: predict_proba.classification_multi_learner\n### Title: Return the probabilities of class membership predicted by a\n### multi-task classification learner for each point in the features\n### Aliases: predict_proba.classification_multi_learner\n\n### ** Examples\n\n## Not run: iai::predict_proba(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_proba.glmnetcv_classifier","snippet":"### Name: predict_proba.glmnetcv_classifier\n### Title: Return the probabilities of class membership predicted by a\n### 'glmnetcv_classifier' learner for each point in the features\n### Aliases: predict_proba.glmnetcv_classifier\n\n### ** Examples\n\n## Not run: iai::predict_proba(lnr, X)\n\n\n\n"} 
{"package":"iai","topic":"predict_reward.categorical_reward_estimator","snippet":"### Name: predict_reward.categorical_reward_estimator\n### Title: Return counterfactual rewards estimated by a categorical reward\n### estimator for each observation in the supplied data and predictions\n### Aliases: predict_reward.categorical_reward_estimator\n\n### ** Examples\n\n## Not run: iai::predict_reward(lnr, X, treatments, outcomes, predictions)\n\n\n\n"} {"package":"iai","topic":"predict_reward.numeric_reward_estimator","snippet":"### Name: predict_reward.numeric_reward_estimator\n### Title: Return counterfactual rewards estimated by a numeric reward\n### estimator for each observation in the supplied data and predictions\n### Aliases: predict_reward.numeric_reward_estimator\n\n### ** Examples\n\n## Not run: iai::predict_reward(lnr, X, treatments, outcomes, predictions)\n\n\n\n"} {"package":"iai","topic":"predict_shap","snippet":"### Name: predict_shap\n### Title: Calculate SHAP values for all points in the features using the\n### learner\n### Aliases: predict_shap\n\n### ** Examples\n\n## Not run: iai::predict_shap(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_treatment_outcome","snippet":"### Name: predict_treatment_outcome\n### Title: Return the estimated quality of each treatment in the trained\n### model of the learner for each point in the features\n### Aliases: predict_treatment_outcome\n\n### ** Examples\n\n## Not run: iai::predict_treatment_outcome(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_treatment_outcome_standard_error","snippet":"### Name: predict_treatment_outcome_standard_error\n### Title: Return the standard error for the estimated quality of each\n### treatment in the trained model of the learner for each point in the\n### features\n### Aliases: predict_treatment_outcome_standard_error\n\n### ** Examples\n\n## Not run: iai::predict_treatment_outcome_standard_error(lnr, X)\n\n\n\n"} {"package":"iai","topic":"predict_treatment_rank","snippet":"### Name: predict_treatment_rank\n### Title: Return the treatments in ranked order of effectiveness for each\n### point in the features\n### Aliases: predict_treatment_rank\n\n### ** Examples\n\n## Not run: iai::predict_treatment_rank(lnr, X)\n\n\n\n"} {"package":"iai","topic":"print_path","snippet":"### Name: print_path\n### Title: Print the decision path through the learner for each sample in\n### the features\n### Aliases: print_path\n\n### ** Examples\n\n## Not run: \n##D iai::print_path(lnr, X)\n##D iai::print_path(lnr, X, 1)\n## End(Not run)\n\n\n\n"} {"package":"iai","topic":"prune_trees","snippet":"### Name: prune_trees\n### Title: Use the trained trees in a learner along with the supplied\n### validation data to determine the best value for the 'cp' parameter\n### and then prune the trees according to this value\n### Aliases: prune_trees\n\n### ** Examples\n\n## Not run: iai::prune_trees(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"questionnaire.optimal_feature_selection_learner","snippet":"### Name: questionnaire.optimal_feature_selection_learner\n### Title: Specify an interactive questionnaire of an Optimal Feature\n### Selection learner\n### Aliases: questionnaire.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::questionnaire(lnr)\n\n\n\n"} {"package":"iai","topic":"questionnaire.tree_learner","snippet":"### Name: questionnaire.tree_learner\n### Title: Specify an interactive questionnaire of a tree learner\n### Aliases: questionnaire.tree_learner\n\n### ** Examples\n\n## Not run: 
iai::questionnaire(lnr)\n\n\n\n"} {"package":"iai","topic":"rand_imputation_learner","snippet":"### Name: rand_imputation_learner\n### Title: Learner for conducting random imputation\n### Aliases: rand_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::rand_imputation_learner()\n\n\n\n"} {"package":"iai","topic":"random_forest_classifier","snippet":"### Name: random_forest_classifier\n### Title: Learner for training random forests for classification problems\n### Aliases: random_forest_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::random_forest_classifier()\n\n\n\n"} {"package":"iai","topic":"random_forest_regressor","snippet":"### Name: random_forest_regressor\n### Title: Learner for training random forests for regression problems\n### Aliases: random_forest_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::random_forest_regressor()\n\n\n\n"} {"package":"iai","topic":"random_forest_survival_learner","snippet":"### Name: random_forest_survival_learner\n### Title: Learner for training random forests for survival problems\n### Aliases: random_forest_survival_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::random_forest_survival_learner()\n\n\n\n"} {"package":"iai","topic":"read_json","snippet":"### Name: read_json\n### Title: Read in a learner or grid saved in JSON format\n### Aliases: read_json\n\n### ** Examples\n\n## Not run: obj <- iai::read_json(\"out.json\")\n\n\n\n"} {"package":"iai","topic":"refit_leaves","snippet":"### Name: refit_leaves\n### Title: Refit the models in the leaves of a trained learner using the\n### supplied data\n### Aliases: refit_leaves\n\n### ** Examples\n\n## Not run: iai::refit_leaves(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"release_license","snippet":"### Name: release_license\n### Title: Release any IAI license held by the current session.\n### Aliases: release_license\n\n### ** Examples\n\n## Not run: iai::release_license()\n\n\n\n"} {"package":"iai","topic":"reset_display_label","snippet":"### Name: reset_display_label\n### Title: Reset the predicted probability displayed to be that of the\n### predicted label when visualizing a learner\n### Aliases: reset_display_label\n\n### ** Examples\n\n## Not run: iai::reset_display_label(lnr)\n\n\n\n"} {"package":"iai","topic":"resume_from_checkpoint","snippet":"### Name: resume_from_checkpoint\n### Title: Resume training from a checkpoint file\n### Aliases: resume_from_checkpoint\n\n### ** Examples\n\n## Not run: obj <- iai::resume_from_checkpoint(\"checkpoint.json\")\n\n\n\n"} {"package":"iai","topic":"reward_estimator","snippet":"### Name: reward_estimator\n### Title: Learner for conducting reward estimation with categorical\n### treatments\n### Aliases: reward_estimator\n\n### ** Examples\n\n## Not run: lnr <- iai::reward_estimator()\n\n\n\n"} {"package":"iai","topic":"roc_curve.classification_learner","snippet":"### Name: roc_curve.classification_learner\n### Title: Construct an ROC curve using a trained classification learner on\n### the given data\n### Aliases: roc_curve.classification_learner\n\n### ** Examples\n\n## Not run: iai::roc_curve(lnr, X, y)\n\n\n\n"} {"package":"iai","topic":"roc_curve.classification_multi_learner","snippet":"### Name: roc_curve.classification_multi_learner\n### Title: Construct an ROC curve using a trained multi-task classification\n### learner on the given data\n### Aliases: roc_curve.classification_multi_learner\n\n### ** Examples\n\n## Not run: iai::roc_curve(lnr, X, y)\n\n\n\n"} 
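The roc_curve methods return a curve object accepted by the plotting and export helpers elsewhere in this file (get_roc_curve_data, plot.roc_curve, show_in_browser.roc_curve, write_html.roc_curve). A minimal sketch, assuming a fitted classification learner lnr with data X and y:
## Not run: 
##D curve <- iai::roc_curve(lnr, X, y)
##D iai::get_roc_curve_data(curve)    # underlying coordinates of the curve
##D plot(curve)                       # static plot
##D iai::show_in_browser(curve)       # interactive browser visualization
## End(Not run)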
{"package":"iai","topic":"roc_curve.default","snippet":"### Name: roc_curve.default\n### Title: Construct an ROC curve from predicted probabilities and true\n### labels\n### Aliases: roc_curve.default\n\n### ** Examples\n\n## Not run: iai::roc_curve(probs, y, positive_label=positive_label)\n\n\n\n"} {"package":"iai","topic":"roc_curve.glmnetcv_classifier","snippet":"### Name: roc_curve.glmnetcv_classifier\n### Title: Construct an ROC curve using a trained 'glmnetcv_classifier' on\n### the given data\n### Aliases: roc_curve.glmnetcv_classifier\n\n### ** Examples\n\n## Not run: iai::roc_curve(lnr, X, y)\n\n\n\n"} {"package":"iai","topic":"score.categorical_reward_estimator","snippet":"### Name: score.categorical_reward_estimator\n### Title: Calculate the scores for a categorical reward estimator on the\n### given data\n### Aliases: score.categorical_reward_estimator\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"score.default","snippet":"### Name: score.default\n### Title: Calculate the score for a set of predictions on the given data\n### Aliases: score.default\n\n### ** Examples\n\n## Not run: iai::score(\"regression\", y_pred, y_true, criterion=\"mse\")\n\n\n\n"} {"package":"iai","topic":"score.glmnetcv_learner","snippet":"### Name: score.glmnetcv_learner\n### Title: Calculate the score for a GLMNet learner on the given data\n### Aliases: score.glmnetcv_learner\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, y, fit_index=1)\n\n\n\n"} {"package":"iai","topic":"score.numeric_reward_estimator","snippet":"### Name: score.numeric_reward_estimator\n### Title: Calculate the scores for a numeric reward estimator on the given\n### data\n### Aliases: score.numeric_reward_estimator\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, treatments, outcomes)\n\n\n\n"} {"package":"iai","topic":"score.optimal_feature_selection_learner","snippet":"### Name: score.optimal_feature_selection_learner\n### Title: Calculate the score for an Optimal Feature Selection learner on\n### the given data\n### Aliases: score.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, y, fit_index=1)\n\n\n\n"} {"package":"iai","topic":"score.supervised_learner","snippet":"### Name: score.supervised_learner\n### Title: Calculate the score for a model on the given data\n### Aliases: score.supervised_learner\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, y)\n\n\n\n"} {"package":"iai","topic":"score.supervised_multi_learner","snippet":"### Name: score.supervised_multi_learner\n### Title: Calculate the score for a multi-task model on the given data\n### Aliases: score.supervised_multi_learner\n\n### ** Examples\n\n## Not run: iai::score(lnr, X, y)\n\n\n\n"} {"package":"iai","topic":"set_display_label","snippet":"### Name: set_display_label\n### Title: Show the probability of a specified label when visualizing a\n### learner\n### Aliases: set_display_label\n\n### ** Examples\n\n## Not run: iai::set_display_label(lnr, \"A\")\n\n\n\n"} {"package":"iai","topic":"set_julia_seed","snippet":"### Name: set_julia_seed\n### Title: Set the random seed in Julia\n### Aliases: set_julia_seed\n\n### ** Examples\n\n## Not run: iai::set_julia_seed(1)\n\n\n\n"} {"package":"iai","topic":"set_params","snippet":"### Name: set_params\n### Title: Set all supplied parameters on a learner\n### Aliases: set_params\n\n### ** Examples\n\n## Not run: iai::set_params(lnr, random_seed = 1)\n\n\n\n"} 
{"package":"iai","topic":"set_reward_kernel_bandwidth","snippet":"### Name: set_reward_kernel_bandwidth\n### Title: Save a new reward kernel bandwidth inside a learner, and return\n### new reward predictions generated using this bandwidth for the\n### original data used to train the learner.\n### Aliases: set_reward_kernel_bandwidth\n\n### ** Examples\n\n## Not run: iai::set_reward_kernel_bandwidth(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"set_rich_output_param","snippet":"### Name: set_rich_output_param\n### Title: Sets a global rich output parameter\n### Aliases: set_rich_output_param\n\n### ** Examples\n\n## Not run: iai::set_rich_output_param(\"simple_layout\", TRUE)\n\n\n\n"} {"package":"iai","topic":"set_threshold","snippet":"### Name: set_threshold\n### Title: For a binary classification problem, update the the predicted\n### labels in the leaves of the learner to predict a label only if the\n### predicted probability is at least the specified threshold.\n### Aliases: set_threshold\n\n### ** Examples\n\n## Not run: iai::set_threshold(lnr, \"A\", 0.4)\n\n\n\n"} {"package":"iai","topic":"show_in_browser.abstract_visualization","snippet":"### Name: show_in_browser.abstract_visualization\n### Title: Show interactive visualization of an object in the default\n### browser\n### Aliases: show_in_browser.abstract_visualization\n\n### ** Examples\n\n## Not run: iai::show_in_browser(lnr)\n\n\n\n"} {"package":"iai","topic":"show_in_browser.roc_curve","snippet":"### Name: show_in_browser.roc_curve\n### Title: Show interactive visualization of a 'roc_curve' in the default\n### browser\n### Aliases: show_in_browser.roc_curve\n\n### ** Examples\n\n## Not run: iai::show_in_browser(curve)\n\n\n\n"} {"package":"iai","topic":"show_in_browser.tree_learner","snippet":"### Name: show_in_browser.tree_learner\n### Title: Show interactive tree visualization of a tree learner in the\n### default browser\n### Aliases: show_in_browser.tree_learner\n\n### ** Examples\n\n## Not run: iai::show_in_browser(lnr)\n\n\n\n"} {"package":"iai","topic":"show_questionnaire.optimal_feature_selection_learner","snippet":"### Name: show_questionnaire.optimal_feature_selection_learner\n### Title: Show an interactive questionnaire based on an Optimal Feature\n### Selection learner in default browser\n### Aliases: show_questionnaire.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::show_questionnaire(lnr)\n\n\n\n"} {"package":"iai","topic":"show_questionnaire.tree_learner","snippet":"### Name: show_questionnaire.tree_learner\n### Title: Show an interactive questionnaire based on a tree learner in\n### default browser\n### Aliases: show_questionnaire.tree_learner\n\n### ** Examples\n\n## Not run: iai::show_questionnaire(lnr)\n\n\n\n"} {"package":"iai","topic":"similarity_comparison","snippet":"### Name: similarity_comparison\n### Title: Conduct a similarity comparison between the final tree in a\n### learner and all trees in a new learner to consider the tradeoff\n### between training performance and similarity to the original tree\n### Aliases: similarity_comparison\n\n### ** Examples\n\n## Not run: iai::similarity_comparison(lnr, new_lnr, deviations)\n\n\n\n"} {"package":"iai","topic":"single_knn_imputation_learner","snippet":"### Name: single_knn_imputation_learner\n### Title: Learner for conducting heuristic k-NN imputation\n### Aliases: single_knn_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::single_knn_imputation_learner()\n\n\n\n"} 
{"package":"iai","topic":"split_data","snippet":"### Name: split_data\n### Title: Split the data into training and test datasets\n### Aliases: split_data\n\n### ** Examples\n\n## Not run: \n##D X <- iris[, 1:4]\n##D y <- iris$Species\n##D split <- iai::split_data(\"classification\", X, y, train_proportion = 0.75)\n##D train_X <- split$train$X\n##D train_y <- split$train$y\n##D test_X <- split$test$X\n##D test_y <- split$test$y\n## End(Not run)\n\n\n"} {"package":"iai","topic":"stability_analysis","snippet":"### Name: stability_analysis\n### Title: Conduct a stability analysis of the trees in a tree learner\n### Aliases: stability_analysis\n\n### ** Examples\n\n## Not run: iai::stability_analysis(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"transform","snippet":"### Name: transform\n### Title: Impute missing values in a dataframe using a fitted imputation\n### model\n### Aliases: transform\n\n### ** Examples\n\n## Not run: iai::transform(lnr, X)\n\n\n\n"} {"package":"iai","topic":"transform_and_expand","snippet":"### Name: transform_and_expand\n### Title: Transform features with a trained imputation learner and create\n### adaptive indicator features to encode the missing pattern\n### Aliases: transform_and_expand\n\n### ** Examples\n\n## Not run: lnr <- iai::transform_and_expand(lnr, X, type = \"finite\")\n\n\n\n"} {"package":"iai","topic":"tree_plot","snippet":"### Name: tree_plot\n### Title: Specify an interactive tree visualization of a tree learner\n### Aliases: tree_plot\n\n### ** Examples\n\n## Not run: iai::tree_plot(lnr)\n\n\n\n"} {"package":"iai","topic":"tune_reward_kernel_bandwidth","snippet":"### Name: tune_reward_kernel_bandwidth\n### Title: Conduct the reward kernel bandwidth tuning procedure for a range\n### of starting bandwidths and return the final tuned values.\n### Aliases: tune_reward_kernel_bandwidth\n\n### ** Examples\n\n## Not run: iai::tune_reward_kernel_bandwidth(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"variable_importance.learner","snippet":"### Name: variable_importance.learner\n### Title: Generate a ranking of the variables in a learner according to\n### their importance during training. The results are normalized so that\n### they sum to one.\n### Aliases: variable_importance.learner\n\n### ** Examples\n\n## Not run: iai::variable_importance(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"variable_importance.optimal_feature_selection_learner","snippet":"### Name: variable_importance.optimal_feature_selection_learner\n### Title: Generate a ranking of the variables in an Optimal Feature\n### Selection learner according to their importance during training. The\n### results are normalized so that they sum to one.\n### Aliases: variable_importance.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::variable_importance(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"variable_importance.tree_learner","snippet":"### Name: variable_importance.tree_learner\n### Title: Generate a ranking of the variables in a tree learner according\n### to their importance during training. 
The results are normalized so\n### that they sum to one.\n### Aliases: variable_importance.tree_learner\n\n### ** Examples\n\n## Not run: iai::variable_importance(lnr, ...)\n\n\n\n"} {"package":"iai","topic":"variable_importance_similarity","snippet":"### Name: variable_importance_similarity\n### Title: Calculate similarity between the final tree in a tree learner\n### and all trees in a new tree learner using variable importance scores.\n### Aliases: variable_importance_similarity\n\n### ** Examples\n\n## Not run: iai::variable_importance_similarity(lnr, new_lnr)\n\n\n\n"} {"package":"iai","topic":"write_booster","snippet":"### Name: write_booster\n### Title: Write the internal booster saved in the learner to file\n### Aliases: write_booster\n\n### ** Examples\n\n## Not run: iai::write_booster(file.path(tempdir(), \"out.json\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_dot","snippet":"### Name: write_dot\n### Title: Output a learner in .dot format\n### Aliases: write_dot\n\n### ** Examples\n\n## Not run: iai::write_dot(file.path(tempdir(), \"tree.dot\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_html.abstract_visualization","snippet":"### Name: write_html.abstract_visualization\n### Title: Output an object as an interactive browser visualization in HTML\n### format\n### Aliases: write_html.abstract_visualization\n\n### ** Examples\n\n## Not run: iai::write_html(file.path(tempdir(), \"out.html\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_html.roc_curve","snippet":"### Name: write_html.roc_curve\n### Title: Output an ROC curve as an interactive browser visualization in\n### HTML format\n### Aliases: write_html.roc_curve\n\n### ** Examples\n\n## Not run: iai::write_html(file.path(tempdir(), \"roc.html\"), curve)\n\n\n\n"} {"package":"iai","topic":"write_html.tree_learner","snippet":"### Name: write_html.tree_learner\n### Title: Output a tree learner as an interactive browser visualization in\n### HTML format\n### Aliases: write_html.tree_learner\n\n### ** Examples\n\n## Not run: iai::write_html(file.path(tempdir(), \"tree.html\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_json","snippet":"### Name: write_json\n### Title: Output a learner or grid in JSON format\n### Aliases: write_json\n\n### ** Examples\n\n## Not run: iai::write_json(file.path(tempdir(), \"out.json\"), obj)\n\n\n\n"} {"package":"iai","topic":"write_pdf","snippet":"### Name: write_pdf\n### Title: Output a learner as a PDF image\n### Aliases: write_pdf\n\n### ** Examples\n\n## Not run: iai::write_pdf(file.path(tempdir(), \"tree.pdf\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_png","snippet":"### Name: write_png\n### Title: Output a learner as a PNG image\n### Aliases: write_png\n\n### ** Examples\n\n## Not run: iai::write_png(file.path(tempdir(), \"tree.png\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_questionnaire.optimal_feature_selection_learner","snippet":"### Name: write_questionnaire.optimal_feature_selection_learner\n### Title: Output an Optimal Feature Selection learner as an interactive\n### questionnaire in HTML format\n### Aliases: write_questionnaire.optimal_feature_selection_learner\n\n### ** Examples\n\n## Not run: iai::write_questionnaire(file.path(tempdir(), \"questionnaire.html\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_questionnaire.tree_learner","snippet":"### Name: write_questionnaire.tree_learner\n### Title: Output a tree learner as an interactive questionnaire in HTML\n### format\n### Aliases: write_questionnaire.tree_learner\n\n### ** Examples\n\n## Not run: 
iai::write_questionnaire(file.path(tempdir(), \"questionnaire.html\"), lnr)\n\n\n\n"} {"package":"iai","topic":"write_svg","snippet":"### Name: write_svg\n### Title: Output a learner as an SVG image\n### Aliases: write_svg\n\n### ** Examples\n\n## Not run: iai::write_svg(file.path(tempdir(), \"tree.svg\"), lnr)\n\n\n\n"} {"package":"iai","topic":"xgboost_classifier","snippet":"### Name: xgboost_classifier\n### Title: Learner for training XGBoost models for classification problems\n### Aliases: xgboost_classifier\n\n### ** Examples\n\n## Not run: lnr <- iai::xgboost_classifier()\n\n\n\n"} {"package":"iai","topic":"xgboost_regressor","snippet":"### Name: xgboost_regressor\n### Title: Learner for training XGBoost models for regression problems\n### Aliases: xgboost_regressor\n\n### ** Examples\n\n## Not run: lnr <- iai::xgboost_regressor()\n\n\n\n"} {"package":"iai","topic":"xgboost_survival_learner","snippet":"### Name: xgboost_survival_learner\n### Title: Learner for training XGBoost models for survival problems\n### Aliases: xgboost_survival_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::xgboost_survival_learner()\n\n\n\n"} {"package":"iai","topic":"zero_imputation_learner","snippet":"### Name: zero_imputation_learner\n### Title: Learner for conducting zero-imputation\n### Aliases: zero_imputation_learner\n\n### ** Examples\n\n## Not run: lnr <- iai::zero_imputation_learner()\n\n\n\n"} {"package":"dbGaPCheckup","topic":"NA_check","snippet":"### Name: NA_check\n### Title: Missing Value (NA) Check\n### Aliases: NA_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleK)\nNA_check(DD.dict.K, DS.data.K)\nprint(NA_check(DD.dict.K, DS.data.K, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\nNA_check(DD.dict.A, DS.data.A)\nprint(NA_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\n# Example 3: Pass check (though missing_value_check detects a more specific error)\ndata(ExampleS)\nNA_check(DD.dict.S, DS.data.S)\n\n\n"} {"package":"dbGaPCheckup","topic":"NA_precheck","snippet":"### Name: NA_precheck\n### Title: Min Max Required Pre-checks\n### Aliases: NA_precheck\n\n### ** Examples\n\ndata(ExampleB)\nNA_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"add_missing_fields","snippet":"### Name: add_missing_fields\n### Title: Add Missing Fields\n### Aliases: add_missing_fields\n\n### ** Examples\n\n# Example\ndata(ExampleD)\nDD.dict.updated <- add_missing_fields(DD.dict.D, DS.data.D)\n\n\n"} {"package":"dbGaPCheckup","topic":"check_report","snippet":"### Name: check_report\n### Title: Check Report\n### Aliases: check_report\n\n### ** Examples\n\n# Example 1: Incorrectly showing as pass check on first attempt\ndata(ExampleB)\nreport <- check_report(DD.dict.B, DS.data.B)\n# Addition of missing value codes calls attention to error\n# at missing_value_check\nreport <- check_report(DD.dict.B, DS.data.B, non.NA.missing.codes=c(-4444, -9999))\n\n# Example 2: Several fail checks or not attempted\ndata(ExampleC)\nreport <- check_report(DD.dict.C, DS.data.C, non.NA.missing.codes=c(-4444, -9999))\n# Note you can also run report using compact=FALSE\nreport <- check_report(DD.dict.C, DS.data.C, non.NA.missing.codes=c(-4444, -9999), compact = FALSE)\n\n\n"} {"package":"dbGaPCheckup","topic":"complete_check","snippet":"### Name: complete_check\n### Title: Complete Check\n### Aliases: complete_check\n\n### ** Examples\n\n# Example 1\n# Note in this example, the missing value codes are not defined,\n# so the last check ('missing_value_check') doesn't know\n# to check 
for encoded values\ndata(ExampleB)\ncomplete_check(DD.dict.B, DS.data.B)\n# Rerun check after defining missing value codes\ncomplete_check(DD.dict.B, DS.data.B, non.NA.missing.codes=c(-9999, -4444))\n\n# Example 2\ndata(ExampleA)\ncomplete_check(DD.dict.A, DS.data.A, non.NA.missing.codes=c(-9999, -4444))\n\n# Example 3\ndata(ExampleD)\nresults <- complete_check(DD.dict.D, DS.data.D, non.NA.missing.codes=c(-9999, -4444)) \n# View output in greater detail\nresults$Message[2] # Recommend using add_missing_fields\nresults$Information$pkg_field_check.Info # We see that MIN, MAX, and TYPE are all missing\n# Use the add_missing_fields function to add in data\nDD.dict.updated <- add_missing_fields(DD.dict.D, DS.data.D)\n# Be sure to call in the new version of the dictionary (DD.dict.updated)\ncomplete_check(DD.dict.updated, DS.data.D)\n\n\n"} {"package":"dbGaPCheckup","topic":"create_awareness_report","snippet":"### Name: create_awareness_report\n### Title: Create Awareness Report\n### Aliases: create_awareness_report\n\n### ** Examples\n\n## No test: \ndata(ExampleB)\ncreate_awareness_report(DD.dict.B, DS.data.B, non.NA.missing.codes=c(-9999),\n output.path= tempdir(), open.html = FALSE)\n## End(No test)\n\n\n"} {"package":"dbGaPCheckup","topic":"create_report","snippet":"### Name: create_report\n### Title: Create Report\n### Aliases: create_report\n\n### ** Examples\n\n## No test: \ndata(ExampleB)\ncreate_report(DD.dict.B, DS.data.B, sex.split=TRUE, sex.name= \"SEX\",\n start = 3, end = 7, non.NA.missing.codes=c(-9999,-4444),\n output.path= tempdir(), open.html = FALSE)\n## End(No test)\n\n\n"} {"package":"dbGaPCheckup","topic":"decimal_check","snippet":"### Name: decimal_check\n### Title: Decimal Check\n### Aliases: decimal_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleF)\ndecimal_check(DD.dict.F, DS.data.F)\nprint(decimal_check(DD.dict.F, DS.data.F, verbose=FALSE))\n\n# Example 2: Required pre-check fails\ndata(ExampleE)\ndecimal_check(DD.dict.E, DS.data.E)\nprint(decimal_check(DD.dict.E, DS.data.E, verbose=FALSE))\n\n# Example 3: Pass check\ndata(ExampleA)\ndecimal_check(DD.dict.A, DS.data.A)\nprint(decimal_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"description_check","snippet":"### Name: description_check\n### Title: Description Check\n### Aliases: description_check\n\n### ** Examples\n\n# Example 1: Fail check \ndata(ExampleG)\ndescription_check(DD.dict.G)\nprint(description_check(DD.dict.G, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\ndescription_check(DD.dict.A)\nprint(description_check(DD.dict.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"dictionary_search","snippet":"### Name: dictionary_search\n### Title: Data Dictionary Search\n### Aliases: dictionary_search\n\n### ** Examples\n\n# Successful search\ndata(ExampleB)\ndictionary_search(DD.dict.B, search.term=c(\"skinfold\"), search.column=c(\"VARDESC\"))\n# Attempted search in wrong column\ndictionary_search(DD.dict.B, search.term=c(\"skinfold\"), search.column=c(\"VARIABLE_DESCRIPTION\"))\n\n\n"} {"package":"dbGaPCheckup","topic":"dimension_check","snippet":"### Name: dimension_check\n### Title: Dimension Check\n### Aliases: dimension_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleG)\ndimension_check(DD.dict.G, DS.data.G)\nprint(dimension_check(DD.dict=DD.dict.G, DS.data=DS.data.G,verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\ndimension_check(DD.dict.A, DS.data.A)\nprint(dimension_check(DD.dict.A, 
DS.data.A,verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"field_check","snippet":"### Name: field_check\n### Title: Field Check\n### Aliases: field_check\n\n### ** Examples\n\ndata(ExampleA)\nfield_check(DD.dict.A)\nprint(field_check(DD.dict.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"id_check","snippet":"### Name: id_check\n### Title: ID Check\n### Aliases: id_check\n\n### ** Examples\n\n# Example 1: Fail check, 'SUBJECT_ID' not present\ndata(ExampleO)\nid_check(DS.data.O)\nprint(id_check(DS.data.O, verbose=FALSE))\n\n# Example 2: Fail check, 'SUBJECT_ID' includes illegal spaces\ndata(ExampleP)\nid_check(DS.data.P)\nresults <- id_check(DS.data.P)\nresults$Information[[1]]$details\nprint(id_check(DS.data.P, verbose=FALSE))\n\n# Example 3: Pass check\ndata(ExampleA)\nid_check(DS.data.A)\nprint(id_check(DS.data.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"id_first_data","snippet":"### Name: id_first_data\n### Title: Relocate SUBJECT_ID to First Column of Data Set\n### Aliases: id_first_data\n\n### ** Examples\n\ndata(ExampleQ)\nhead(DS.data.Q)\nDS.data.updated <- id_first_data(DS.data.Q)\nhead(DS.data.updated)\n\n\n"} {"package":"dbGaPCheckup","topic":"id_first_dict","snippet":"### Name: id_first_dict\n### Title: Relocate SUBJECT_ID to First Column of Data Dictionary\n### Aliases: id_first_dict\n\n### ** Examples\n\ndata(ExampleQ)\nhead(DD.dict.Q)\nDD.dict.updated <- id_first_dict(DD.dict.Q)\nhead(DD.dict.updated)\n\n\n"} {"package":"dbGaPCheckup","topic":"integer_check","snippet":"### Name: integer_check\n### Title: Integer Check\n### Aliases: integer_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleH)\ninteger_check(DD.dict.H, DS.data.H)\nprint(integer_check(DD.dict.H, DS.data.H, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\ninteger_check(DD.dict.A, DS.data.A)\nprint(integer_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\ndata(ExampleR)\ninteger_check(DD.dict.R, DS.data.R)\nprint(integer_check(DD.dict.R, DS.data.R, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"label_data","snippet":"### Name: label_data\n### Title: Label the data\n### Aliases: label_data\n\n### ** Examples\n\ndata(ExampleB)\nDS_labelled_data <- label_data(DD.dict.B, DS.data.B, non.NA.missing.codes=c(-9999))\nlabelled::var_label(DS_labelled_data$SEX)\nlabelled::val_labels(DS_labelled_data$SEX)\nattributes(DS_labelled_data$SEX)\nlabelled::na_values(DS_labelled_data$HX_DEPRESSION)\n\n\n"} {"package":"dbGaPCheckup","topic":"minmax_check","snippet":"### Name: minmax_check\n### Title: Minimum and Maximum Values Check\n### Aliases: minmax_check\n\n### ** Examples\n\n# Example 1\n# Fail check (incorrectly flagging NA value codes -9999\n# and -4444 as outside of the min max range)\ndata(ExampleA)\nminmax_check(DD.dict.A, DS.data.A)\n# View out of range values:\ndetails <- minmax_check(DD.dict.A, DS.data.A)$Information\ndetails[[1]]$OutOfRangeValues\n# Attempt 2, specifying -9999 and -4444 as missing value\n# codes so check works correctly\nminmax_check(DD.dict.A, DS.data.A, non.NA.missing.codes=c(-9999, -4444))\n\n# Example 2\ndata(ExampleI)\nminmax_check(DD.dict.I, DS.data.I, non.NA.missing.codes=c(-9999, -4444))\n# View out of range values:\ndetails <- minmax_check(DD.dict.I, DS.data.I, non.NA.missing.codes=c(-9999, -4444))$Information\ndetails[[1]]$OutOfRangeValues\n\n\n"} {"package":"dbGaPCheckup","topic":"misc_format_check","snippet":"### Name: misc_format_check\n### Title: Miscellaneous Format Check\n### Aliases: misc_format_check\n\n### 
** Examples\n\n# Example 1: Fail check \ndata(ExampleJ)\nmisc_format_check(DD.dict.J, DS.data.J)\nprint(misc_format_check(DD.dict.J, DS.data.J, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\nmisc_format_check(DD.dict.A, DS.data.A)\nprint(misc_format_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"missing_value_check","snippet":"### Name: missing_value_check\n### Title: Missing Value Check\n### Aliases: missing_value_check\n\n### ** Examples\n\ndata(ExampleB)\nmissing_value_check(DD.dict.B, DS.data.B, non.NA.missing.codes = c(-9999,-4444))\n\ndata(ExampleS)\nmissing_value_check(DD.dict.S, DS.data.S, non.NA.missing.codes = c(-9999,-4444))\n\n\n"} {"package":"dbGaPCheckup","topic":"missingness_summary","snippet":"### Name: missingness_summary\n### Title: Missingness Summary\n### Aliases: missingness_summary\n\n### ** Examples\n\n# Correct usage\ndata(ExampleA)\nmissingness_summary(DS.data.A, non.NA.missing.codes=c(-4444, -9999))\n\n\n"} {"package":"dbGaPCheckup","topic":"mm_precheck","snippet":"### Name: mm_precheck\n### Title: Min Max Required Pre-checks\n### Aliases: mm_precheck\n\n### ** Examples\n\ndata(ExampleB)\nmm_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"mv_precheck","snippet":"### Name: mv_precheck\n### Title: Missing Values Required Pre-checks\n### Aliases: mv_precheck\n\n### ** Examples\n\ndata(ExampleB)\nmv_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"name_check","snippet":"### Name: name_check\n### Title: Name Check\n### Aliases: name_check\n\n### ** Examples\n\n# Example 1: Fail check (name mismatch)\ndata(ExampleM)\nname_check(DD.dict.M, DS.data.M)\nDS.data_updated <- name_correct(DD.dict.M, DS.data.M)\nname_check(DD.dict.M, DS.data_updated)\n\n# Example 2: Pass check\ndata(ExampleA)\nname_check(DD.dict.A, DS.data.A)\nprint(name_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"name_correct","snippet":"### Name: name_correct\n### Title: Name Correction Utility Function\n### Aliases: name_correct\n\n### ** Examples\n\ndata(ExampleM)\nname_check(DD.dict.M, DS.data.M)\nDS.data_updated <- name_correct(DD.dict.M, DS.data.M)\nname_check(DD.dict.M, DS.data_updated)\n\n\n"} {"package":"dbGaPCheckup","topic":"name_precheck","snippet":"### Name: name_precheck\n### Title: Name Pre-checks\n### Aliases: name_precheck\n\n### ** Examples\n\ndata(ExampleB)\nname_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"pkg_field_check","snippet":"### Name: pkg_field_check\n### Title: Package Required Field Check\n### Aliases: pkg_field_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleD)\npkg_field_check(DD.dict.D, DS.data.D)\n# Use the add_missing_fields function to add in data\nDD.dict.updated <- add_missing_fields(DD.dict.D, DS.data.D)\n# Be sure to call in the new version of the dictionary (DD.dict.updated)\npkg_field_check(DD.dict.updated, DS.data.D) \n\n# Example 2: Pass check\ndata(ExampleA)\npkg_field_check(DD.dict.A, DS.data.A)\nprint(pkg_field_check(DD.dict.A, DS.data.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"reorder_data","snippet":"### Name: reorder_data\n### Title: Reorder Data Set Utility Function\n### Aliases: reorder_data\n\n### ** Examples\n\ndata(ExampleN)\nname_check(DD.dict.N, DS.data.N)\nDS.data_updated <- reorder_data(DD.dict.N, DS.data.N)\nname_check(DD.dict.N, DS.data_updated)\n\n\n"} {"package":"dbGaPCheckup","topic":"reorder_dictionary","snippet":"### Name: 
reorder_dictionary\n### Title: Reorder Data Dictionary Utility Function\n### Aliases: reorder_dictionary\n\n### ** Examples\n\ndata(ExampleN)\nname_check(DD.dict.N, DS.data.N)\nDD.dict_updated <- reorder_dictionary(DD.dict.N, DS.data.N)\nname_check(DD.dict_updated, DS.data.N)\n\n\n"} {"package":"dbGaPCheckup","topic":"row_check","snippet":"### Name: row_check\n### Title: Row Check\n### Aliases: row_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleK)\nrow_check(DD.dict.K, DS.data.K)\nprint(row_check(DD.dict.K, DS.data.K, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleC)\nrow_check(DD.dict.C, DS.data.C)\nprint(row_check(DD.dict.C, DS.data.C, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"short_field_check","snippet":"### Name: short_field_check\n### Title: Truncated Field Check\n### Aliases: short_field_check\n\n### ** Examples\n\ndata(ExampleA)\nshort_field_check(DD.dict.A)\n\n\n"} {"package":"dbGaPCheckup","topic":"short_precheck","snippet":"### Name: short_precheck\n### Title: Truncated Pre-check\n### Aliases: short_precheck\n\n### ** Examples\n\ndata(ExampleB)\nshort_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"super_short_precheck","snippet":"### Name: super_short_precheck\n### Title: Very Truncated Pre-check\n### Aliases: super_short_precheck\n\n### ** Examples\n\n# Example 1: Pass check\ndata(ExampleB)\nsuper_short_precheck(DD.dict.B, DS.data.B)\n\n\n"} {"package":"dbGaPCheckup","topic":"type_check","snippet":"### Name: type_check\n### Title: Type Check\n### Aliases: type_check\n\n### ** Examples\n\ndata(ExampleB)\ntype_check(DD.dict.B)\nprint(type_check(DD.dict.B, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"value_meaning_table","snippet":"### Name: value_meaning_table\n### Title: Value-Meaning Table\n### Aliases: value_meaning_table\n\n### ** Examples\n\ndata(ExampleB)\nhead(value_meaning_table(DD.dict.B))\n\n\n"} {"package":"dbGaPCheckup","topic":"value_missing_table","snippet":"### Name: value_missing_table\n### Title: Values Missing Table Awareness Function\n### Aliases: value_missing_table\n\n### ** Examples\n\ndata(ExampleB)\nvalue_missing_table(DD.dict.B, DS.data.B, non.NA.missing.codes = c(-9999))\nprint(value_missing_table(DD.dict.B, DS.data.B, non.NA.missing.codes = c(-9999)))\nresults <- value_missing_table(DD.dict.B, DS.data.B, non.NA.missing.codes = c(-9999))\nresults$report$Information$details\n\n\n"} {"package":"dbGaPCheckup","topic":"values_check","snippet":"### Name: values_check\n### Title: Values Check\n### Aliases: values_check\n\n### ** Examples\n\n# Example 1: Fail check\ndata(ExampleE)\nvalues_check(DD.dict.E)\nprint(values_check(DD.dict.E, verbose=FALSE))\n\n# Example 2: Pass check\ndata(ExampleA)\nvalues_check(DD.dict.A)\nprint(values_check(DD.dict.A, verbose=FALSE))\n\n\n"} {"package":"dbGaPCheckup","topic":"values_precheck","snippet":"### Name: values_precheck\n### Title: Values Pre-Check\n### Aliases: values_precheck\n\n### ** Examples\n\ndata(ExampleB)\nvalues_precheck(DD.dict.B)\n\n\n"} {"package":"sonicscrewdriver","topic":"P_r","snippet":"### Name: P_r\n### Title: The radar equation\n### Aliases: P_r\n\n### ** Examples\n\nP_r(12, 20, 0.05)\nP_r(12, 20, 0.05, G_t=1.2, G_r=1.5, wl=0.045)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"ab_annotations","snippet":"### Name: ab_annotations\n### Title: Get annotations from audioBlast\n### Aliases: ab_annotations\n\n### ** Examples\n\n## Not run: \n##D ab_annotations(taxon=\"Gryllotalpa vineae\")\n## End(Not run)\n\n\n\n"} 
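The dbGaPCheckup examples above run each check one at a time against a data dictionary and data set. As a minimal sketch (a hypothetical loop, not a helper shipped with dbGaPCheckup), several of the two-argument checks demonstrated above can be applied in one pass, collecting their verbose=FALSE results for later inspection:

library(dbGaPCheckup)
data(ExampleA)
# Hypothetical batch run over checks shown in the records above; each one
# takes the dictionary, the data set, and the verbose flag as demonstrated.
checks <- list(NA_check = NA_check, dimension_check = dimension_check,
               name_check = name_check, integer_check = integer_check)
results <- lapply(checks, function(f) f(DD.dict.A, DS.data.A, verbose = FALSE))
str(results, max.level = 1)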
{"package":"sonicscrewdriver","topic":"ab_seqss_nearestStart","snippet":"### Name: ab_seqss_nearestStart\n### Title: Nearest start time\n### Aliases: ab_seqss_nearestStart\n\n### ** Examples\n\n## Not run: \n##D ab_seqss_nearestStart(date=\"2020-05-15\",time=\"1500\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"addSpectra","snippet":"### Name: addSpectra\n### Title: Add two spectra from seewave\n### Aliases: addSpectra\n\n### ** Examples\n\n## Not run: \n##D addSpectra(spec1, spec2)\n##D addSpectra(spec1, spec2, coerceNegative=\"input\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"audiomoth_config","snippet":"### Name: audiomoth_config\n### Title: Read AudioMoth configuration file\n### Aliases: audiomoth_config\n\n### ** Examples\n\n## Not run: \n##D audiomoth_config(\"./CONFIG.TXT\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"audiomoth_wave","snippet":"### Name: audiomoth_wave\n### Title: Read AudioMoth metadata from a wave file\n### Aliases: audiomoth_wave\n\n### ** Examples\n\n## Not run: \n##D audiomoth_wavew(\"./FILENAME.WAV\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"autoBandPass","snippet":"### Name: autoBandPass\n### Title: Automatic Band Pass Filter\n### Aliases: autoBandPass\n\n### ** Examples\n\n## Not run: \n##D autoBandPass(sheep)\n##D autoBandPass(sheep, bw=\"-3dB\", n.bw=1, lowcut=1000)\n##D autoBandPass(sheep, bw=\"-10dB\", n.bw=2, lowcut=0)\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"beatComplexity","snippet":"### Name: beatComplexity\n### Title: Beat spectrum complexity\n### Aliases: beatComplexity\n\n### ** Examples\n\n## Not run: \n##D beatComplexity(sheep)\n##D beatComplexity(sheep, plot=TRUE)\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"beatSpectrum","snippet":"### Name: beatSpectrum\n### Title: Computes a beat spectrum\n### Aliases: beatSpectrum\n\n### ** Examples\n\n## Not run: \n##D beatSpectrum(sheep)\n##D beatSpectrum(sheep, min_period=0.005, max_period=30, dj=1/32)\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"convert2Celsius","snippet":"### Name: convert2Celsius\n### Title: Convert temperature to Celsius\n### Aliases: convert2Celsius\n\n### ** Examples\n\nconvert2Celsius(15, input=\"K\")\nconvert2Celsius(15, input=\"F\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"convert2Fahrenheit","snippet":"### Name: convert2Fahrenheit\n### Title: Convert temperature to Fahrenheit\n### Aliases: convert2Fahrenheit\n\n### ** Examples\n\n## Not run: \n##D convert2Fahrenheit(15, input = \"C\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"convert2Kelvin","snippet":"### Name: convert2Kelvin\n### Title: Convert temperature to Kelvin\n### Aliases: convert2Kelvin\n\n### ** Examples\n\nconvert2Kelvin(15, input=\"C\")\nconvert2Kelvin(15, input=\"F\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"convert2Pascals","snippet":"### Name: convert2Pascals\n### Title: Convert pressure to Pascals\n### Aliases: convert2Pascals\n\n### ** Examples\n\nconvert2Pascals(1000, input=\"kPa\")\nconvert2Pascals(10, input=\"dyne_cm2\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"convert2dyne_cm2","snippet":"### Name: convert2dyne_cm2\n### Title: Convert pressure to dyne per square centimetre\n### Aliases: convert2dyne_cm2\n\n### ** Examples\n\nconvert2dyne_cm2(1, input=\"Pa\")\nconvert2dyne_cm2(1, input=\"kPa\")\n\n\n"} {"package":"sonicscrewdriver","topic":"cutws","snippet":"### Name: cutws\n### Title: Cut wave by 
samples\n### Aliases: cutws\n\n### ** Examples\n\n## Not run: \n##D cutws(sheep, 1, 20)\n##D cutws(sheep, 1, 20, plot=TRUE)\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"data2Wave","snippet":"### Name: data2Wave\n### Title: Convert data into a Wave object\n### Aliases: data2Wave\n\n### ** Examples\n\npattern <- seq(from=-1, to=1, length.out=100)\ndata <- rep.int(pattern, 100)\nw <- data2Wave(data)\n\n\n"} {"package":"sonicscrewdriver","topic":"dayPhase","snippet":"### Name: dayPhase\n### Title: Phase of day\n### Aliases: dayPhase\n\n### ** Examples\n\ndayPhase(time=Sys.time(), lat=50.1, lon=1.83, tz=\"UTC\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"defaultCluster","snippet":"### Name: defaultCluster\n### Title: Create Default Cluster for Windowing\n### Aliases: defaultCluster\n### Keywords: wave\n\n### ** Examples\n\n## Not run: \n##D cl <- defaultCluster()\n##D stopCluster(cl)\n##D cl <- defaultCluster(FALSE)\n##D stopCluster(cl)\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"dutyCycle","snippet":"### Name: dutyCycle\n### Title: Calculate the duty cycle of a wave\n### Aliases: dutyCycle\n\n### ** Examples\n\nwave <- tuneR::sine(2000)\ndc <- dutyCycle(wave)\npc <- dutyCycle(wave, output=\"percent\")\n\n\n"} {"package":"sonicscrewdriver","topic":"entropyStats","snippet":"### Name: entropyStats\n### Title: Various measurements of frequency values for a Wave object\n### Aliases: entropyStats\n\n### ** Examples\n\n## Not run: \n##D entropyStats(sheep)\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"frequencySound","snippet":"### Name: frequencySound\n### Title: Get the frequency from wavelength and speed of sound\n### Aliases: frequencySound\n\n### ** Examples\n\nf <- frequencySound(wl=100, s=343)\n\n\n"} {"package":"sonicscrewdriver","topic":"gs_transcribe","snippet":"### Name: gs_transcribe\n### Title: Google Speech API Transcribe\n### Aliases: gs_transcribe\n\n### ** Examples\n\n## Not run: \n##D gs_transcribe(\"demo.wav\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"jitter","snippet":"### Name: jitter\n### Title: Calculate the jitter in a Wave object\n### Aliases: jitter\n\n### ** Examples\n\n## Not run: \n##D jitter(sheep, method=\"absolute\")\n##D jitter(sheep, method=\"relative\")\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"labelPadding","snippet":"### Name: labelPadding\n### Title: Pad labels with interval\n### Aliases: labelPadding\n\n### ** Examples\n\n## Not run: \n##D labelPadding(t, pad=2, max_t=duration(wave))\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"labelReduction","snippet":"### Name: labelReduction\n### Title: Combines labels which overlap into single continuous regions\n### Aliases: labelReduction\n\n### ** Examples\n\n## Not run: \n##D labelReduction(t)\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"naturalFrequency","snippet":"### Name: naturalFrequency\n### Title: Calculate the natural frequency\n### Aliases: naturalFrequency\n\n### ** Examples\n\nf <- naturalFrequency(L=1, C=140, R=12)\n\n\n"} {"package":"sonicscrewdriver","topic":"parseFilename","snippet":"### Name: parseFilename\n### Title: Parse a filename\n### Aliases: parseFilename\n\n### ** Examples\n\nparseFilename(\"20180605.wav\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"radar.range","snippet":"### Name: radar.range\n### Title: Radar range\n### Aliases: radar.range\n\n### ** Examples\n\nradar.range(2)\nradar.range(2, 
c=343)\nradar.range(2, c=soundSpeedMedium(\"sea water\"))\n\n\n\n"} {"package":"sonicscrewdriver","topic":"rainfallDetection","snippet":"### Name: rainfallDetection\n### Title: Rainfall detection\n### Aliases: rainfallDetection\n\n### ** Examples\n\n## Not run: \n##D rainfallDetection(sheep, method=\"bedoya2017\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"referenceIntensity","snippet":"### Name: referenceIntensity\n### Title: Reference intensity\n### Aliases: referenceIntensity\n\n### ** Examples\n\nri <- referenceIntensity()\n\n\n"} {"package":"sonicscrewdriver","topic":"referencePressure","snippet":"### Name: referencePressure\n### Title: Reference pressure\n### Aliases: referencePressure\n\n### ** Examples\n\nrp <- referencePressure()\nrp <- referencePressure(unit=\"dyne_cm2\")\n\n\n"} {"package":"sonicscrewdriver","topic":"resonantFrequency","snippet":"### Name: resonantFrequency\n### Title: Calculate the resonant frequency\n### Aliases: resonantFrequency\n\n### ** Examples\n\nf <- resonantFrequency(L=1)\n\n\n"} {"package":"sonicscrewdriver","topic":"sDuration","snippet":"### Name: sDuration\n### Title: Sample duration\n### Aliases: sDuration\n\n### ** Examples\n\nsDuration(n=20, samp.rate=44100)\n## Not run: \n##D sDuration(n=20, wave=sheep)\n## End(Not run)\n\n\n\n\n"} {"package":"sonicscrewdriver","topic":"shimmer","snippet":"### Name: shimmer\n### Title: Calculate the shimmer in a Wave object\n### Aliases: shimmer\n\n### ** Examples\n\n## Not run: \n##D shimmer(sheep)\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"soundSpeedMedium","snippet":"### Name: soundSpeedMedium\n### Title: Get the speed of sound in a medium\n### Aliases: soundSpeedMedium\n\n### ** Examples\n\nsoundSpeedMedium(\"air\")\nsoundSpeedMedium(\"sea water\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"soundSpeed_cramer1993","snippet":"### Name: soundSpeed_cramer1993\n### Title: Speed of sound in air using Cramer (1993)\n### Aliases: soundSpeed_cramer1993\n\n### ** Examples\n\nsoundSpeed_cramer1993(14, pressure=3, RH=10)\nsoundSpeed_cramer1993(14, temp.unit=\"C\", pressure=3, pressure.unit=\"kPa\", RH=10)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"ste","snippet":"### Name: ste\n### Title: Short term energy\n### Aliases: ste\n\n### ** Examples\n\n## Not run: \n##D ste(sheep, method=\"dietrich2004\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"subtractSpectra","snippet":"### Name: subtractSpectra\n### Title: Subtract two spectra from seewave\n### Aliases: subtractSpectra\n\n### ** Examples\n\n## Not run: \n##D subtractSpectra(spec1, spec2)\n##D subtractSpectra(spec1, spec2, coerceNegative=\"both\")\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"sweptsine","snippet":"### Name: sweptsine\n### Title: Generate a frequency-swept sine wave\n### Aliases: sweptsine\n\n### ** Examples\n\nsweptsine()\n\n\n\n"} {"package":"sonicscrewdriver","topic":"tSamples","snippet":"### Name: tSamples\n### Title: Samples per time period\n### Aliases: tSamples\n\n### ** Examples\n\ntSamples(10, samp.rate=44100)\n## Not run: \n##D tSamples(10, wave=sheep)\n## End(Not run)\n\n\n\n\n"} {"package":"sonicscrewdriver","topic":"typicalVolume","snippet":"### Name: typicalVolume\n### Title: Typical volumes\n### Aliases: typicalVolume\n\n### ** Examples\n\ntypicalVolume()\ntypicalVolume(\"rocket\")\n\n\n\n"} {"package":"sonicscrewdriver","topic":"upsample","snippet":"### Name: upsample\n### Title: Upsample a wave\n### Aliases: upsample\n\n### ** 
Examples\n\nwave <- tuneR::sine(4000, samp.rate=44100)\nwave2 <- upsample(wave, 88200)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"windowing","snippet":"### Name: windowing\n### Title: Windowing Function for Wave Objects\n### Aliases: windowing\n### Keywords: wave\n\n### ** Examples\n\n## Not run: \n##D windowing(wave, window.length=1000, window.overlap=0, bind.wave=TRUE, FUN=noChange)\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"windowing.functions","snippet":"### Name: windowing.functions\n### Title: List available windowing functions\n### Aliases: windowing.functions\n### Keywords: wave\n\n### ** Examples\n\n## Not run: \n##D windowing.functions()\n## End(Not run)\n\n\n"} {"package":"sonicscrewdriver","topic":"zeroSpectrum","snippet":"### Name: zeroSpectrum\n### Title: Zero spectrum\n### Aliases: zeroSpectrum\n\n### ** Examples\n\n## Not run: \n##D zeroSpectrum(spec)\n## End(Not run)\n\n\n\n"} {"package":"sonicscrewdriver","topic":"zerocross","snippet":"### Name: zerocross\n### Title: Identify zero crossings in a Wave object\n### Aliases: zerocross\n\n### ** Examples\n\n## Not run: \n##D zerocross(sheep)\n## End(Not run)\n\n\n"} {"package":"epimdr","topic":"BarabasiAlbert","snippet":"### Name: BarabasiAlbert\n### Title: Function to generate a Barabasi-Albert network\n### Aliases: BarabasiAlbert\n\n### ** Examples\n\ncm3=BarabasiAlbert(200, 4)\n\n\n"} {"package":"epimdr","topic":"May.app","snippet":"### Name: May.app\n### Title: Launch a shiny-app simulating May's Parasitoid-host Model\n### Aliases: May.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: May.app\n\n\n"} {"package":"epimdr","topic":"NB","snippet":"### Name: NB\n### Title: The Nicholson-Bailey model\n### Aliases: NB\n\n### ** Examples\n\nsim= NB(R=1.1,a=0.1)\n\n\n"} {"package":"epimdr","topic":"NetworkSIR","snippet":"### Name: NetworkSIR\n### Title: Function to simulate an epidemic on a network\n### Aliases: NetworkSIR netSIR\n\n### ** Examples\n\ncm1=BarabasiAlbert(N=200,K=2)\nsim1=NetworkSIR(cm1,.3,0.1)\nsummary(sim1)\n## Not run: plot(sim1)\n\n\n"} {"package":"epimdr","topic":"SEIR.app","snippet":"### Name: SEIR.app\n### Title: Launch a shiny-app simulating the seasonal SEIR model\n### Aliases: SEIR.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: SEIR.app\n\n\n"} {"package":"epimdr","topic":"SEIRS.app","snippet":"### Name: SEIRS.app\n### Title: Launch a shiny-app simulating the SEIRS model\n### Aliases: SEIRS.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: SEIRS.app\n\n\n"} {"package":"epimdr","topic":"SIR.app","snippet":"### Name: SIR.app\n### Title: Launch a shiny-app simulating the SIR model\n### Aliases: SIR.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: SIR.app\n\n\n"} {"package":"epimdr","topic":"SimTsir","snippet":"### Name: SimTsir\n### Title: Function to simulate the stochastic TSIR\n### Aliases: SimTsir\n\n### ** Examples\n\nout = SimTsir()\n\n\n"} {"package":"epimdr","topic":"SimTsir2","snippet":"### Name: SimTsir2\n### Title: Function to simulate the seasonally-forced TSIR\n### Aliases: SimTsir2\n\n### ** Examples\n\n## Not run: see chapter 8 in book\n\n\n"} {"package":"epimdr","topic":"TSIR.app","snippet":"### Name: TSIR.app\n### Title: Launch a shiny-app simulating TSIR model\n### Aliases: TSIR.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: TSIR.app\n\n\n"} {"package":"epimdr","topic":"TSIRllyap","snippet":"### Name: TSIRllyap\n### Title: Function to calculate the local Lyapunov exponents for the TSIR\n### 
Aliases: TSIRllyap\n\n### ** Examples\n\n## Not run: see chapter 10 in book\n\n\n"} {"package":"epimdr","topic":"TSIRlyap","snippet":"### Name: TSIRlyap\n### Title: Function to do Lyapunov exponent calculations from a TSIR\n### simulation\n### Aliases: TSIRlyap\n\n### ** Examples\n\n## Not run: see chapter 10 in book\n\n\n"} {"package":"epimdr","topic":"WattsStrogatz","snippet":"### Name: WattsStrogatz\n### Title: Function to generate a Watts-Strogatz network\n### Aliases: WattsStrogatz\n\n### ** Examples\n\ncm2=WattsStrogatz(N=20, K=4, Prw=.3)\n\n\n"} {"package":"epimdr","topic":"chainSIR","snippet":"### Name: chainSIR\n### Title: Gradient-function for the chain-SIR model\n### Aliases: chainSIR\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 10, by=1/52)\nparas2 = c(mu = 1/75, N = 1, beta = 625, gamma = 365/14, u=5)\nxstart2 = log(c(S=.06, I=c(0.001, rep(0.0001, paras2[\"u\"]-1)), R = 0.0001))\nout = as.data.frame(ode(xstart2, times, chainSIR, paras2))\n\n\n"} {"package":"epimdr","topic":"coyne","snippet":"### Name: coyne\n### Title: Gradient-function for Coyne et al's rabies model\n### Aliases: coyne\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 50, by=1/520)\nparas = c(gamma = 0.0397, b = 0.836, a = 1.34, sigma = 7.5, \nalpha = 66.36, beta = 33.25, c = 0, rho = 0.8)\nstart = log(c(X=12.69/2, H1=0.1, H2=0.1, Y = 0.1, I = 0.1))\nout = as.data.frame(ode(start, times, coyne, paras))\n\n\n"} {"package":"epimdr","topic":"flowField","snippet":"### Name: flowField\n### Title: Flowfield\n### Aliases: flowField\n\n### ** Examples\n\n#See archived phaseR package for examples\n\n\n"} {"package":"epimdr","topic":"gillespie","snippet":"### Name: gillespie\n### Title: Gillespie exact algorithm\n### Aliases: gillespie\n\n### ** Examples\n\nrlist=c(quote(mu * (S+I+R)), quote(mu * S), quote(beta * S * I /(S+I+R)), \n quote(mu * I), quote(gamma * I), quote(mu*R))\nemat=matrix(c(1,0,0,-1,0,0,-1,1,0,0,-1,0,0,-1,1,0,0,-1),ncol=3, byrow=TRUE)\nparas = c(mu = 1, beta = 1000, gamma = 365/20)\ninits = c(S=100, I=2, R=0)\nsim=gillespie(rlist, emat, paras, inits, 100)\n\n\n"} {"package":"epimdr","topic":"llik.cb","snippet":"### Name: llik.cb\n### Title: Negative log-likelihood function for the chain-binomial model\n### Aliases: llik.cb\n\n### ** Examples\n\ntwoweek=rep(1:15, each=2)\nniamey_cases1=sapply(split(niamey$cases_1[1:30], twoweek), sum)\nllik.cb(S0=6500, beta=23, I=niamey_cases1)\n\n\n"} {"package":"epimdr","topic":"llik.pc","snippet":"### Name: llik.pc\n### Title: Function to estimate parameters for the piecewise-constant\n### catalytic model\n### Aliases: llik.pc\n\n### ** Examples\n\nx=c(1,4,8,12,18,24)\npara=rep(.1,length(x))\n## Not run: optim(par=log(para),fn=llik.pc, age=rabbit$a, num=rabbit$inf, denom=rabbit$n, up=x)\n\n\n"} {"package":"epimdr","topic":"orv.app","snippet":"### Name: orv.app\n### Title: Launch a shiny-app to study outbreak-response vaccination\n### campaigns\n### Aliases: orv.app\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: orv.app\n\n\n"} {"package":"epimdr","topic":"plot.cm","snippet":"### Name: plot.cm\n### Title: Function to plot an object of class CM\n### Aliases: plot.cm\n\n### ** Examples\n\ncm=ringlattice(N=20,K=4)\n## Not run: plot(cm)\n\n\n"} {"package":"epimdr","topic":"r0fun","snippet":"### Name: r0fun\n### Title: Function to calculate R0 from a contact matrix\n### Aliases: r0fun\n\n### ** Examples\n\ncm1=BarabasiAlbert(N=200,K=2)\nr0fun(cm1, 0.3, 0.1)\n\n\n"} {"package":"epimdr","topic":"retrospec","snippet":"### Name: retrospec\n### 
Title: Function to predict efficacy of outbreak-response vaccination\n### campaign\n### Aliases: retrospec\n\n### ** Examples\n\nred1=retrospec(R=1.8, 161, vaccine_efficacy=0.85, target_vaccination=0.5, \n intervention_length=10, mtime=250, LP=8, IP=5, N=16000)\n1-red1$redn\n\n\n"} {"package":"epimdr","topic":"ringlattice","snippet":"### Name: ringlattice\n### Title: Function to generate a ring lattice\n### Aliases: ringlattice\n\n### ** Examples\n\ncm=ringlattice(N=20,K=4)\n\n\n"} {"package":"epimdr","topic":"seirmod","snippet":"### Name: seirmod\n### Title: Gradient-function for the SEIR model\n### Aliases: seirmod\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 10, by=1/120)\nparas = c(mu = 1/50, N = 1, beta = 1000, sigma = 365/8, gamma = 365/5)\nstart = c(S=0.06, E=0, I=0.001, R = 0.939)\nout=ode(y=start, times=times, func=seirmod, parms=paras)\n\n\n"} {"package":"epimdr","topic":"seirmod2","snippet":"### Name: seirmod2\n### Title: Gradient-function for the forced SEIR model\n### Aliases: seirmod2\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 10, by=1/120)\nparas = c(mu = 1/50, N = 1, beta0 = 1000, beta1 = 0.2, sigma = 365/8, gamma = 365/5)\nstart = c(S=0.06, E=0, I=0.001, R = 0.939)\nout=ode(y=start, times=times, func=seirmod2, parms=paras)\n\n\n"} {"package":"epimdr","topic":"sim.cb","snippet":"### Name: sim.cb\n### Title: Function to simulate the chain-binomial model\n### Aliases: sim.cb\n\n### ** Examples\n\nsim=sim.cb(S0=6500, beta=23)\n\n\n"} {"package":"epimdr","topic":"siragemod","snippet":"### Name: siragemod\n### Title: Gradient-function for the age-structured SIR model with possibly\n### heterogeneous mixing\n### Aliases: siragemod\n\n### ** Examples\n\nrequire(deSolve)\na=rep(1,4)\nn=length(a)\nbetaM=matrix(1, ncol=4, nrow=4)\npars =list(N=1, gamma=365/14, mu=0.02, sigma=0.2, beta=500, betaM=betaM,p=rep(0,4), a=a)\nxstart<-log(c(S=rep(0.099/n,n), I=rep(0.001/n,n), R=rep(0.9/n,n)))\ntimes=seq(0,10,by=14/365)\nout=as.data.frame(ode(xstart, times=times, func=siragemod, parms=pars))\n\n\n"} {"package":"epimdr","topic":"sirmod","snippet":"### Name: sirmod\n### Title: Gradient-function for the SIR model\n### Aliases: sirmod\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 26, by=1/10)\nparas = c(mu = 0, N = 1, beta = 2, gamma = 1/2)\nstart = c(S=0.999, I=0.001, R = 0)\nout=ode(y=start, times=times, func=sirmod, parms=paras)\n\n\n"} {"package":"epimdr","topic":"sirwmod","snippet":"### Name: sirwmod\n### Title: Gradient-function for the SIRWS model\n### Aliases: sirwmod\n\n### ** Examples\n\nrequire(deSolve)\ntimes = seq(0, 26, by=1/10)\nparas = c(mu = 1/70, p=0.2, N = 1, beta = 200, omega = 1/10, gamma = 17, kappa=30)\nstart = log(c(S=0.06, I=0.01, R=0.92, W = 0.01))\nout = as.data.frame(ode(start, times, sirwmod, paras))\n\n\n"} {"package":"epimdr","topic":"summary.cm","snippet":"### Name: summary.cm\n### Title: Function to calculate the degree distribution for an object of\n### class CM\n### Aliases: summary.cm\n\n### ** Examples\n\ncm=WattsStrogatz(N=20, K=4, Prw=.3)\nsummary(cm)\n\n\n"} {"package":"epimdr","topic":"tau","snippet":"### Name: tau\n### Title: Gillespie tau-leap algorithm\n### Aliases: tau\n\n### ** Examples\n\nrlist2=c(quote(mu * (S+E+I+R)), quote(mu * S), quote(beta * S * I/(S+E+I+R)), \n quote(mu*E), quote(sigma * E), quote(mu * I), quote(gamma * I), quote(mu*R))\nemat2=matrix(c(1,0,0,0,-1,0,0,0,-1,1,0,0,0,-1,0,0,0,-1,1,0,0,0,-1,0,0,0,-1,1,0,0,0,-1),\nncol=4, byrow=TRUE)\nparas = c(mu = 1, beta = 1000, sigma = 365/8, gamma = 365/5)\ninits = c(S=999, E=0, 
I=1, R = 0)\nsim2=tau(rlist2, emat2, paras, inits, 1/365, 1)\n\n\n"} {"package":"BatchExperiments","topic":"addExperiments","snippet":"### Name: addExperiments\n### Title: Add experiments to the registry.\n### Aliases: addExperiments Experiment\n\n### ** Examples\n\n### EXAMPLE 1 ###\nreg = makeExperimentRegistry(id = \"example1\", file.dir = tempfile())\n\n# Define a problem:\n# Subsampling from the iris dataset.\ndata(iris)\nsubsample = function(static, ratio) {\n n = nrow(static)\n train = sample(n, floor(n * ratio))\n test = setdiff(seq(n), train)\n list(test = test, train = train)\n}\naddProblem(reg, id = \"iris\", static = iris,\n dynamic = subsample, seed = 123)\n\n# Define algorithm \"tree\":\n# Decision tree on the iris dataset, modeling Species.\ntree.wrapper = function(static, dynamic, ...) {\n library(rpart)\n mod = rpart(Species ~ ., data = static[dynamic$train, ], ...)\n pred = predict(mod, newdata = static[dynamic$test, ], type = \"class\")\n table(static$Species[dynamic$test], pred)\n}\naddAlgorithm(reg, id = \"tree\", fun = tree.wrapper)\n\n# Define algorithm \"forest\":\n# Random forest on the iris dataset, modeling Species.\nforest.wrapper = function(static, dynamic, ...) {\n library(randomForest)\n mod = randomForest(Species ~ ., data = static, subset = dynamic$train, ...)\n pred = predict(mod, newdata = static[dynamic$test, ])\n table(static$Species[dynamic$test], pred)\n}\naddAlgorithm(reg, id = \"forest\", fun = forest.wrapper)\n\n# Define problem parameters:\npars = list(ratio = c(0.67, 0.9))\niris.design = makeDesign(\"iris\", exhaustive = pars)\n\n# Define decision tree parameters:\npars = list(minsplit = c(10, 20), cp = c(0.01, 0.1))\ntree.design = makeDesign(\"tree\", exhaustive = pars)\n\n# Define random forest parameters:\npars = list(ntree = c(100, 500))\nforest.design = makeDesign(\"forest\", exhaustive = pars)\n\n# Add experiments to the registry:\n# Use previously defined experimental designs.\naddExperiments(reg, prob.designs = iris.design,\n algo.designs = list(tree.design, forest.design),\n repls = 2) # usually you would set repls to 100 or more.\n\n# Optional: Short summary over problems and algorithms.\nsummarizeExperiments(reg)\n\n# Optional: Test one decision tree job and one expensive (ntree = 500)\n# random forest job. 
Use findExperiments to get the right job ids.\ndo.tests = FALSE\nif (do.tests) {\n id1 = findExperiments(reg, algo.pattern = \"tree\")[1]\n id2 = findExperiments(reg, algo.pattern = \"forest\",\n algo.pars = (ntree == 500))[1]\n testJob(reg, id1)\n testJob(reg, id2)\n}\n\n# Submit the jobs to the batch system\nsubmitJobs(reg)\n\n# Calculate the misclassification rate for all (already done) jobs.\nreduce = function(job, res) {\n n = sum(res)\n list(mcr = (n-sum(diag(res)))/n)\n}\nres = reduceResultsExperiments(reg, fun = reduce)\nprint(res)\n\n# Aggregate results using 'ddply' from package 'plyr':\n# Calculate the mean over all replications of identical experiments\n# (same problem, same algorithm and same parameters)\nlibrary(plyr)\nvars = setdiff(names(res), c(\"repl\", \"mcr\"))\naggr = ddply(res, vars, summarise, mean.mcr = mean(mcr))\nprint(aggr)\n\n## Not run: \n##D ### EXAMPLE 2 ###\n##D # define two simple test functions\n##D testfun1 = function(x) sum(x^2)\n##D testfun2 = function(x) -exp(-sum(abs(x)))\n##D \n##D # Define ExperimentRegistry:\n##D reg = makeExperimentRegistry(\"example02\", seed = 123, file.dir = tempfile())\n##D \n##D # Add the testfunctions to the registry:\n##D addProblem(reg, \"testfun1\", static = testfun1)\n##D addProblem(reg, \"testfun2\", static = testfun2)\n##D \n##D # Use SimulatedAnnealing on the test functions:\n##D addAlgorithm(reg, \"sann\", fun = function(static, dynamic) {\n##D upp = rep(10, 2)\n##D low = -upp\n##D start = sample(c(-10, 10), 2)\n##D res = optim(start, fn = static, lower = low, upper = upp, method = \"SANN\")\n##D res = res[c(\"par\", \"value\", \"counts\", \"convergence\")]\n##D res$start = start\n##D return(res)\n##D })\n##D \n##D # add experiments and submit\n##D addExperiments(reg, repls = 10)\n##D submitJobs(reg)\n##D \n##D # Gather information from the experiments, in this case function value\n##D # and whether the algorithm converged:\n##D reduceResultsExperiments(reg, fun = function(job, res) res[c(\"value\", \"convergence\")])\n## End(Not run)\n\n\n"} {"package":"BatchExperiments","topic":"findExperiments","snippet":"### Name: findExperiments\n### Title: Find ids of experiments that match a query.\n### Aliases: findExperiments\n\n### ** Examples\n\nreg = makeExperimentRegistry(id = \"example1\", file.dir = tempfile())\np1 = addProblem(reg, \"one\", 1)\np2 = addProblem(reg, \"two\", 2)\na = addAlgorithm(reg, \"A\", fun = function(static, n) static + n)\naddExperiments(reg, algo.design = makeDesign(a, exhaustive = list(n = 1:4)))\nfindExperiments(reg, prob.pattern = \"one\")\nfindExperiments(reg, prob.pattern = \"o\")\nfindExperiments(reg, algo.pars = (n > 2))\n\n\n"} {"package":"BatchExperiments","topic":"getIndex","snippet":"### Name: getIndex\n### Title: Group experiments.\n### Aliases: getIndex\n\n### ** Examples\n\n# create a registry and add problems and algorithms\nreg = makeExperimentRegistry(\"getIndex\", file.dir = tempfile(\"\"))\naddProblem(reg, \"prob\", static = 1)\naddAlgorithm(reg, \"f0\", function(static, dynamic) static)\naddAlgorithm(reg, \"f1\", function(static, dynamic, i, k) static * i^k)\nad = list(makeDesign(\"f0\"), makeDesign(\"f1\", exhaustive = list(i = 1:5, k = 1:3)))\naddExperiments(reg, algo.designs = ad)\nsubmitJobs(reg)\n\n# get grouped job ids\nids = getJobIds(reg)\nby(ids, getIndex(reg, by.prob = TRUE, by.algo = TRUE), identity)\nids = findExperiments(reg, algo.pattern = \"f1\")\nby(ids, getIndex(reg, ids, by.algo.pars = (k == 1)), identity)\n\n# groupwise reduction\nids = 
findExperiments(reg, algo.pattern = \"f1\")\nshowStatus(reg, ids)\nf = function(aggr, job, res) aggr + res\nby(ids, getIndex(reg, ids, by.algo.pars = k), reduceResults, reg = reg, fun = f)\nby(ids, getIndex(reg, ids, by.algo.pars = i), reduceResults, reg = reg, fun = f)\n\n\n"} {"package":"BatchExperiments","topic":"getResultVars","snippet":"### Name: getResultVars\n### Title: Get variable groups of reduced results.\n### Aliases: getResultVars\n\n### ** Examples\n\nreg = makeExperimentRegistry(\"BatchExample\", seed = 123, file.dir = tempfile())\naddProblem(reg, \"p1\", static = 1)\naddProblem(reg, \"p2\", static = 2)\naddAlgorithm(reg, id = \"a1\",\n fun = function(static, dynamic, alpha) c(y = static*alpha))\naddAlgorithm(reg, id = \"a2\",\n fun = function(static, dynamic, alpha, beta) c(y = static*alpha+beta))\nad1 = makeDesign(\"a1\", exhaustive = list(alpha = 1:2))\nad2 = makeDesign(\"a2\", exhaustive = list(alpha = 1:2, beta = 5:6))\naddExperiments(reg, algo.designs = list(ad1, ad2), repls = 2)\nsubmitJobs(reg)\ndata = reduceResultsExperiments(reg)\nlibrary(plyr)\nddply(data, getResultVars(data, \"group\"), summarise, mean_y = mean(y))\n\n\n"} {"package":"BatchExperiments","topic":"makeDesign","snippet":"### Name: makeDesign\n### Title: Create parameter designs for problems and algorithms.\n### Aliases: makeDesign Design\n\n### ** Examples\n\n## Not run: \n##D # simple design for algorithm \"a1\" with no parameters:\n##D design = makeDesign(\"a1\")\n##D \n##D # design for problem \"p1\" using predefined parameter combinations\n##D design = makeDesign(\"p1\", design = data.frame(alpha = 0:1, beta = c(0.1, 0.2)))\n##D \n##D # creating a list of designs for several algorithms at once, all using the same\n##D # exhaustive grid of parameters\n##D designs = lapply(c(\"a1\", \"a2\", \"a3\"), makeDesign,\n##D exhaustive = list(alpha = 0:1, gamma = 1:10/10))\n## End(Not run)\n\n\n"} {"package":"BatchExperiments","topic":"summarizeExperiments","snippet":"### Name: summarizeExperiments\n### Title: Summarize selected experiments.\n### Aliases: summarizeExperiments\n\n### ** Examples\n\nreg = makeExperimentRegistry(\"summarizeExperiments\", seed = 123, file.dir = tempfile())\np1 = addProblem(reg, \"p1\", static = 1)\na1 = addAlgorithm(reg, id = \"a1\", fun = function(static, dynamic, alpha, beta) 1)\na2 = addAlgorithm(reg, id = \"a2\", fun = function(static, dynamic, alpha, gamma) 2)\nad1 = makeDesign(a1, exhaustive = list(alpha = 1:2, beta = 1:2))\nad2 = makeDesign(a2, exhaustive = list(alpha = 1:2, gamma = 7:8))\naddExperiments(reg, algo.designs = list(ad1, ad2), repls = 2)\nprint(summarizeExperiments(reg))\nprint(summarizeExperiments(reg, show = c(\"prob\", \"algo\", \"alpha\", \"gamma\")))\n\n\n"} {"package":"gcdnet","topic":"FHT","snippet":"### Name: FHT\n### Title: FHT data introduced in Friedman et al. 
(2010).\n### Aliases: FHT\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(FHT)\n\n\n\n"} {"package":"gcdnet","topic":"coef.cv.gcdnet","snippet":"### Name: coef.cv.gcdnet\n### Title: Get coefficients or make coefficient predictions from a\n### \"cv.gcdnet\" object.\n### Aliases: coef.cv.gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nset.seed(2011)\ncv <- cv.gcdnet(FHT$x, FHT$y, lambda2 = 1, nfolds = 5)\ncoef(cv, s = \"lambda.min\")\n\n\n\n"} {"package":"gcdnet","topic":"coef.gcdnet","snippet":"### Name: coef.gcdnet\n### Title: Get coefficients or make coefficient predictions from a \"gcdnet\"\n### object.\n### Aliases: coef.gcdnet coef.hsvmpath coef.sqsvmpath coef.logitpath\n### coef.lspath coef.erpath\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nfit1 <- gcdnet(x = FHT$x,y = FHT$y)\ncoef(fit1, type = \"coef\", s = c(0.1,0.005))\ncoef(fit1, type = \"nonzero\")\n\n\n\n"} {"package":"gcdnet","topic":"cv.gcdnet","snippet":"### Name: cv.gcdnet\n### Title: Cross-validation for gcdnet\n### Aliases: cv.gcdnet cv.hsvmpath cv.sqsvmpath cv.logitpath cv.lspath\n### cv.erpath\n### Keywords: models regression\n\n### ** Examples\n\n\n# fit an elastic net penalized HHSVM with lambda2 = 0.1 for the L2 penalty.\n# Use the misclassification rate as the cross validation prediction loss.\n# Use five-fold CV to choose the optimal lambda for the L1 penalty.\n\ndata(FHT)\nset.seed(2011)\ncv <- cv.gcdnet(FHT$x, FHT$y, method = \"hhsvm\",\n lambda2 = 0.1, pred.loss = \"misclass\",\n nfolds = 5, delta = 1.5)\nplot(cv)\n\n# fit an elastic net penalized least squares\n# with lambda2 = 0.1 for the L2 penalty. Use the\n# least square loss as the cross validation\n# prediction loss. Use five-fold CV to choose\n# the optimal lambda for the L1 penalty.\n\nset.seed(2011)\ncv1 <- cv.gcdnet(FHT$x, FHT$y_reg, method =\"ls\",\n lambda2 = 0.1, pred.loss = \"loss\",\n nfolds = 5)\nplot(cv1)\n\n# To fit a LASSO penalized logistic regression\n# we set lambda2 = 0 to disable the L2 penalty. Use the\n# logistic loss as the cross validation\n# prediction loss. Use five-fold CV to choose\n# the optimal lambda for the L1 penalty.\n\nset.seed(2011)\ncv2 <- cv.gcdnet(FHT$x, FHT$y, method =\"logit\",\n lambda2 = 0, pred.loss=\"loss\",\n nfolds=5)\nplot(cv2)\n\n\n\n"} {"package":"gcdnet","topic":"gcdnet","snippet":"### Name: gcdnet\n### Title: Fits the regularization paths for large margin classifiers\n### Aliases: gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\n# 1. solution paths for the LASSO penalized least squares.\n# To use LASSO set lambda2 = 0.\n\nm1 <- gcdnet(x = FHT$x, y = FHT$y_reg, lambda2 = 0, method = \"ls\")\nplot(m1)\n\n# 2. solution paths for the elastic net penalized HHSVM.\n# lambda2 is the parameter controlling the L2 penalty.\nm2 <- gcdnet(x = FHT$x, y = FHT$y, delta = 1, lambda2 = 1, method = \"hhsvm\")\nplot(m2)\n\n# 3. solution paths for the adaptive LASSO penalized SVM\n# with the squared hinge loss. To use the adaptive LASSO,\n# set lambda2 = 0 and meanwhile specify the L1 penalty weights.\np <- ncol(FHT$x)\n# set the first three L1 penalty weights as 0.1 and the rest are 1\npf = c(0.1, 0.1, 0.1, rep(1, p-3))\nm3 <- gcdnet(x = FHT$x, y = FHT$y, pf = pf, lambda2 = 0, method = \"sqsvm\")\nplot(m3)\n\n# 4. 
solution paths for the adaptive elastic net penalized\n# logistic regression.\n\np <- ncol(FHT$x)\n# set the first three L1 penalty weights as 10 and the rest are 1.\npf <- c(10, 10, 10, rep(1, p-3))\n# set the last three L2 penalty weights as 0.1 and the rest are 1.\npf2 <- c(rep(1, p-3), 0.1, 0.1, 0.1)\n# set the L2 penalty parameter lambda2=0.01.\nm4 <- gcdnet(x = FHT$x, y = FHT$y, pf = pf, pf2 = pf2,\n lambda2 = 0.01, method = \"logit\")\nplot(m4)\n\n# 5. solution paths for the LASSO penalized expectile regression\n# with the asymmetric least square parameter omega=0.9.\n\nm5 <- gcdnet(x = FHT$x, y = FHT$y_reg, omega = 0.9,\n lambda2 = 0, method = \"er\")\nplot(m5)\n\n\n\n"} {"package":"gcdnet","topic":"plot.cv.gcdnet","snippet":"### Name: plot.cv.gcdnet\n### Title: Plot the cross-validation curve produced by cv.gcdnet\n### Aliases: plot.cv.gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\n# fit an elastic net penalized logistic regression with lambda2 = 1 for the\n# L2 penalty. Use the logistic loss as the cross validation prediction loss.\n# Use five-fold CV to choose the optimal lambda for the L1 penalty.\ndata(FHT)\nset.seed(2011)\ncv=cv.gcdnet(FHT$x, FHT$y, method =\"logit\", lambda2 = 1,\n pred.loss=\"loss\", nfolds=5)\nplot(cv)\n\n\n\n"} {"package":"gcdnet","topic":"plot.gcdnet","snippet":"### Name: plot.gcdnet\n### Title: Plot coefficients from a \"gcdnet\" object\n### Aliases: plot.gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nm1 <- gcdnet(x = FHT$x,y = FHT$y)\npar(mfrow = c(1,3))\nplot(m1) # plots against the L1-norm of the coefficients\nplot(m1,xvar = \"lambda\",label = TRUE) # plots against the log-lambda sequence\nplot(m1,color = TRUE)\n\n\n\n"} {"package":"gcdnet","topic":"predict.cv.gcdnet","snippet":"### Name: predict.cv.gcdnet\n### Title: Make predictions from a \"cv.gcdnet\" object.\n### Aliases: predict.cv.gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nset.seed(2011)\ncv=cv.gcdnet(FHT$x, FHT$y, lambda2 = 1, pred.loss=\"misclass\",\n lambda.factor=0.05, nfolds=5)\npre = predict(cv$gcdnet.fit, newx = FHT$x, s = cv$lambda.1se,\n type = \"class\")\n\n\n\n"} {"package":"gcdnet","topic":"predict.gcdnet","snippet":"### Name: predict.gcdnet\n### Title: Make predictions from a \"gcdnet\" object\n### Aliases: predict.gcdnet predict.hsvmpath predict.sqsvmpath\n### predict.logitpath predict.lspath predict.erpath\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nm1 <- gcdnet(x = FHT$x,y = FHT$y)\nprint(predict(m1, type = \"class\",newx = FHT$x[2:5, ]))\n\n\n\n"} {"package":"gcdnet","topic":"print.gcdnet","snippet":"### Name: print.gcdnet\n### Title: Print a gcdnet object\n### Aliases: print.gcdnet\n### Keywords: models regression\n\n### ** Examples\n\n\ndata(FHT)\nm1 <- gcdnet(x = FHT$x, y = FHT$y, delta = 1, lambda2 = 0.1)\nprint(m1)\n\n\n\n"} {"package":"rrcov","topic":"Appalachia","snippet":"### Name: Appalachia\n### Title: Annual maximum streamflow in central Appalachia\n### Aliases: Appalachia\n### Keywords: datasets\n\n### ** Examples\n\n data(Appalachia)\n\n # plot a matrix of scatterplots\n pairs(Appalachia,\n main=\"Appalachia data set\",\n pch=21,\n bg=c(\"red\", \"green3\", \"blue\"))\n\n mcd<-CovMcd(Appalachia)\n mcd\n plot(mcd, which=\"dist\", class=TRUE)\n plot(mcd, which=\"dd\", class=TRUE)\n\n ## identify the discordant sites using robust distances and compare \n ## to the classical ones\n mcd <- CovMcd(Appalachia)\n rd <- sqrt(getDistance(mcd))\n ccov <- 
CovClassic(Appalachia)\n cd <- sqrt(getDistance(ccov))\n r.out <- which(rd > sqrt(qchisq(0.975,3)))\n c.out <- which(cd > sqrt(qchisq(0.975,3)))\n cat(\"Robust: \", length(r.out), \" outliers: \", r.out,\"\\n\")\n cat(\"Classical: \", length(c.out), \" outliers: \", c.out,\"\\n\")\n\n\n"} {"package":"rrcov","topic":"Cascades","snippet":"### Name: Cascades\n### Title: Annual precipitation totals for the North Cascades region\n### Aliases: Cascades\n### Keywords: datasets\n\n### ** Examples\n\n data(Cascades)\n\n # plot a matrix of scatterplots\n pairs(Cascades,\n main=\"Cascades data set\",\n pch=21,\n bg=c(\"red\", \"green3\", \"blue\"))\n\n mcd<-CovMcd(Cascades)\n mcd\n plot(mcd, which=\"dist\", class=TRUE)\n plot(mcd, which=\"dd\", class=TRUE)\n\n ## identify the discordant sites using robust distances and compare \n ## to the classical ones\n rd <- sqrt(getDistance(mcd))\n ccov <- CovClassic(Cascades)\n cd <- sqrt(getDistance(ccov))\n r.out <- which(rd > sqrt(qchisq(0.975,3)))\n c.out <- which(cd > sqrt(qchisq(0.975,3)))\n cat(\"Robust: \", length(r.out), \" outliers: \", r.out,\"\\n\")\n cat(\"Classical: \", length(c.out), \" outliers: \", c.out,\"\\n\")\n\n\n"} {"package":"rrcov","topic":"Cov-class","snippet":"### Name: Cov-class\n### Title: Class \"Cov\" - a base class for estimates of multivariate\n### location and scatter\n### Aliases: Cov-class getCenter-method getCenter,Cov-method\n### getCov,Cov-method getCorr,Cov-method getData,Cov-method\n### getDistance,Cov-method getEvals,Cov-method getDet,Cov-method\n### getShape,Cov-method getFlag,Cov-method isClassic,method\n### isClassic,Cov-method plot plot,Cov,missing-method show,Cov-method\n### summary,Cov-method Uvector-class Ulist-class Utable-class\n### Umatrix-class Ufunction-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"Cov\")\n\n\n"} {"package":"rrcov","topic":"CovClassic-class","snippet":"### Name: CovClassic-class\n### Title: Class \"CovClassic\" - classical estimates of multivariate\n### location and scatter\n### Aliases: CovClassic-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\ncv <- CovClassic(hbk.x)\ncv\nsummary(cv)\nplot(cv)\n\n\n"} {"package":"rrcov","topic":"CovClassic","snippet":"### Name: CovClassic\n### Title: Classical Estimates of Multivariate Location and Scatter\n### Aliases: CovClassic Cov\n### Keywords: classes robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\ncv <- CovClassic(hbk.x)\ncv\nsummary(cv)\nplot(cv)\n\n\n"} {"package":"rrcov","topic":"CovControlMMest-class","snippet":"### Name: CovControlMMest-class\n### Title: Class 'CovControlMMest' - contains control parameters for\n### \"CovMMest\"\n### Aliases: CovControlMMest-class restimate,CovControlMMest-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMMest\", bdp=0.25)\n ctrl2 <- CovControlMMest(bdp=0.25)\n\n data(hbk)\n CovMMest(hbk, control=ctrl1)\n\n\n\n"} {"package":"rrcov","topic":"CovControlMMest","snippet":"### Name: CovControlMMest\n### Title: Constructor function for objects of class \"CovControlMMest\"\n### Aliases: CovControlMMest\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMMest\", bdp=0.25)\n ctrl2 <- CovControlMMest(bdp=0.25)\n\n data(hbk)\n CovMMest(hbk, control=ctrl1)\n \n \n \n\n\n"} 
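The CovControl constructors above all follow the same pattern: build a control object, then hand it to the matching estimator. As a brief illustration of why the robust estimators are worth the trouble, here is a short sketch (restricted to calls that appear in the surrounding examples: CovClassic, CovMcd, and the getCenter accessor from the Cov class) contrasting classical and robust location estimates on the hbk data:

library(rrcov)
data(hbk)
hbk.x <- data.matrix(hbk[, 1:3])
# hbk contains a cluster of outliers, which tends to pull the classical
# center away from the bulk of the data; the MCD center resists them.
getCenter(CovClassic(hbk.x))
getCenter(CovMcd(hbk.x))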
{"package":"rrcov","topic":"CovControlMcd-class","snippet":"### Name: CovControlMcd-class\n### Title: Class 'CovControlMcd' - contains control parameters for CovMcd\n### Aliases: CovControlMcd-class restimate,CovControlMcd-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMcd\", alpha=0.75)\n ctrl2 <- CovControlMcd(alpha=0.75)\n\n data(hbk)\n CovMcd(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMcd","snippet":"### Name: CovControlMcd\n### Title: Constructor function for objects of class \"CovControlMcd\"\n### Aliases: CovControlMcd\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMcd\", alpha=0.75)\n ctrl2 <- CovControlMcd(alpha=0.75)\n\n data(hbk)\n CovMcd(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMest-class","snippet":"### Name: CovControlMest-class\n### Title: Class 'CovControlMest' - contains control parameters for\n### \"CovMest\"\n### Aliases: CovControlMest-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMest\", r=0.4)\n ctrl2 <- CovControlMest(r=0.4)\n\n data(hbk)\n CovMest(hbk, control=ctrl1)\n\n\n\n"} {"package":"rrcov","topic":"CovControlMest","snippet":"### Name: CovControlMest\n### Title: Constructor function for objects of class \"CovControlMest\"\n### Aliases: CovControlMest\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMest\", r=0.4)\n ctrl2 <- CovControlMest(r=0.4)\n\n data(hbk)\n CovMest(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMrcd-class","snippet":"### Name: CovControlMrcd-class\n### Title: Class 'CovControlMrcd' - contains control parameters for\n### CovMrcd()\n### Aliases: CovControlMrcd-class restimate,CovControlMrcd-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMrcd\", alpha=0.75)\n ctrl2 <- CovControlMrcd(alpha=0.75)\n\n data(hbk)\n CovMrcd(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMrcd","snippet":"### Name: CovControlMrcd\n### Title: Constructor function for objects of class \"CovControlMrcd\"\n### Aliases: CovControlMrcd\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMrcd\", alpha=0.75)\n ctrl2 <- CovControlMrcd(alpha=0.75)\n\n data(hbk)\n CovMrcd(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMve-class","snippet":"### Name: CovControlMve-class\n### Title: Class 'CovControlMve' - contains control parameters for CovMve\n### Aliases: CovControlMve-class restimate,CovControlMve-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMve\", alpha=0.75)\n ctrl2 <- CovControlMve(alpha=0.75)\n\n data(hbk)\n CovMve(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlMve","snippet":"### Name: CovControlMve\n### Title: Constructor function for objects of class \"CovControlMve\"\n### Aliases: CovControlMve\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlMve\", alpha=0.75)\n ctrl2 <- 
CovControlMve(alpha=0.75)\n\n data(hbk)\n CovMve(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlOgk-class","snippet":"### Name: CovControlOgk-class\n### Title: Class 'CovControlOgk' - contains control parameters for CovOgk\n### Aliases: CovControlOgk-class restimate,CovControlOgk-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlOgk\", beta=0.95)\n ctrl2 <- CovControlOgk(beta=0.95)\n\n data(hbk)\n CovOgk(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlOgk","snippet":"### Name: CovControlOgk\n### Title: Constructor function for objects of class \"CovControlOgk\"\n### Aliases: CovControlOgk\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlOgk\", beta=0.95)\n ctrl2 <- CovControlOgk(beta=0.95)\n\n data(hbk)\n CovOgk(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlSde-class","snippet":"### Name: CovControlSde-class\n### Title: Class 'CovControlSde' - contains control parameters for \"CovSde\"\n### Aliases: CovControlSde-class restimate,CovControlSde-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlSde\", nsamp=2000)\n ctrl2 <- CovControlSde(nsamp=2000)\n\n data(hbk)\n CovSde(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlSde","snippet":"### Name: CovControlSde\n### Title: Constructor function for objects of class \"CovControlSde\"\n### Aliases: CovControlSde\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlSde\", nsamp=2000)\n ctrl2 <- CovControlSde(nsamp=2000)\n\n data(hbk)\n CovSde(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovControlSest-class","snippet":"### Name: CovControlSest-class\n### Title: Class 'CovControlSest' - contains control parameters for\n### \"CovSest\"\n### Aliases: CovControlSest-class restimate,CovControlSest-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlSest\", bdp=0.4)\n ctrl2 <- CovControlSest(bdp=0.4)\n\n data(hbk)\n CovSest(hbk, control=ctrl1)\n\n\n\n"} {"package":"rrcov","topic":"CovControlSest","snippet":"### Name: CovControlSest\n### Title: Constructor function for objects of class \"CovControlSest\"\n### Aliases: CovControlSest\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n ## the following two statements are equivalent\n ctrl1 <- new(\"CovControlSest\", bdp=0.4)\n ctrl2 <- CovControlSest(bdp=0.4)\n\n data(hbk)\n CovSest(hbk, control=ctrl1)\n\n\n"} {"package":"rrcov","topic":"CovMMest-class","snippet":"### Name: CovMMest-class\n### Title: MM Estimates of Multivariate Location and Scatter\n### Aliases: CovMMest-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovMMest\")\n\n\n"} {"package":"rrcov","topic":"CovMMest","snippet":"### Name: CovMMest\n### Title: MM Estimates of Multivariate Location and Scatter\n### Aliases: CovMMest\n### Keywords: robust multivariate\n\n### ** Examples\n\n\nlibrary(rrcov)\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovMMest(hbk.x)\n\n## the following four statements are equivalent\nc0 <- CovMMest(hbk.x)\nc1 <- CovMMest(hbk.x, bdp = 0.25)\nc2 <- CovMMest(hbk.x, control = CovControlMMest(bdp = 0.25))\nc3 <- CovMMest(hbk.x, 
control = new(\"CovControlMMest\", bdp = 0.25))\n\n## direct specification overrides control one:\nc4 <- CovMMest(hbk.x, bdp = 0.40,\n control = CovControlMMest(bdp = 0.25))\nc1\nsummary(c1)\nplot(c1)\n\n## Deterministic MM-estmates\nCovMMest(hbk.x, control=CovControlMMest(sest=CovControlSest(method=\"sdet\")))\n\n\n\n"} {"package":"rrcov","topic":"CovMcd-class","snippet":"### Name: CovMcd-class\n### Title: MCD Estimates of Multivariate Location and Scatter\n### Aliases: CovMcd-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovMcd\")\n\n\n"} {"package":"rrcov","topic":"CovMcd","snippet":"### Name: CovMcd\n### Title: Robust Location and Scatter Estimation via MCD\n### Aliases: CovMcd\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovMcd(hbk.x)\ncD <- CovMcd(hbk.x, nsamp = \"deterministic\")\nsummary(cD)\n\n## the following three statements are equivalent\nc1 <- CovMcd(hbk.x, alpha = 0.75)\nc2 <- CovMcd(hbk.x, control = CovControlMcd(alpha = 0.75))\n## direct specification overrides control one:\nc3 <- CovMcd(hbk.x, alpha = 0.75,\n control = CovControlMcd(alpha=0.95))\nc1\n\n\n"} {"package":"rrcov","topic":"CovMest-class","snippet":"### Name: CovMest-class\n### Title: Constrained M-estimates of Multivariate Location and Scatter\n### Aliases: CovMest-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovMest\")\n\n\n"} {"package":"rrcov","topic":"CovMest","snippet":"### Name: CovMest\n### Title: Constrained M-Estimates of Location and Scatter\n### Aliases: CovMest\n### Keywords: robust multivariate\n\n### ** Examples\n\n\nlibrary(rrcov)\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovMest(hbk.x)\n\n## the following four statements are equivalent\nc0 <- CovMest(hbk.x)\nc1 <- CovMest(hbk.x, r = 0.45)\nc2 <- CovMest(hbk.x, control = CovControlMest(r = 0.45))\nc3 <- CovMest(hbk.x, control = new(\"CovControlMest\", r = 0.45))\n\n## direct specification overrides control one:\nc4 <- CovMest(hbk.x, r = 0.40,\n control = CovControlMest(r = 0.25))\nc1\nsummary(c1)\nplot(c1)\n\n\n"} {"package":"rrcov","topic":"CovMrcd-class","snippet":"### Name: CovMrcd-class\n### Title: MRCD Estimates of Multivariate Location and Scatter\n### Aliases: CovMrcd-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovMrcd\")\n\n\n"} {"package":"rrcov","topic":"CovMrcd","snippet":"### Name: CovMrcd\n### Title: Robust Location and Scatter Estimation via Minimum Regularized\n### Covariance Determonant (MRCD)\n### Aliases: CovMrcd\n### Keywords: robust multivariate\n\n### ** Examples\n\n## The result will be (almost) identical to the raw MCD\n## (since we do not do reweighting of MRCD)\n##\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nc0 <- CovMcd(hbk.x, alpha=0.75, use.correction=FALSE)\ncc <- CovMrcd(hbk.x, alpha=0.75)\ncc$rho\nall.equal(c0$best, cc$best)\nall.equal(c0$raw.center, cc$center)\nall.equal(c0$raw.cov/c0$raw.cnp2[1], cc$cov/cc$cnp2)\n\nsummary(cc)\n\n## the following three statements are equivalent\nc1 <- CovMrcd(hbk.x, alpha = 0.75)\nc2 <- CovMrcd(hbk.x, control = CovControlMrcd(alpha = 0.75))\n## direct specification overrides control one:\nc3 <- CovMrcd(hbk.x, alpha = 0.75,\n control = CovControlMrcd(alpha=0.95))\nc1\n\n## Not run: \n##D \n##D ## This is the first example from Boudt et al. (2020). 
The first variable is \n##D ## the dependent one, which we remove, remaining with p=226 NIR absorbance spectra \n##D \n##D data(octane)\n##D \n##D octane <- octane[, -1] # remove the dependent variable y\n##D \n##D n <- nrow(octane)\n##D p <- ncol(octane)\n##D \n##D ## Compute MRCD with h=33, which gives approximately 15 percent breakdown point.\n##D ## This value of h was found by Boudt et al. (2020) using a data driven approach, \n##D ## similar to the Forward Search of Atkinson et al. (2004). \n##D ## The default value of h would be 20 (i.e. alpha=0.5) \n##D \n##D out <- CovMrcd(octane, h=33) \n##D out$rho\n##D \n##D ## Please note that the paper indicates the obtained rho=0.1149; however,\n##D ## this value of rho is obtained if the parameter maxcond is set equal to 999 (this was \n##D ## the default in an earlier version of the software, now the default is maxcond=50). \n##D ## To reproduce the result from the paper, change the call to CovMrcd() as follows \n##D ## (this will not influence the results shown further):\n##D \n##D ## out <- CovMrcd(octane, h=33, maxcond=999) \n##D ## out$rho\n##D \n##D robpca = PcaHubert(octane, k=2, alpha=0.75, mcd=FALSE)\n##D (outl.robpca = which(robpca@flag==FALSE))\n##D \n##D # Observations flagged as outliers by ROBPCA:\n##D # 25, 26, 36, 37, 38, 39\n##D \n##D # Plot the orthogonal distances versus the score distances:\n##D pch = rep(20,n); pch[robpca@flag==FALSE] = 17\n##D col = rep('black',n); col[robpca@flag==FALSE] = 'red'\n##D plot(robpca, pch=pch, col=col, id.n.sd=6, id.n.od=6)\n##D \n##D ## Plot now the MRCD Mahalanobis distances\n##D pch = rep(20,n); pch[!getFlag(out)] = 17\n##D col = rep('black',n); col[!getFlag(out)] = 'red'\n##D plot(out, pch=pch, col=col, id.n=6)\n## End(Not run)\n\n\n"} {"package":"rrcov","topic":"CovMve-class","snippet":"### Name: CovMve-class\n### Title: MVE Estimates of Multivariate Location and Scatter\n### Aliases: CovMve-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovMve\")\n\n\n"} {"package":"rrcov","topic":"CovMve","snippet":"### Name: CovMve\n### Title: Robust Location and Scatter Estimation via MVE\n### Aliases: CovMve\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovMve(hbk.x)\n\n## the following three statements are equivalent\nc1 <- CovMve(hbk.x, alpha = 0.75)\nc2 <- CovMve(hbk.x, control = CovControlMve(alpha = 0.75))\n## direct specification overrides control one:\nc3 <- CovMve(hbk.x, alpha = 0.75,\n control = CovControlMve(alpha=0.95))\nc1\n\n\n"} {"package":"rrcov","topic":"CovOgk-class","snippet":"### Name: CovOgk-class\n### Title: OGK Estimates of Multivariate Location and Scatter\n### Aliases: CovOgk-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovOgk\")\n\n\n"} {"package":"rrcov","topic":"CovOgk","snippet":"### Name: CovOgk\n### Title: Robust Location and Scatter Estimation - Orthogonalized\n### Gnanadesikan-Kettenring (OGK)\n### Aliases: CovOgk\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovOgk(hbk.x)\n\n## the following three statements are equivalent\nc1 <- CovOgk(hbk.x, niter=1)\nc2 <- CovOgk(hbk.x, control = CovControlOgk(niter=1))\n\n## direct specification overrides control one:\nc3 <- CovOgk(hbk.x, beta=0.95,\n control = CovControlOgk(beta=0.99))\nc1\n\nx<-matrix(c(1,2,3,7,1,2,3,7), ncol=2)\n## CovOgk(x) - this would fail because the two columns of x are exactly collinear.\n## In order to fix 
it, redefine the default 'vrob' function for example\n## in the following way and pass it as a parameter in the control\n## object.\ncc <- CovOgk(x, control=new(\"CovControlOgk\",\n vrob=function(x1, x2, ...)\n {\n r <- .vrobGK(x1, x2, ...)\n if(is.na(r))\n r <- 0\n r\n })\n)\ncc\n\n\n"} {"package":"rrcov","topic":"CovRobust-class","snippet":"### Name: CovRobust-class\n### Title: Class \"CovRobust\" - virtual base class for robust estimates of\n### multivariate location and scatter\n### Aliases: CovRobust-class isClassic,CovRobust-method\n### getMeth,CovRobust-method show,CovRobust-method\n### summary,CovRobust-method getRaw,CovRobust-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\n data(hbk)\n hbk.x <- data.matrix(hbk[, 1:3])\n cv <- CovMest(hbk.x) # it is not possible to create an object of\n # class CovRobust, since it is a VIRTUAL class\n cv\n summary(cv) # summary method for class CovRobust\n plot(cv) # plot method for class CovRobust\n\n\n"} {"package":"rrcov","topic":"CovRobust","snippet":"### Name: CovRobust\n### Title: Robust Location and Scatter Estimation\n### Aliases: CovRobust\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovRobust(hbk.x)\nCovRobust(hbk.x, CovControlSest(method=\"bisquare\"))\n\n\n\n"} {"package":"rrcov","topic":"CovSde-class","snippet":"### Name: CovSde-class\n### Title: Stahel-Donoho Estimates of Multivariate Location and Scatter\n### Aliases: CovSde-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovSde\")\n\n\n"} {"package":"rrcov","topic":"CovSde","snippet":"### Name: CovSde\n### Title: Stahel-Donoho Estimates of Multivariate Location and Scatter\n### Aliases: CovSde\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\nCovSde(hbk.x)\n\n## the following four statements are equivalent\nc0 <- CovSde(hbk.x)\nc1 <- CovSde(hbk.x, nsamp=2000)\nc2 <- CovSde(hbk.x, control = CovControlSde(nsamp=2000))\nc3 <- CovSde(hbk.x, control = new(\"CovControlSde\", nsamp=2000))\n\n## direct specification overrides control one:\nc4 <- CovSde(hbk.x, nsamp=100,\n control = CovControlSde(nsamp=2000))\nc1\nsummary(c1)\nplot(c1)\n\n## Use the function CovRobust() - if no estimation method is\n## specified, for small data sets CovSde() will be called\ncr <- CovRobust(hbk.x)\ncr\n\n\n\n"} {"package":"rrcov","topic":"CovSest-class","snippet":"### Name: CovSest-class\n### Title: S Estimates of Multivariate Location and Scatter\n### Aliases: CovSest-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"CovSest\")\n\n\n"} {"package":"rrcov","topic":"CovSest","snippet":"### Name: CovSest\n### Title: S Estimates of Multivariate Location and Scatter\n### Aliases: CovSest\n### Keywords: robust multivariate\n\n### ** Examples\n\n\nlibrary(rrcov)\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\ncc <- CovSest(hbk.x)\ncc\n\n## summary and different types of plots\nsummary(cc) \nplot(cc) \nplot(cc, which=\"dd\")\nplot(cc, which=\"pairs\")\nplot(cc, which=\"xydist\")\n\n## the following four statements are equivalent\nc0 <- CovSest(hbk.x)\nc1 <- CovSest(hbk.x, bdp = 0.25)\nc2 <- CovSest(hbk.x, control = CovControlSest(bdp = 0.25))\nc3 <- CovSest(hbk.x, control = new(\"CovControlSest\", bdp = 0.25))\n\n## direct specification overrides control one:\nc4 <- CovSest(hbk.x, bdp = 0.40,\n control = CovControlSest(bdp = 0.25))\nc1\nsummary(c1)\nplot(c1)\n\n## Use the SURREAL algorithm of Ruppert\ncr <- CovSest(hbk.x, 
method=\"surreal\")\ncr\n\n## Use Bisquare estimation\ncr <- CovSest(hbk.x, method=\"bisquare\")\ncr\n\n## Use Rocke type estimation\ncr <- CovSest(hbk.x, method=\"rocke\")\ncr\n\n## Use Deterministic estimation\ncr <- CovSest(hbk.x, method=\"sdet\")\ncr\n\n\n\n"} {"package":"rrcov","topic":"Lda-class","snippet":"### Name: Lda-class\n### Title: Class \"Lda\" - virtual base class for all classic and robust LDA\n### classes\n### Aliases: Lda-class predict,Lda-method show,Lda-method\n### summary,Lda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"Lda\")\n\n\n"} {"package":"rrcov","topic":"LdaClassic-class","snippet":"### Name: LdaClassic-class\n### Title: Class \"LdaClassic\" - Linear Discriminant Analysis\n### Aliases: LdaClassic-class\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"LdaClassic\")\n\n\n"} {"package":"rrcov","topic":"LdaClassic","snippet":"### Name: LdaClassic\n### Title: Linear Discriminant Analysis\n### Aliases: LdaClassic LdaClassic.formula LdaClassic.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## Example anorexia\nlibrary(MASS)\ndata(anorexia)\n\n## rrcov: LdaClassic()\nlda <- LdaClassic(Treat~., data=anorexia)\npredict(lda)@classification\n\n## MASS: lda()\nlda.MASS <- lda(Treat~., data=anorexia)\npredict(lda.MASS)$class\n\n## Compare the prediction results of MASS:::lda() and LdaClassic()\nall.equal(predict(lda)@classification, predict(lda.MASS)$class)\n\n\n"} {"package":"rrcov","topic":"LdaPP-class","snippet":"### Name: LdaPP-class\n### Title: Class \"LdaPP\" - Robust method for Linear Discriminant Analysis\n### by Projection-pursuit\n### Aliases: LdaPP-class predict,LdaPP-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"LdaPP\")\n\n\n"} {"package":"rrcov","topic":"LdaPP","snippet":"### Name: LdaPP\n### Title: Robust Linear Discriminant Analysis by Projection Pursuit\n### Aliases: LdaPP LdaPP.formula LdaPP.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n\n##\n## Function to plot a LDA separation line\n##\nlda.line <- function(lda, ...)\n{\n ab <- lda@ldf[1,] - lda@ldf[2,]\n cc <- lda@ldfconst[1] - lda@ldfconst[2]\n abline(a=-cc/ab[2], b=-ab[1]/ab[2],...)\n}\n\ndata(pottery)\nx <- pottery[,c(\"MG\", \"CA\")]\ngrp <- pottery$origin\ncol <- c(3,4)\ngcol <- ifelse(grp == \"Attic\", col[1], col[2])\ngpch <- ifelse(grp == \"Attic\", 16, 1)\n\n##\n## Reproduce Fig. 2. 
from Pires and Branco (2010)\n##\nplot(CA~MG, data=pottery, col=gcol, pch=gpch)\n\n## Not run: \n##D \n##D ppc <- LdaPP(x, grp, method=\"class\", optim=TRUE)\n##D lda.line(ppc, col=1, lwd=2, lty=1)\n##D \n##D pph <- LdaPP(x, grp, method=\"huber\",optim=TRUE)\n##D lda.line(pph, col=3, lty=3)\n##D \n##D pps <- LdaPP(x, grp, method=\"sest\", optim=TRUE)\n##D lda.line(pps, col=4, lty=4)\n##D \n##D ppm <- LdaPP(x, grp, method=\"mad\", optim=TRUE)\n##D lda.line(ppm, col=5, lty=5)\n##D \n##D rlda <- Linda(x, grp, method=\"mcd\")\n##D lda.line(rlda, col=6, lty=1)\n##D \n##D fsa <- Linda(x, grp, method=\"fsa\")\n##D lda.line(fsa, col=8, lty=6)\n##D \n##D ## Use the formula interface:\n##D ##\n##D LdaPP(origin~MG+CA, data=pottery) ## use the same two predictors\n##D LdaPP(origin~., data=pottery) ## use all predictor variables\n##D \n##D ##\n##D ## Predict method\n##D data(pottery)\n##D fit <- LdaPP(origin~., data = pottery)\n##D predict(fit)\n## End(Not run)\n\n\n\n"} {"package":"rrcov","topic":"LdaRobust-class","snippet":"### Name: LdaRobust-class\n### Title: Class \"LdaRobust\" is a virtual base class for all robust LDA\n### classes\n### Aliases: LdaRobust-class\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"LdaRobust\")\n\n\n"} {"package":"rrcov","topic":"Linda-class","snippet":"### Name: Linda-class\n### Title: Class \"Linda\" - Robust method for LINear Discriminant Analysis\n### Aliases: Linda-class\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"Linda\")\n\n\n"} {"package":"rrcov","topic":"Linda","snippet":"### Name: Linda\n### Title: Robust Linear Discriminant Analysis\n### Aliases: Linda Linda.formula Linda.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## Example anorexia\nlibrary(MASS)\ndata(anorexia)\n\n## start with the classical estimates\nlda <- LdaClassic(Treat~., data=anorexia)\npredict(lda)@classification\n\n## try now the robust LDA with the default method (MCD with pooled within cov matrix)\nrlda <- Linda(Treat~., data= anorexia)\npredict(rlda)@classification\n\n## try the other methods\nLinda(Treat~., data= anorexia, method=\"mcdA\")\nLinda(Treat~., data= anorexia, method=\"mcdB\")\nLinda(Treat~., data= anorexia, method=\"mcdC\")\n\n## try the Hawkins&McLachlan method\n## use the default method\ngrp <- anorexia[,1]\ngrp <- as.factor(grp)\nx <- anorexia[,2:3]\nLinda(x, grp, method=\"fsa\")\n\n## Do DA with Linda and method mcdB or mcdC, when some classes\n## have very few observations. 
Use L1 median instead of MCD\n## to compute the group means (l1med=TRUE).\n\ndata(fish)\n\n# remove observation #14 containing missing value\nfish <- fish[-14,]\n\n# The height and width are calculated as percentages \n# of the third length variable\nfish[,5] <- fish[,5]*fish[,4]/100\nfish[,6] <- fish[,6]*fish[,4]/100\n\ntable(fish$Species) \nLinda(Species~., data=fish, l1med=TRUE)\nLinda(Species~., data=fish, method=\"mcdC\", l1med=TRUE)\n\n\n\n"} {"package":"rrcov","topic":"OsloTransect","snippet":"### Name: OsloTransect\n### Title: Oslo Transect Data\n### Aliases: OsloTransect\n### Keywords: datasets\n\n### ** Examples\n\ndata(OsloTransect)\nstr(OsloTransect)\n\n##\n## Log-transform the numerical part of the data, \n## choose the desired groups and variables and \n## perform the classical Wilks' Lambda test\n##\nOsloTransect[,14:38] <- log(OsloTransect[,14:38])\ngrp <- OsloTransect$X.FLITHO\nind <- which(grp ==\"CAMSED\" | grp == \"GNEIS_O\" |\n grp == \"GNEIS_R\" | grp==\"MAGM\")\n(cwl <- Wilks.test(X.FLITHO~K+P+Zn+Cu,data=OsloTransect[ind,]))\n\n##\n## Perform now the robust MCD based Wilks' Lambda test. \n## Use the already computed multiplication factor 'xd' and \n## degrees of freedom 'xq' for the approximate distribution.\n##\n\nxd <- -0.003708238\nxq <- 11.79073\n(mcdwl <- Wilks.test(X.FLITHO~K+P+Zn+Cu,data=OsloTransect[ind,], \n method=\"mcd\", xd=xd, xq=xq))\n\n\n"} {"package":"rrcov","topic":"Pca-class","snippet":"### Name: Pca-class\n### Title: Class \"Pca\" - virtual base class for all classic and robust PCA\n### classes\n### Aliases: Pca-class getCenter,Pca-method getScale,Pca-method\n### getEigenvalues,Pca-method getLoadings,Pca-method getPrcomp,Pca-method\n### getScores,Pca-method getSdev,Pca-method plot,Pca,missing-method\n### show,Pca-method predict,Pca-method predict screeplot,Pca-method\n### screeplot summary,Pca-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"Pca\")\n\n\n"} {"package":"rrcov","topic":"PcaClassic-class","snippet":"### Name: PcaClassic-class\n### Title: Class \"PcaClassic\" - Principal Components Analysis\n### Aliases: PcaClassic-class getQuan,PcaClassic-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaClassic\")\n\n\n"} {"package":"rrcov","topic":"PcaCov-class","snippet":"### Name: PcaCov-class\n### Title: Class \"PcaCov\" - Robust PCA based on a robust covariance matrix\n### Aliases: PcaCov-class getQuan,PcaCov-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaCov\")\n\n\n"} {"package":"rrcov","topic":"PcaCov","snippet":"### Name: PcaCov\n### Title: Robust PCA based on a robust covariance matrix\n### Aliases: PcaCov PcaCov.formula PcaCov.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\n data(hbk)\n pca <- PcaCov(hbk)\n pca\n\n## Compare with the classical PCA\n prcomp(hbk)\n\n## or \n PcaClassic(hbk)\n \n## If you want to print the scores too, use\n print(pca, print.x=TRUE)\n\n## Using the formula interface\n PcaCov(~., data=hbk)\n\n## To plot the results:\n\n plot(pca) # distance plot\n pca2 <- PcaCov(hbk, k=2) \n plot(pca2) # PCA diagnostic plot (or outlier map)\n \n## Use the standard plots available for prcomp and princomp\n screeplot(pca) \n biplot(pca) \n\n\n"} {"package":"rrcov","topic":"PcaGrid-class","snippet":"### Name: PcaGrid-class\n### Title: Class \"PcaGrid\" - Robust PCA using PP - GRID search Algorithm\n### Aliases: PcaGrid-class 
getQuan,PcaGrid-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaGrid\")\n\n\n"} {"package":"rrcov","topic":"PcaGrid","snippet":"### Name: PcaGrid\n### Title: Robust Principal Components based on Projection Pursuit (PP):\n### GRID search Algorithm\n### Aliases: PcaGrid PcaGrid.formula PcaGrid.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n # multivariate data with outliers\n library(mvtnorm)\n x <- rbind(rmvnorm(200, rep(0, 6), diag(c(5, rep(1,5)))),\n rmvnorm( 15, c(0, rep(20, 5)), diag(rep(1, 6))))\n # Here we calculate the principal components with PCAgrid\n pc <- PcaGrid(x, 6)\n # we could draw a biplot too:\n biplot(pc)\n \n # we could use another objective function, and \n # maybe only calculate the first three principal components:\n pc <- PcaGrid(x, 3, method=\"qn\")\n biplot(pc)\n \n # now we want to compare the results with the non-robust principal components\n pc <- PcaClassic(x, k=3)\n # again, a biplot for comparison:\n biplot(pc)\n\n\n"} {"package":"rrcov","topic":"PcaHubert-class","snippet":"### Name: PcaHubert-class\n### Title: Class \"PcaHubert\" - ROBust method for Principal Components\n### Analysis\n### Aliases: PcaHubert-class getQuan,PcaHubert-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaHubert\")\n\n\n"} {"package":"rrcov","topic":"PcaHubert","snippet":"### Name: PcaHubert\n### Title: ROBPCA - ROBust method for Principal Components Analysis\n### Aliases: PcaHubert PcaHubert.formula PcaHubert.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\n data(hbk)\n pca <- PcaHubert(hbk)\n pca\n\n## Compare with the classical PCA\n prcomp(hbk)\n\n## or \n PcaClassic(hbk)\n \n## If you want to print the scores too, use\n print(pca, print.x=TRUE)\n\n## Using the formula interface\n PcaHubert(~., data=hbk)\n\n## To plot the results:\n\n plot(pca) # distance plot\n pca2 <- PcaHubert(hbk, k=2) \n plot(pca2) # PCA diagnostic plot (or outlier map)\n \n## Use the standard plots available for prcomp and princomp\n screeplot(pca) \n biplot(pca) \n \n## Restore the covariance matrix \n py <- PcaHubert(hbk)\n cov.1 <- py@loadings %*% diag(py@eigenvalues) %*% t(py@loadings)\n cov.1 \n\n\n"} {"package":"rrcov","topic":"PcaLocantore-class","snippet":"### Name: PcaLocantore-class\n### Title: Class \"PcaLocantore\" Spherical Principal Components\n### Aliases: PcaLocantore-class getQuan,PcaLocantore-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaLocantore\")\n\n\n"} {"package":"rrcov","topic":"PcaLocantore","snippet":"### Name: PcaLocantore\n### Title: Spherical Principal Components\n### Aliases: PcaLocantore PcaLocantore.formula PcaLocantore.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\n data(hbk)\n pca <- PcaLocantore(hbk)\n pca\n\n## Compare with the classical PCA\n prcomp(hbk)\n\n## or \n PcaClassic(hbk)\n \n## If you want to print the scores too, use\n print(pca, print.x=TRUE)\n\n## Using the formula interface\n PcaLocantore(~., data=hbk)\n\n## To plot the results:\n\n plot(pca) # distance plot\n pca2 <- PcaLocantore(hbk, k=2) \n plot(pca2) # PCA diagnostic plot (or outlier map)\n \n## Use the standard plots available for prcomp and princomp\n screeplot(pca) \n biplot(pca) \n\n\n"} {"package":"rrcov","topic":"PcaProj-class","snippet":"### Name: PcaProj-class\n### Title: Class \"PcaProj\" - 
Robust PCA using PP - Croux and Ruiz-Gazen\n### (2005) algorithm\n### Aliases: PcaProj-class getQuan,PcaProj-method\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaProj\")\n\n\n"} {"package":"rrcov","topic":"PcaProj","snippet":"### Name: PcaProj\n### Title: Robust Principal Components based on Projection Pursuit (PP):\n### Croux and Ruiz-Gazen (2005) algorithm\n### Aliases: PcaProj PcaProj.formula PcaProj.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n # multivariate data with outliers\n library(mvtnorm)\n x <- rbind(rmvnorm(200, rep(0, 6), diag(c(5, rep(1,5)))),\n rmvnorm( 15, c(0, rep(20, 5)), diag(rep(1, 6))))\n # Here we calculate the principal components with PcaProj\n pc <- PcaProj(x, 6)\n # we could draw a biplot too:\n biplot(pc)\n\n # we could use another calculation method and another objective function, and\n # maybe only calculate the first three principal components:\n pc <- PcaProj(x, k=3, method=\"qn\", CalcMethod=\"sphere\")\n biplot(pc)\n\n # now we want to compare the results with the non-robust principal components\n pc <- PcaClassic(x, k=3)\n # again, a biplot for comparison:\n biplot(pc)\n\n\n"} {"package":"rrcov","topic":"PcaRobust-class","snippet":"### Name: PcaRobust-class\n### Title: Class \"PcaRobust\" is a virtual base class for all robust PCA\n### classes\n### Aliases: PcaRobust-class\n### Keywords: robust multivariate\n\n### ** Examples\n\nshowClass(\"PcaRobust\")\n\n\n"} {"package":"rrcov","topic":"PredictLda-class","snippet":"### Name: PredictLda-class\n### Title: Class \"PredictLda\" - prediction of \"Lda\" objects\n### Aliases: PredictLda-class show,PredictLda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"PredictLda\")\n\n\n"} {"package":"rrcov","topic":"PredictQda-class","snippet":"### Name: PredictQda-class\n### Title: Class \"PredictQda\" - prediction of \"Qda\" objects\n### Aliases: PredictQda-class show,PredictQda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"PredictQda\")\n\n\n"} {"package":"rrcov","topic":"Qda-class","snippet":"### Name: Qda-class\n### Title: Class \"Qda\" - virtual base class for all classic and robust QDA\n### classes\n### Aliases: Qda-class predict,Qda-method show,Qda-method\n### summary,Qda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"Qda\")\n\n\n"} {"package":"rrcov","topic":"QdaClassic-class","snippet":"### Name: QdaClassic-class\n### Title: Class \"QdaClassic\" - Quadratic Discriminant Analysis\n### Aliases: QdaClassic-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"QdaClassic\")\n\n\n"} {"package":"rrcov","topic":"QdaCov-class","snippet":"### Name: QdaCov-class\n### Title: Class \"QdaCov\" - Robust methods for Quadratic Discriminant\n### Analysis\n### Aliases: QdaCov-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"QdaCov\")\n\n\n"} {"package":"rrcov","topic":"QdaCov","snippet":"### Name: QdaCov\n### Title: Robust Quadratic Discriminant Analysis\n### Aliases: QdaCov QdaCov.formula QdaCov.default\n### Keywords: robust multivariate\n\n### ** Examples\n\n## Example anorexia\nlibrary(MASS)\ndata(anorexia)\n\n## start with the classical estimates\nqda <- QdaClassic(Treat~., data=anorexia)\npredict(qda)@classification\n\n## try now the robust QDA with the default method (MCD with pooled within cov matrix)\nrqda <- QdaCov(Treat~., data= anorexia)\npredict(rqda)@classification\n\n## try the other 
methods\nQdaCov(Treat~., data= anorexia, method=\"sde\")\nQdaCov(Treat~., data= anorexia, method=\"M\")\nQdaCov(Treat~., data= anorexia, method=CovControlOgk())\n\n\n\n"} {"package":"rrcov","topic":"QdaRobust-class","snippet":"### Name: QdaRobust-class\n### Title: Class \"QdaRobust\" is a virtual base class for all robust QDA\n### classes\n### Aliases: QdaRobust-class\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"QdaRobust\")\n\n\n"} {"package":"rrcov","topic":"SummaryCov-class","snippet":"### Name: SummaryCov-class\n### Title: Class \"SummaryCov\" - summary of \"Cov\" objects\n### Aliases: SummaryCov-class getCenter,SummaryCov-method\n### getCov,SummaryCov-method getDistance,SummaryCov-method\n### getEvals,SummaryCov-method isClassic,SummaryCov-method\n### show,SummaryCov-method\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"SummaryCov\")\n\n\n"} {"package":"rrcov","topic":"SummaryCovRobust-class","snippet":"### Name: SummaryCovRobust-class\n### Title: Class \"SummaryCovRobust\" - summary of \"CovRobust\" objects\n### Aliases: SummaryCovRobust-class isClassic,SummaryCovRobust-method\n### show,SummaryCovRobust-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\ncv <- CovMest(hbk.x)\ncv\nsummary(cv)\n\n\n"} {"package":"rrcov","topic":"SummaryLda-class","snippet":"### Name: SummaryLda-class\n### Title: Class \"SummaryLda\" - summary of \"Lda\" objects\n### Aliases: SummaryLda-class show,SummaryLda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"SummaryLda\")\n\n\n"} {"package":"rrcov","topic":"SummaryPca-class","snippet":"### Name: SummaryPca-class\n### Title: Class \"SummaryPca\" - summary of \"Pca\" objects\n### Aliases: SummaryPca-class show,SummaryPca-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"SummaryPca\")\n\n\n"} {"package":"rrcov","topic":"SummaryQda-class","snippet":"### Name: SummaryQda-class\n### Title: Class \"SummaryQda\" - summary of \"Qda\" objects\n### Aliases: SummaryQda-class show,SummaryQda-method\n### Keywords: classes robust multivariate\n\n### ** Examples\n\nshowClass(\"SummaryQda\")\n\n\n"} {"package":"rrcov","topic":"T2.test","snippet":"### Name: T2.test\n### Title: Robust Hotelling T2 test\n### Aliases: T2.test T2.test.default T2.test.formula\n### Keywords: htest multivariate\n\n### ** Examples\n\n\n## One-sample classical test\ndata(delivery)\ndelivery.x <- delivery[,1:2]\nT2.test(delivery.x)\n\n## One-sample robust test\ndata(delivery)\ndelivery.x <- delivery[,1:2]\nT2.test(delivery.x, method=\"mcd\")\n\n## Two-sample classical test\ndata(hemophilia)\ngrp <-as.factor(hemophilia[,3])\nx <- hemophilia[which(grp==levels(grp)[1]),1:2]\ny <- hemophilia[which(grp==levels(grp)[2]),1:2]\nT2.test(x,y)\n\n## or using the formula interface\nT2.test(as.matrix(hemophilia[,-3])~hemophilia[,3])\n\n\n## Not run: \n##D ## Two-sample robust test\n##D T2.test(x,y, method=\"mcd\") ## error - not yet implemented\n## End(Not run)\n\n"} {"package":"rrcov","topic":"Wilks.test","snippet":"### Name: Wilks.test\n### Title: Classical and Robust One-way MANOVA: Wilks Lambda\n### Aliases: Wilks.test Wilks.test.default Wilks.test.data.frame\n### Wilks.test.formula Wilks.test.matrix model.frame.Wilks.test\n### Keywords: multivariate robust\n\n### ** Examples\n\nlibrary(MASS)\ndata(anorexia)\ngrp <- as.factor(anorexia[,1])\nx <- as.matrix(anorexia[,2:3])\n## Using the default interface, classical test\nWilks.test(x, 
grouping=grp, method=\"c\")\n\n## Using the default interface, rank based test\nWilks.test(x, grouping=grp, method=\"rank\")\n\n## For this data set (p=2, n=n1+n2+n3=29+26+17) the following multiplication\n## factor xd and degrees of freedom xq were computed for the MCD estimates\n## with alpha=0.5\nxd <- -0.02162666\nxq <- 3.63971\nWilks.test(x, grouping=grp, method=\"mcd\", xd=xd, xq=xq)\n\n## Now the same with the formula interface\nWilks.test(Treat~Prewt+Postwt, data=anorexia, method=\"mcd\", xd=xd, xq=xq)\n\n## Iris data with formula interface\ndata(iris)\nWilks.test(Species~., data=iris, method=\"c\")\n\n## and with default interface\nWilks.test(iris[,1:4],grouping=iris[,5], method=\"c\")\n\n# hemophilia data - classical, rank and MCD test\ndata(hemophilia)\nhemophilia$gr <- as.factor(hemophilia$gr)\n\nWilks.test(gr~., data=hemophilia, method=\"c\")\nWilks.test(gr~., data=hemophilia, method=\"rank\")\n## already simulated parameters for MCD with alpha=0.5\nxd <- -0.01805436\nxq <- 1.950301\nWilks.test(gr~., data=hemophilia, xd=xd, xq=xq, method=\"mcd\")\n\n\n\n"} {"package":"rrcov","topic":"biplot","snippet":"### Name: biplot-methods\n### Title: Biplot for Principal Components (objects of class 'Pca')\n### Aliases: biplot biplot-methods biplot,ANY-method biplot,Pca-method\n### Keywords: multivariate hplot\n\n### ** Examples\n\nrequire(graphics)\nbiplot(PcaClassic(USArrests, k=2))\n\n\n"} {"package":"rrcov","topic":"bus","snippet":"### Name: bus\n### Title: Automatic vehicle recognition data\n### Aliases: bus\n### Keywords: datasets\n\n### ** Examples\n\n ## Reproduce Table 6.3 from Maronna et al. (2006), page 213\n data(bus)\n bus <- as.matrix(bus)\n \n ## calculate MADN for each variable\n xmad <- apply(bus, 2, mad) \n cat(\"\\nMin, Max of MADN: \", min(xmad), max(xmad), \"\\n\")\n\n\n ## MADN vary between 0 (for variable 9) and 34. Therefore exclude \n ## variable 9 and divide the remaining variables by their MADNs.\n bus1 <- bus[, -9]\n madbus <- apply(bus1, 2, mad)\n bus2 <- sweep(bus1, 2, madbus, \"/\", check.margin = FALSE)\n\n ## Compute classical and robust PCA (Spherical/Locantore, Hubert, MCD and OGK) \n pca <- PcaClassic(bus2)\n rpca <- PcaLocantore(bus2)\n pcaHubert <- PcaHubert(bus2, k=17, kmax=17, mcd=FALSE)\n pcamcd <- PcaCov(bus2, cov.control=CovControlMcd())\n pcaogk <- PcaCov(bus2, cov.control=CovControlOgk())\n\n ev <- getEigenvalues(pca)\n evrob <- getEigenvalues(rpca)\n evhub <- getEigenvalues(pcaHubert)\n evmcd <- getEigenvalues(pcamcd)\n evogk <- getEigenvalues(pcaogk)\n\n uvar <- matrix(nrow=6, ncol=6)\n svar <- sum(ev)\n svarrob <- sum(evrob)\n svarhub <- sum(evhub)\n svarmcd <- sum(evmcd)\n svarogk <- sum(evogk)\n for(i in 1:6){\n uvar[i,1] <- i\n uvar[i,2] <- round((svar - sum(ev[1:i]))/svar, 3)\n uvar[i,3] <- round((svarrob - sum(evrob[1:i]))/svarrob, 3)\n uvar[i,4] <- round((svarhub - sum(evhub[1:i]))/svarhub, 3)\n uvar[i,5] <- round((svarmcd - sum(evmcd[1:i]))/svarmcd, 3)\n uvar[i,6] <- round((svarogk - sum(evogk[1:i]))/svarogk, 3)\n }\n uvar <- as.data.frame(uvar)\n names(uvar) <- c(\"q\", \"Classical\",\"Spherical\", \"Hubert\", \"MCD\", \"OGK\")\n cat(\"\\nBus data: proportion of unexplained variability for q components\\n\")\n print(uvar)\n \n ## Reproduce Table 6.4 from Maronna et al. 
(2006), page 214\n ##\n ## Compute classical and robust PCA extracting only the first 3 components\n ## and take the squared orthogonal distances to the 3-dimensional hyperplane\n ##\n pca3 <- PcaClassic(bus2, k=3) # classical\n rpca3 <- PcaLocantore(bus2, k=3) # spherical (Locantore, 1999)\n hpca3 <- PcaHubert(bus2, k=3) # Hubert\n dist <- pca3@od^2\n rdist <- rpca3@od^2\n hdist <- hpca3@od^2\n\n ## calculate the quantiles of the distances to the 3-dimensional hyperplane\n qclass <- round(quantile(dist, probs = seq(0, 1, 0.1)[-c(1,11)]), 1)\n qspc <- round(quantile(rdist, probs = seq(0, 1, 0.1)[-c(1,11)]), 1)\n qhubert <- round(quantile(hdist, probs = seq(0, 1, 0.1)[-c(1,11)]), 1)\n qq <- cbind(rbind(qclass, qspc, qhubert), round(c(max(dist), max(rdist), max(hdist)), 0))\n colnames(qq)[10] <- \"Max\"\n rownames(qq) <- c(\"Classical\", \"Spherical\", \"Hubert\")\n cat(\"\\nBus data: quantiles of distances to hyperplane\\n\")\n print(qq)\n\n ## \n ## Reproduce Fig 6.1 from Maronna et al. (2006), page 214\n ## \n cat(\"\\nBus data: Q-Q plot of logs of distances to hyperplane (k=3) \n \\nfrom classical and robust estimates. The line is the identity diagonal\\n\")\n plot(sort(log(dist)), sort(log(rdist)), xlab=\"classical\", ylab=\"robust\")\n lines(sort(log(dist)), sort(log(dist)))\n \n \n\n\n"} {"package":"rrcov","topic":"bushmiss","snippet":"### Name: bushmiss\n### Title: Campbell Bushfire Data with added missing data items\n### Aliases: bushmiss\n### Keywords: datasets\n\n### ** Examples\n\n## The following code will result in exactly the same output\n## as the one obtained from the original data set\ndata(bushmiss)\nbf <- bushmiss[bushmiss$MPROB==0,1:5]\nplot(bf)\ncovMcd(bf)\n\n\n## Not run: \n##D ## This is the code with which the missing data were created:\n##D ##\n##D ## Creates a data set with missing values (for testing purposes)\n##D ## from a complete data set 'x'. 
The probability of\n##D ## each item being missing is 'pr' (Bernoulli trials).\n##D ##\n##D getmiss <- function(x, pr=0.1)\n##D {\n##D n <- nrow(x)\n##D p <- ncol(x)\n##D done <- FALSE\n##D iter <- 0\n##D while(iter <= 50){\n##D bt <- rbinom(n*p, 1, pr)\n##D btmat <- matrix(bt, nrow=n)\n##D btmiss <- ifelse(btmat==1, NA, 0)\n##D y <- x+btmiss\n##D if(length(which(rowSums(nanmap(y)) == p)) == 0)\n##D return (y)\n##D iter <- iter + 1\n##D }\n##D y\n##D }\n## End(Not run)\n\n\n\n"} {"package":"rrcov","topic":"diabetes","snippet":"### Name: diabetes\n### Title: Reaven and Miller diabetes data\n### Aliases: diabetes\n### Keywords: datasets\n\n### ** Examples\n\ndata(diabetes)\n(cc <- Linda(group~insulin+glucose+sspg, data=diabetes))\n(pr <- predict(cc))\n\n\n"} {"package":"rrcov","topic":"fish","snippet":"### Name: fish\n### Title: Fish Catch Data Set\n### Aliases: fish\n### Keywords: datasets\n\n### ** Examples\n\n data(fish)\n\n # remove observation #14 containing missing value\n fish <- fish[-14,]\n\n # The height and width are calculated as percentages \n # of the third length variable\n fish[,5] <- fish[,5]*fish[,4]/100\n fish[,6] <- fish[,6]*fish[,4]/100\n \n # plot a matrix of scatterplots\n pairs(fish[1:6],\n main=\"Fish Catch Data\",\n pch=21,\n bg=c(\"red\", \"green3\", \"blue\", \"yellow\", \"magenta\", \"violet\", \n \"turquoise\")[unclass(fish$Species)])\n\n\n\n"} {"package":"rrcov","topic":"fruit","snippet":"### Name: fruit\n### Title: Fruit data set\n### Aliases: fruit\n### Keywords: datasets\n\n### ** Examples\n\n\n data(fruit)\n table(fruit$cultivar)\n\n\n\n"} {"package":"rrcov","topic":"getEllipse","snippet":"### Name: getEllipse\n### Title: Calculates the points for drawing a confidence ellipsoid\n### Aliases: getEllipse\n\n### ** Examples\n\n\ndata(hbk)\ncc <- cov.wt(hbk)\ne1 <- getEllipse(loc=cc$center[1:2], cov=cc$cov[1:2,1:2])\ne2 <- getEllipse(loc=cc$center[1:2], cov=cc$cov[1:2,1:2], crit=0.99)\nplot(X2~X1, data=hbk,\n xlim=c(min(X1, e1[,1], e2[,1]), max(X1,e1[,1], e2[,1])),\n ylim=c(min(X2, e1[,2], e2[,2]), max(X2,e1[,2], e2[,2])))\nlines(e1, type=\"l\", lty=1, col=\"red\")\nlines(e2, type=\"l\", lty=2, col=\"blue\")\nlegend(\"topleft\", legend=c(0.975, 0.99), lty=1:2, col=c(\"red\", \"blue\"))\n\n\n\n"} {"package":"rrcov","topic":"hemophilia","snippet":"### Name: hemophilia\n### Title: Hemophilia Data\n### Aliases: hemophilia\n### Keywords: datasets\n\n### ** Examples\n\ndata(hemophilia)\nplot(AHFantigen~AHFactivity, data=hemophilia, col=as.numeric(as.factor(gr))+1)\n##\n## Compute robust location and covariance matrix and \n## plot the tolerance ellipses\n(mcd <- CovMcd(hemophilia[,1:2]))\ncol <- ifelse(hemophilia$gr == \"carrier\", 2, 3) ## define colours for the groups\nplot(mcd, which=\"tolEllipsePlot\", class=TRUE, col=col)\n\n\n\n"} {"package":"rrcov","topic":"ionosphere","snippet":"### Name: ionosphere\n### Title: Johns Hopkins University Ionosphere database.\n### Aliases: ionosphere\n\n### ** Examples\n\n data(ionosphere)\n ionosphere[, 1:6] |> pairs()\n\n\n"} {"package":"rrcov","topic":"isSingular","snippet":"### Name: isSingular-methods\n### Title: Check if a covariance matrix (object of class 'Cov') is singular\n### Aliases: isSingular isSingular-methods isSingular,ANY-method\n### isSingular,Cov-method\n### Keywords: multivariate\n\n### ** Examples\n\n\ndata(hbk)\ncc <- CovClassic(hbk)\nisSingular(cc)\n\n\n"} {"package":"rrcov","topic":"lmom32","snippet":"### Name: lmom32\n### Title: Hosking and Wallis Data Set, Table 3.2\n### Aliases: lmom32\n### Keywords: 
datasets\n\n### ** Examples\n\n data(lmom32)\n\n # plot a matrix of scatterplots\n pairs(lmom32,\n main=\"Hosking and Wallis Data Set, Table 3.2\",\n pch=21,\n bg=c(\"red\", \"green3\", \"blue\"))\n\n mcd<-CovMcd(lmom32)\n mcd\n plot(mcd, which=\"dist\", class=TRUE)\n plot(mcd, which=\"dd\", class=TRUE)\n\n ## identify the discordant sites using robust distances and compare \n ## to the classical ones\n mcd <- CovMcd(lmom32)\n rd <- sqrt(getDistance(mcd))\n ccov <- CovClassic(lmom32)\n cd <- sqrt(getDistance(ccov))\n r.out <- which(rd > sqrt(qchisq(0.975,3)))\n c.out <- which(cd > sqrt(qchisq(0.975,3)))\n cat(\"Robust: \", length(r.out), \" outliers: \", r.out,\"\\n\")\n cat(\"Classical: \", length(c.out), \" outliers: \", c.out,\"\\n\")\n\n\n"} {"package":"rrcov","topic":"lmom33","snippet":"### Name: lmom33\n### Title: Hosking and Wallis Data Set, Table 3.3\n### Aliases: lmom33\n### Keywords: datasets\n\n### ** Examples\n\n data(lmom33)\n\n # plot a matrix of scatterplots\n pairs(lmom33,\n main=\"Hosking and Wallis Data Set, Table 3.3\",\n pch=21,\n bg=c(\"red\", \"green3\", \"blue\"))\n\n mcd<-CovMcd(lmom33)\n mcd\n plot(mcd, which=\"dist\", class=TRUE)\n plot(mcd, which=\"dd\", class=TRUE)\n\n ## identify the discordant sites using robust distances and compare \n ## to the classical ones\n mcd <- CovMcd(lmom33)\n rd <- sqrt(getDistance(mcd))\n ccov <- CovClassic(lmom33)\n cd <- sqrt(getDistance(ccov))\n r.out <- which(rd > sqrt(qchisq(0.975,3)))\n c.out <- which(cd > sqrt(qchisq(0.975,3)))\n cat(\"Robust: \", length(r.out), \" outliers: \", r.out,\"\\n\")\n cat(\"Classical: \", length(c.out), \" outliers: \", c.out,\"\\n\")\n\n\n\n"} {"package":"rrcov","topic":"machines","snippet":"### Name: machines\n### Title: Computer Hardware\n### Aliases: machines\n### Keywords: datasets\n\n### ** Examples\n\n\n data(machines)\n\n ## Compute the medcouple of each variable of the Computer hardware data\n data.frame(MC=round(apply(machines, 2, mc),2))\n\n ## Plot a pairwise scatterplot matrix\n pairs(machines[,1:6])\n\n mcd <- CovMcd(machines[,1:6])\n plot(mcd, which=\"pairs\")\n\n ## Remove the rownames (too long)\n rownames(machines) <- NULL\n\n ## Start with robust PCA based on MCD (P << n)\n (pca1 <- PcaHubert(machines, k=3))\n plot(pca1, main=\"ROBPCA-MCD\", off=0.03)\n\n ## PCA with the projection algorithm of Hubert\n (pca2 <- PcaHubert(machines, k=3, mcd=FALSE))\n plot(pca2, main=\"ROBPCA-SD\", off=0.03)\n\n ## PCA with the adjusted for skewness algorithm of Hubert et al (2009)\n (pca3 <- PcaHubert(machines, k=3, mcd=FALSE, skew=TRUE))\n plot(pca3, main=\"ROBPCA-AO\", off=0.03)\n\n\n\n"} {"package":"rrcov","topic":"maryo","snippet":"### Name: maryo\n### Title: Maronna and Yohai Artificial Data\n### Aliases: maryo\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(maryo)\ngetCorr(CovClassic(maryo)) ## the sample correlation is 0.81\n\n## Modify 10% of the data in the following way:\n## modify two points (out of 20) by interchanging the \n## largest and smallest value of the first coordinate\nimin <- which(maryo[,1]==min(maryo[,1])) # imin = 9\nimax <- which(maryo[,1]==max(maryo[,1])) # imax = 19\nmaryo1 <- maryo\nmaryo1[imin,1] <- maryo[imax,1]\nmaryo1[imax,1] <- maryo[imin,1]\n\n## The sample correlation becomes 0.05\nplot(maryo1)\ngetCorr(CovClassic(maryo1)) ## the sample correlation becomes 0.05\ngetCorr(CovMcd(maryo1)) ## the (reweighted) MCD correlation is 0.79\n\n\n\n"} {"package":"rrcov","topic":"octane","snippet":"### Name: octane\n### Title: Octane data\n### Aliases: octane\n### 
Keywords: datasets\n\n### ** Examples\n\ndata(octane)\n\noctane <- octane[, -1] # remove the dependent variable y\n\npca <- PcaHubert(octane, k=10)\nscreeplot(pca, type=\"lines\")\n\npca2 <- PcaHubert(octane, k=2)\nplot(pca2, id.n.sd=6)\n\npca7 <- PcaHubert(octane, k=7)\nplot(pca7, id.n.sd=6)\n\n\n\n"} {"package":"rrcov","topic":"olitos","snippet":"### Name: olitos\n### Title: Olive Oil Data\n### Aliases: olitos\n### Keywords: datasets\n\n### ** Examples\n\ndata(olitos)\ncc <- Linda(grp~., data=olitos, method=\"mcdC\", l1med=TRUE)\ncc\npr <- predict(cc)\ntt <- mtxconfusion(cc@grp, pr@classification, printit=TRUE)\n\n\n"} {"package":"rrcov","topic":"pca.distances","snippet":"### Name: pca.distances\n### Title: Compute score and orthogonal distances for Principal Components\n### (objects of class 'Pca')\n### Aliases: pca.distances\n### Keywords: robust multivariate\n\n### ** Examples\n\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\ndata(hbk)\npca <- PcaHubert(hbk)\npca.distances(pca, hbk, rankMM(hbk))\n\n\n"} {"package":"rrcov","topic":"pca.scoreplot","snippet":"### Name: pca.scoreplot\n### Title: Score plot for Principal Components (objects of class 'Pca')\n### Aliases: pca.scoreplot\n### Keywords: robust multivariate\n\n### ** Examples\n\nrequire(graphics)\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\ndata(hbk)\npca <- PcaHubert(hbk)\npca\npca.scoreplot(pca)\n\n\n"} {"package":"rrcov","topic":"plot-methods","snippet":"### Name: plot-methods\n### Title: Methods for Function 'plot' in Package 'rrcov'\n### Aliases: plot-methods plot,CovClassic-method\n### plot,CovClassic,missing-method plot,CovRobust-method\n### plot,CovRobust,missing-method\n### Keywords: methods\n\n### ** Examples\n\ndata(hbk)\nhbk.x <- data.matrix(hbk[, 1:3])\ncv <- CovClassic(hbk.x)\nplot(cv)\nrcv <- CovMest(hbk.x)\nplot(rcv)\n\n\n"} {"package":"rrcov","topic":"pottery","snippet":"### Name: pottery\n### Title: Archaic Greek Pottery data\n### Aliases: pottery pottery.test\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(pottery)\nx <- pottery[,c(\"MG\", \"CA\")]\ngrp <- pottery$origin\n\n##\n## Compute robust location and covariance matrix and\n## plot the tolerance ellipses\nlibrary(rrcov)\n(mcd <- CovMcd(x))\ncol <- c(3,4)\ngcol <- ifelse(grp == \"Attic\", col[1], col[2])\ngpch <- ifelse(grp == \"Attic\", 16, 1)\nplot(mcd, which=\"tolEllipsePlot\", class=TRUE, col=gcol, pch=gpch)\n\n##\n## Perform classical LDA and plot the data, 0.975 tolerance ellipses\n## and LDA separation line\n##\nx <- pottery[,c(\"MG\", \"CA\")]\ngrp <- pottery$origin\nlda <- LdaClassic(x, grp)\nlda\ne1 <- getEllipse(loc=lda@center[1,], cov=lda@cov)\ne2 <- getEllipse(loc=lda@center[2,], cov=lda@cov)\n\nplot(CA~MG, data=pottery, col=gcol, pch=gpch,\n xlim=c(min(MG,e1[,1], e2[,1]), max(MG,e1[,1], e2[,1])),\n ylim=c(min(CA,e1[,2], e2[,2]), max(CA,e1[,2], e2[,2])))\n\nab <- lda@ldf[1,] - lda@ldf[2,]\ncc <- lda@ldfconst[1] - lda@ldfconst[2]\nabline(a=-cc/ab[2], b=-ab[1]/ab[2], col=2, lwd=2)\n\nlines(e1, type=\"l\", col=col[1])\nlines(e2, type=\"l\", col=col[2])\n\n##\n## Perform robust (MCD) LDA and plot data, classical and\n## robust separation line\n##\nplot(CA~MG, data=pottery, col=gcol, pch=gpch)\nlda <- LdaClassic(x, grp)\nab <- lda@ldf[1,] - lda@ldf[2,]\ncc <- lda@ldfconst[1] - lda@ldfconst[2]\nabline(a=-cc/ab[2], b=-ab[1]/ab[2], col=2, lwd=2)\nabline(a=-cc/ab[2], b=-ab[1]/ab[2], col=4, lwd=2)\n\nrlda <- Linda(x, grp, method=\"mcd\")\nrlda\nab <- rlda@ldf[1,] - rlda@ldf[2,]\ncc 
<- rlda@ldfconst[1] - rlda@ldfconst[2]\nabline(a=-cc/ab[2], b=-ab[1]/ab[2], col=2, lwd=2)\n\n\n\n"} {"package":"rrcov","topic":"salmon","snippet":"### Name: salmon\n### Title: Salmon data\n### Aliases: salmon\n### Keywords: datasets\n\n### ** Examples\n\ndata(salmon)\n\n\n"} {"package":"rrcov","topic":"scorePlot","snippet":"### Name: scorePlot-methods\n### Title: Score plot for Principal Components (objects of class 'Pca')\n### Aliases: scorePlot scorePlot-methods scorePlot,ANY-method\n### scorePlot,Pca-method\n### Keywords: multivariate hplot\n\n### ** Examples\n\nrequire(graphics)\n\n## PCA of the Hawkins Bradu Kass's Artificial Data\n## using all 4 variables\ndata(hbk)\npca <- PcaHubert(hbk)\npca\n\nscorePlot(pca)\n\n\n"} {"package":"rrcov","topic":"soil","snippet":"### Name: soil\n### Title: Exchangable cations in forest soil data set\n### Aliases: soil\n### Keywords: datasets\n\n### ** Examples\n\ndata(soil)\nsoil1983 <- soil[soil$D == 0, -2] # only 1983, remove column D (always 0)\n\n(cc <- Linda(F~., data=soil))\n(pr <- predict(cc))\npr@classification\n\n\n\n"} {"package":"rrcov","topic":"un86","snippet":"### Name: un86\n### Title: United Nations Data - 1986\n### Aliases: un86\n### Keywords: datasets\n\n### ** Examples\n\ndata(un86)\npairs(un86)\n\n\n"} {"package":"rrcov","topic":"wages","snippet":"### Name: wages\n### Title: Wages and Hours\n### Aliases: wages\n### Keywords: datasets\n\n### ** Examples\n\n data(wages)\n names(wages)\n x <- as.matrix(wages)\n ok <- is.finite(x %*% rep(1, ncol(x)))\n wages <- wages[ok, , drop = FALSE]\n wages.lm <- lm(HRS~AGE, data=wages)\n plot(HRS ~ AGE, data = wages)\n abline(wages.lm)\n class(wages.lm)\n names(wages.lm)\n summary(wages.lm)\n \n wages.mm <- lmrob(HRS~AGE, data=wages)\n plot(HRS ~ AGE, data = wages)\n abline(wages.mm)\n class(wages.mm)\n names(wages.mm)\n summary(wages.mm) \n\n\n"} {"package":"rrcov","topic":"wolves","snippet":"### Name: wolves\n### Title: Skull dimensions of the wolf _Canis lupus_ L.\n### Aliases: wolves\n### Keywords: datasets\n\n### ** Examples\n\n\n data(wolves)\n\n ## Remove the factors location and sex which we will not use for now\n x <- wolves[,-c(2:3)]\n\n ## Plot a pairwise scaterplot matrix\n pairs(x[,2:10])\n\n mcd <- CovMcd(x[, 2:10])\n plot(mcd, which=\"pairs\")\n\n lda <- LdaClassic(class~., data=x)\n lda@center\n lda@cov\n\n predict(lda)\n\n\n\n"} {"package":"quadraticSD","topic":"runApp","snippet":"### Name: runApp\n### Title: Visualizing the SD using a Quadratic Curve\n### Aliases: runApp\n\n### ** Examples\n\n## No test: \n data <- c(12,13,15,17,20,21,23)\n runApp(data)\n #end of example\n \n## End(No test) \n\n\n"} {"package":"uniReg","topic":"equiknots","snippet":"### Name: equiknots\n### Title: Determine the knot sequence.\n### Aliases: equiknots\n\n### ** Examples\n\nequiknots(0,5,3,3,TRUE)\nequiknots(0,5,3,3,FALSE)\n\n\n"} {"package":"uniReg","topic":"plot.unireg","snippet":"### Name: plot.unireg\n### Title: Plot method for 'unireg' objects.\n### Aliases: plot.unireg\n### Keywords: models regression nonparametric\n\n### ** Examples\n\nx <- sort(rep(0:5,20)) \nn <- length(x) \nset.seed(41333)\nfunc <- function(mu){rnorm(1,mu,0.05)}\ny <- sapply(dchisq(x,3),func)\n\n# fit with default settings\nfit <- unireg(x, y, g=5)\n# short overview of the fitted spline\nfit\n\n# plot of fitted spline with and without data\nplot(fit, col=\"orange\")\nplot(fit, onlySpline=TRUE)\n\n\n"} {"package":"uniReg","topic":"points.unireg","snippet":"### Name: points.unireg\n### Title: Points method for 'unireg' 
objects.\n### Aliases: points.unireg\n### Keywords: models regression nonparametric\n\n### ** Examples\n\nx <- sort(rep(0:5,20)) \nn <- length(x) \nset.seed(41333)\nfunc <- function(mu){rnorm(1,mu,0.05)}\ny <- sapply(dchisq(x,3),func)\n\n# plot of data\nplot(jitter(x), y, xlab=\"x (jittered)\")\n\n# fit with default settings\nfit <- unireg(x, y, g=5)\n# short overview of the fitted spline\nfit\n\n# plot of true and fitted functions\nplot(jitter(x), y, xlab=\"x (jittered)\")\ncurve(dchisq(x,3), 0, 5, type=\"l\", col=\"grey\", lwd=2, add=TRUE)\npoints(fit, lwd=2, col=\"orange\")\nlegend(\"bottomright\", legend = c(\"true mean function\", \n \"difference penalized unimodal fit\"),\n col=c(\"grey\",\"orange\"),lwd=c(2,2))\n\n\n"} {"package":"uniReg","topic":"predict.unireg","snippet":"### Name: predict.unireg\n### Title: Predict method for 'unireg' objects.\n### Aliases: predict.unireg\n### Keywords: models regression nonparametric\n\n### ** Examples\n\nx <- sort(rep(0:5,20)) \nn <- length(x) \nset.seed(41333)\nfunc <- function(mu){rnorm(1,mu,0.05)}\ny <- sapply(dchisq(x,3),func)\n\n# plot of data\nplot(jitter(x), y, xlab=\"x (jittered)\")\n\n# fit with default settings\nfit <- unireg(x, y, g=5)\n# short overview of the fitted spline\nfit\n\n# prediction at interim values\npredict(fit, c(1.5,2.5,3.5,4.5))\n\n\n"} {"package":"uniReg","topic":"print.unireg","snippet":"### Name: print.unireg\n### Title: Print method for 'unireg' objects.\n### Aliases: print.unireg\n### Keywords: models regression nonparametric\n\n### ** Examples\n\nx <- sort(rep(0:5,20)) \nn <- length(x) \nset.seed(41333)\nfunc <- function(mu){rnorm(1,mu,0.05)}\ny <- sapply(dchisq(x,3),func)\n\n# plot of data\nplot(jitter(x), y, xlab=\"x (jittered)\")\n\n# fit with default settings\nfit <- unireg(x, y, g=5)\n# short overview of the fitted spline\nfit\n\n\n"} {"package":"uniReg","topic":"unimat","snippet":"### Name: unimat\n### Title: Create the matrix of unimodality constraints.\n### Aliases: unimat\n\n### ** Examples\n\nunimat(4,2)\nunimat(5,3)\n\n\n"} {"package":"uniReg","topic":"unireg","snippet":"### Name: unireg\n### Title: Fitting a unimodal penalized spline regression.\n### Aliases: unireg\n### Keywords: models regression nonparametric\n\n### ** Examples\n\nx <- sort(rep(0:5,20)) \nn <- length(x) \nset.seed(41333)\nfunc <- function(mu){rnorm(1,mu,0.05)}\ny <- sapply(dchisq(x,3),func)\n\n# plot of data\nplot(jitter(x), y, xlab=\"x (jittered)\")\n\n# fit with default settings\nfit <- unireg(x, y, g=5)\n# short overview of the fitted spline\nfit\n\n# prediction at interim values\npredict(fit, c(1.5,2.5,3.5,4.5))\n\n# fit without penalty (we can use at most g=2 inner knots if k=3)\nfit2 <- unireg(x, y, penalty=\"none\", g=2)\n\n# plot of fitted spline with or without data\nplot(fit2)\nplot(fit2, onlySpline=TRUE)\n\n# fit without penalty and without constraint \n# (does not differ from fit2 with constraint in this case)\nfit3 <- unireg(x, y, penalty=\"none\", g=2, constr=\"none\")\n\n# plot of true and fitted functions\nplot(jitter(x), y, xlab=\"x (jittered)\")\ncurve(dchisq(x,3), 0, 5, type=\"l\", col=\"grey\", lwd=2, add=TRUE)\npoints(fit, lwd=2)\npoints(fit2, col=\"blue\", lwd=2)\npoints(fit3, col=\"red\", lwd=2)\nlegend(\"bottomright\", legend = c(\"true mean function\", \n \"difference penalized unimodal fit\", \n \"unpenalized fit (with and without constraint)\"),\n col=c(\"grey\",\"black\",\"red\"),lwd=c(2,2,2))\n\n# estimated variance\nfit$sigmasq\nfit2$sigmasq\n\n## Not run: \n##D # fit with isotonic, antitonic and 
inverse-unimodal constraint (just for completeness)\n##D fit4 <- unireg(x,y,constr=\"antitonic\",g=5)\n##D fit5 <- unireg(x,y,constr=\"isotonic\",g=5)\n##D fit6 <- unireg(x,y,constr=\"invuni\",g=5)\n##D \n##D points(fit4,col=\"orange\",lwd=2)\n##D points(fit5,col=\"brown\",lwd=2)\n##D points(fit6,col=\"yellow\",lwd=2)\n##D \n##D # suppose only aggregated data had been given\n##D means <- c(mean(y[1:20]), mean(y[21:40]), mean(y[41:60]), mean(y[61:80]), \n##D mean(y[81:100]), mean(y[101:120]))\n##D sigmasq <- c(sd(y[1:20]),sd(y[21:40]),sd(y[41:60]),sd(y[61:80]),sd(y[81:100]),sd(y[101:120]))^2\n##D \n##D # unimodal fit with differences penalty\n##D fit7 <- unireg(x=unique(x), y=means, g=5, w=NULL, sigmasq=sigmasq, abstol=NULL)\n##D plot(unique(x), means, pch=19, ylim=range(y))\n##D curve(dchisq(x,3), 0, 5, type=\"l\", col=\"grey\", lwd=2, add=TRUE)\n##D points(fit7, type=\"l\", col=\"green\", lwd=2)\n##D legend(\"bottomright\", legend = c(\"true mean function\", \"observed mean values\", \n##D \"diff. penalized unimodal fit for means\"),\n##D col=c(\"grey\",\"black\",\"green\"), lty=c(1,NA,1), lwd=c(2,0,2), pch=c(NA,19,NA))\n## End(Not run)\n\n\n"} {"package":"robustX","topic":"BACON","snippet":"### Name: BACON\n### Title: BACON for Regression or Multivariate Covariance Estimation\n### Aliases: BACON .lmBACON\n### Keywords: robust regression\n\n### ** Examples\n\ndata(starsCYG, package = \"robustbase\")\n## Plot simple data and fitted lines\nplot(starsCYG)\nlmST <- lm(log.light ~ log.Te, data = starsCYG)\nabline(lmST, col = \"gray\") # least squares line\nstr(B.ST <- with(starsCYG, BACON(x = log.Te, y = log.light)))\n## 'subset': A good set of points (to determine regression):\ncolB <- adjustcolor(2, 1/2)\npoints(log.light ~ log.Te, data = starsCYG, subset = B.ST$subset,\n pch = 19, cex = 1.5, col = colB)\n## A BACON-derived line:\nlmB <- lm(log.light ~ log.Te, data = starsCYG, subset = B.ST$subset)\nabline(lmB, col = colB, lwd = 2)\n\nrequire(robustbase)\n(RlmST <- lmrob(log.light ~ log.Te, data = starsCYG))\nabline(RlmST, col = \"blue\")\n\n\n"} {"package":"robustX","topic":"L1median","snippet":"### Name: L1median\n### Title: Compute the Multivariate L1-Median aka 'Spatial Median'\n### Aliases: L1median optimMethods nlminbMethods\n### Keywords: robust multivariate\n\n### ** Examples\n\ndata(stackloss)\nL1median(stackloss)\nL1median(stackloss, method = \"HoCrJo\")\n\n## Explore all methods:\nm <- eval(formals(L1median)$method); allMeths <- m[m != \"Brent\"]\nL1m <- sapply(allMeths, function(meth) L1median(stackloss, method = meth))\n## --> with a warning for L-BFGS-B\nstr(L1m)\npm <- sapply(L1m, function(.) if(is.numeric(.)) . 
 {"package":"robustX","topic":"Qrot","snippet":"### Name: Qrot\n### Title: Rotation Matrix to Specific Direction\n### Aliases: Qrot\n### Keywords: array\n\n### ** Examples\n\nQ <- Qrot(6)\nzapsmall(crossprod(Q)) # 6 x 6 identity <==> Q'Q = I <==> Q orthogonal\n\nif(require(\"MASS\")) {\n Qt <- Qrot(6, transpose = TRUE)\n stopifnot(all.equal(Qt, t(Q)))\n fractions(Qt ^2) # --> 1/6 1/30 etc, in an almost lower-triangular matrix\n}\n\n\n"} {"package":"robustX","topic":"covNNC","snippet":"### Name: covNNC\n### Title: Robust Covariance Estimation via Nearest Neighbor Cleaning\n### Aliases: covNNC cov.nnve\n### Keywords: multivariate robust\n\n### ** Examples\n\ndata(iris)\ncovNNC(iris[-5])\n\ndata(hbk, package=\"robustbase\")\nhbk.x <- data.matrix(hbk[, 1:3])\ncovNNC(hbk.x)\n\n\n"} {"package":"robustX","topic":"mvBACON","snippet":"### Name: mvBACON\n### Title: BACON: Blocked Adaptive Computationally-Efficient Outlier\n### Nominators\n### Aliases: mvBACON\n### Keywords: multivariate robust\n\n### ** Examples\n\n require(robustbase) # for example data and covMcd():\n ## simple 2D example :\n plot(starsCYG, main = \"starsCYG data (n=47)\")\n B.st <- mvBACON(starsCYG)\n points(starsCYG[ ! B.st$subset,], pch = 4, col = 2, cex = 1.5)\n stopifnot(identical(which(!B.st$subset), c(7L,11L,20L,30L,34L)))\n ## finds the 4 clear outliers (and 1 \"borderline\");\n ## it does not find obs. 14 which is an outlier according to covMcd(.)\n\n iniS <- setNames(, eval(formals(mvBACON)$init.sel)) # all initialization methods, incl \"random\"\n set.seed(123)\n Bs.st <- lapply(iniS[iniS != \"manual\"], function(s)\n mvBACON(as.matrix(starsCYG), init.sel = s, verbose=FALSE))\n ii <- - match(\"steps\", names(Bs.st[[1]]))\n Bs.s1 <- lapply(Bs.st, `[`, ii)\n stopifnot(exprs = {\n length(Bs.s1) >= 4\n length(unique(Bs.s1)) == 1 # all 4 methods give the same\n })\n\n ## Example where \"dUniMedian\" and \"V2\" differ :\n data(pulpfiber, package=\"robustbase\")\n dU.plp <- mvBACON(as.matrix(pulpfiber), init.sel = \"dUniMedian\")\n V2.plp <- mvBACON(as.matrix(pulpfiber), init.sel = \"V2\")\n (oU <- which(! dU.plp$subset))\n (o2 <- which(! V2.plp$subset))\n stopifnot(setdiff(o2, oU) %in% c(57L,58L,59L,62L))\n ## and 57, 58, 59, and 62 *are* outliers according to covMcd(.)\n\n ## 'coleman' from pkg 'robustbase'\n coleman.x <- data.matrix(coleman[, 1:6])\n Cc <- covMcd (coleman.x) # truly robust\n summary(Cc) # -> 6 outliers (1,3,10,12,17,18)\n Cb1 <- mvBACON(coleman.x) ##-> subset is all TRUE hmm??\n Cb2 <- mvBACON(coleman.x, init.sel = \"dUniMedian\")\n stopifnot(all.equal(Cb1, Cb2))\n ## try 20 different random starts:\n Cb.r <- lapply(1:20, function(i) { set.seed(i)\n mvBACON(coleman.x, init.sel=\"random\", verbose=FALSE) })\n nm <- names(Cb.r[[1]]); nm <- nm[nm != \"steps\"]\n all(eqC <- sapply(Cb.r[-1], function(CC) all.equal(CC[nm], Cb.r[[1]][nm]))) # TRUE\n ## --> BACON always breaks down, i.e., does not see the outliers here\n ## Don't show: \nstopifnot(Cb1$subset, Cb.r[[1]]$subset, eqC)\n## End(Don't show)\n ## breaks down even when manually starting with all the non-outliers:\n Cb.man <- mvBACON(coleman.x, init.sel = \"manual\",\n man.sel = setdiff(1:20, c(1,3,10,12,17,18)))\n which( ! 
Cb.man$subset) # the outliers according to mvBACON : _none_\n\n\n"} {"package":"robustX","topic":"rbwheel","snippet":"### Name: rbwheel\n### Title: Multivariate Barrow Wheel Distribution Random Vectors\n### Aliases: rbwheel\n### Keywords: distribution robust\n\n### ** Examples\n\nset.seed(17)\nrX8 <- rbwheel(1000,8, fullResult = TRUE, scaleAfter=FALSE)\nwith(rX8, stopifnot(all.equal(X, X0 %*% A, tol = 1e-15),\n all.equal(X0, X %*% t(A), tol = 1e-15)))\n##--> here, don't need to keep X0 (nor A, since that is Qrot(p))\n\n## for n = 100, you don't see \"it\", but may guess .. :\nn <- 100\npairs(r <- rbwheel(n,6))\nn1 <- attr(r,\"n1\") ; pairs(r, col=1+((1:n) > n1))\n\n## for n = 500, you *do* see it :\nn <- 500\npairs(r <- rbwheel(n,6))\n## show explicitly\nn1 <- attr(r,\"n1\") ; pairs(r, col=1+((1:n) > n1))\n\n## but increasing sig2 does help:\npairs(r <- rbwheel(n,6, sig2 = .2))\n\n## show explicitly\nn1 <- attr(r,\"n1\") ; pairs(r, col=1+((1:n) > n1))\n\nset.seed(12)\npairs(X <- rbwheel(n, 7, spherize=TRUE))\ncolSums(X) # already centered\n\nif(require(\"ICS\") && require(\"robustbase\")) {\n # ICS: Compare M-estimate [Max.Lik. of t_{df = 2}] with high-breakdown :\n stopifnot(require(\"MASS\"))\n X.paM <- ics(X, S1 = cov, S2 = function(.) cov.trob(., nu=2)$cov, stdKurt = FALSE)\n X.paM.<- ics(X, S1 = cov, S2 = function(.) tM(., df=2)$V, stdKurt = FALSE)\n X.paR <- ics(X, S1 = cov, S2 = function(.) covMcd(.)$cov, stdKurt = FALSE)\n plot(X.paM) # not at all clear\n plot(X.paM.)# ditto\n plot(X.paR)# very clear\n}\n## Similar such experiments ---> demo(rbwheel_d) and demo(rbwheel_ics)\n## -------------- -----------------\n\n\n"} {"package":"robustX","topic":"reclas","snippet":"### Name: reclas\n### Title: Recursive Robust Median-like Location and Scale\n### Aliases: reclas plot.reclas\n### Keywords: univar robust\n\n### ** Examples\n\nset.seed(42)\ny <- rt(10000, df = 1.5) # not quite Gaussian ...\nz1 <- reclas(y)\nz3 <- reclas(y, scon= 1 ) # correct fixed scale\nz4 <- reclas(y, scon= 100) # wrong fixed scale\nz2 <- reclas(y, # a more robust initial scale:\n scon = function(y0, m0) robustbase::Qn(y0 - m0),\n updateScale = TRUE) # still updated\n\n## Visualizing -- using the plot() method for \"reclas\":\nM <- median(y) ; yl <- c(-1,1)* 0.5\nOP <- par(mfrow=c(2,2), mar=.1+c(3,3,1,1), mgp=c(1.5, .6, 0))\n plot(z1, M=M, ylim=yl)\n plot(z2, M=M, ylim=yl)\n plot(z3, M=M, ylim=yl)\n plot(z4, M=M, ylim=yl)\npar(OP)\n\n\n"} {"package":"robustX","topic":"robustX-package","snippet":"### Name: robustX-package\n### Title: eXperimental eXtraneous ... 
Functionality for Robust Statistics\n### Aliases: robustX-package robustX\n### Keywords: package\n\n### ** Examples\n\npairs( rbwheel(100, 4) )\n\n\n"} {"package":"zoib","topic":"AlcoholUse","snippet":"### Name: AlcoholUse\n### Title: California County-level Teenager Monthly Alcohol Use data\n### Aliases: AlcoholUse\n### Keywords: datasets\n\n### ** Examples\n\n ## Not run: \n##D ##### eg3: modelling with clustered beta variables with inflation at 0\n##D library(zoib)\n##D data(\"AlcoholUse\", package = \"zoib\")\n##D eg3 <- zoib(Percentage ~ Grade*Gender+MedDays|1|Grade*Gender+MedDays|1,\n##D data = AlcoholUse, random = 1, EUID= AlcoholUse$County,\n##D zero.inflation = TRUE, one.inflation = FALSE, joint = FALSE, \n##D n.iter=5000, n.thin=20, n.burn=1000) \n##D sample1 <- eg3$coeff\n##D summary(sample1)\n##D \n##D # check convergence on the regression coefficients\n##D traceplot(sample1); \n##D autocorr.plot(sample1);\n##D check.psrf(sample1)\n##D \n##D # plot posterior mean of y vs. observed y to check on goodness of fit.\n##D ypred = rbind(eg3$ypred[[1]],eg3$ypred[[2]])\n##D post.mean= apply(ypred,2,mean); \n##D par(mfrow=c(1,1),mar=c(4,4,0.5,0.5))\n##D plot(AlcoholUse$Percentage, post.mean, xlim=c(0,0.4),ylim=c(0,0.4), \n##D col='blue', xlab='Observed y', ylab='Predicted y', main=\"\")\n##D abline(0,1,col='red')\n##D \n## End(Not run)\n\n\n"} {"package":"zoib","topic":"BiRepeated","snippet":"### Name: BiRepeated\n### Title: Data from a correlated bivariate beta distribution with repeated\n### measures\n### Aliases: BiRepeated\n### Keywords: datasets\n\n### ** Examples\n\n ## Not run: \n##D library(zoib)\n##D data(\"BiRepeated\", package = \"zoib\")\n##D eg2 <- zoib(y1|y2 ~ x|1|x, data= BiRepeated, random=1,n.response=2,\n##D EUID= BiRepeated$id, joint=TRUE,zero.inflation = FALSE,\n##D one.inflation = FALSE, prior.Sigma = \"VC.unif\", \t\t\t\n##D n.iter=7000,n.thin=25,n.burn=2000)\n##D coeff <- eg2$coeff\n##D summary(coeff)\n##D \n##D ### check convergence\n##D traceplot(coeff); \n##D autocorr.plot(coeff); \n##D check.psrf(coeff)\n##D \n##D ### plot posterior mean of y vs. 
observed y to check on goodness of fit.\n##D n= nrow(BiRepeated)\n##D ypred1 = rbind(eg2$ypred[[1]][,1:n],eg2$ypred[[2]][,1:n])\n##D ypred2 = rbind(eg2$ypred[[1]][,(n+1):(2*n)],eg2$ypred[[2]][,(n+1):(2*n)])\n##D post.mean1 = apply(ypred1,2,mean); \n##D post.mean2 = apply(ypred2,2,mean); \n##D \n##D plot(BiRepeated$y1, post.mean1, xlim=c(0,1),ylim=c(0,1), col='green2',\n##D pch=2,xlab='Observed y', ylab='Predicted y', main=\"\")\n##D points(BiRepeated$y2,post.mean2,col='purple')\n##D abline(0,1,col='red')\n##D legend(0.1,0.9,col=c('green2','purple'),c(\"y1\",\"y2\"),pch=c(2,1))\n##D \n## End(Not run)\n\n\n"} {"package":"zoib","topic":"GasolineYield","snippet":"### Name: GasolineYield\n### Title: Gasoline Yields Data\n### Aliases: GasolineYield\n### Keywords: datasets\n\n### ** Examples\n\n ## Not run: \n##D library(zoib)\n##D data(\"GasolineYield\", package = \"zoib\")\n##D \n##D #################################################\n##D \t# fixed effects zoib with \n##D # batch as a 10-level qualitative variable\n##D ################################################\n##D \n##D eg.fixed <- zoib(yield ~ temp + as.factor(batch)| 1, \n##D data=GasolineYield, joint = FALSE, random = 0, \n##D EUID = 1:nrow(GasolineYield), zero.inflation = FALSE, \n##D one.inflation = FALSE, n.iter = 1100, n.thin = 5, \n##D n.burn=100)\n##D # yields 400 posterior draws (200 per chain) on the model parameters\n##D coeff <- eg.fixed$coef\n##D summary(coeff)\n##D \n##D ### check on convergence\n##D \ttraceplot(coeff)\n##D \tautocorr.plot(coeff)\n##D \tcheck.psrf(coeff)\n##D \t \n##D ### Design Matrix: Xb, Xd, Xb0, Xb1\n##D eg.fixed$Xb; eg.fixed$Xd; eg.fixed$Xb0; eg.fixed$Xb1 \n##D \n##D # plot posterior mean of y vs. observed y to check on goodness of fit.\n##D ypred = rbind(eg.fixed$ypred[[1]],eg.fixed$ypred[[2]])\n##D post.mean= apply(ypred,2,mean); \n##D plot(GasolineYield$yield, post.mean, col='blue',pch=2); \n##D abline(0,1,col='red')\n##D \n##D ######################################################\n##D # mixed effects zoib with batch as a random variable\n##D #####################################################\n##D eg.random <- zoib(yield ~ temp | 1 | 1, data=GasolineYield,\n##D joint = FALSE, random=1, EUID=GasolineYield$batch,\n##D zero.inflation = FALSE, one.inflation = FALSE,\n##D n.iter=3200, n.thin=15, n.burn=200)\n##D sample2 <- eg.random$coeff\n##D summary(sample2)\n##D \n##D # check convergence on the regression coefficients\n##D traceplot(sample2)\n##D autocorr.plot(sample2) \n##D check.psrf(sample2)\n##D \n##D # plot posterior mean of y vs. observed y to check on goodness of fit.\n##D ypred = rbind(eg.random$ypred[[1]],eg.random$ypred[[2]])\n##D post.mean= apply(ypred,2,mean); \n##D plot(GasolineYield$yield, post.mean, col='blue',pch=2); \n##D abline(0,1,col='red')\n##D \t\n## End(Not run)\n\n\n"}
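# Aside (illustrative sketch, not from the zoib examples): if the 'Not run'
# GasolineYield fits above are executed, a numeric convergence summary can
# complement the traceplots. effectiveSize() is from the coda package, whose
# mcmc objects zoib's output ('sample2' above) is designed to work with.
coda::effectiveSize(sample2)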
 {"package":"zoib","topic":"check.psrf","snippet":"### Name: check.psrf\n### Title: Convergence Check for Markov Chain Monte Carlo simulations via\n### Potential Scale Reduction Factor\n### Aliases: check.psrf\n\n### ** Examples\n\n ## Not run: \n##D \tpost1= data.frame(cbind(rnorm(400,0,1), rbeta(400,2,3)))\n##D \tpost2= data.frame(cbind(rnorm(400,0,1), rbeta(400,2,3)))\n##D \tcheck.psrf(post1,post2)\n##D \t\n## End(Not run)\n\n\n"} {"package":"zoib","topic":"paraplot","snippet":"### Name: paraplot\n### Title: visual display of the posterior inferences of the parameters\n### from a zoib model\n### Aliases: paraplot\n\n### ** Examples\n\n ## Not run: \n##D set.seed(12) \n##D x=rnorm(4); para1 = cbind(x, x-1,x+1); rownames(para1) = c(\"a\",\"b\",\"c\",\"d\")\n##D x=rnorm(4)+1; para2 = cbind(x, x-1,x+1); rownames(para2) = c(\"a\",\"b\",\"e\",\"f\")\n##D x=rnorm(3)+2; para3 = cbind(x, x-1,x+1); rownames(para3) = c(\"a\",\"b\",\"d\")\n##D paraplot(para1, para2, para3, para4=NULL, legpos=c(-1.5,6),\n##D legtext=c(\"model 1\",\"model 2\",\"model 3\"),annotate=TRUE)\n##D paraplot(para1, legpos=c(-2,3), legtext=\"m1\", annotate=TRUE)\n##D \t\n## End(Not run)\n\n\n"} {"package":"zoib","topic":"pred.zoib","snippet":"### Name: pred.zoib\n### Title: posterior predictive samples of Y for given new X\n### Aliases: pred.zoib\n\n### ** Examples\n\n## Not run: \n##D data(\"GasolineYield\")\n##D eg1 <- zoib(yield ~ temp + as.factor(batch)| 1, data=GasolineYield,\n##D joint = FALSE, random = 0, EUID = 1:nrow(GasolineYield),\n##D zero.inflation = FALSE, one.inflation = FALSE,\n##D n.iter = 1600, n.thin = 2, n.burn=100, seeds=c(1,2),n.chain=2)\n##D xnew <- data.frame(temp = c(205, 218), batch = factor(c(1, 2), levels = 1:10))\n##D ypred <- pred.zoib(eg1, xnew)\n##D \n##D data(\"BiRepeated\")\n##D eg2 <- zoib(y1|y2 ~ x|1|x, data= BiRepeated, n.response=2,\n##D random=1, EUID= BiRepeated$id,\n##D zero.inflation = FALSE, one.inflation = FALSE,\t\t\t\t\n##D prior.Sigma = \"VC.unif\", n.iter=2100, n.thin=10, n.burn=100)\n##D xnew<- data.frame(x=BiRepeated[1:6,4])\n##D pred.zoib(eg2,xnew)\n##D \t\n## End(Not run)\n\n\n"} {"package":"zoib","topic":"zoib","snippet":"### Name: zoib\n### Title: Bayesian Inference for Zero/One Inflated Beta Regression\n### Aliases: zoib\n\n### ** Examples\n\n ## Not run: \n##D #refer to data sets GasolineYield, BiRepeated, and AlcoholUse in package zoib\n##D #for examples on fixed effect models, mixed effects models, joint modeling \n##D #of bivariate beta variables with repeated measures, and modelling clustered \n##D #beta variables with inflation at 0 using zoib\n##D \n## End(Not run)\n\n\n"} {"package":"ClimClass","topic":"ExAtRa","snippet":"### Name: ExAtRa\n### Title: Extra-Atmospheric Radiation\n### Aliases: ExAtRa\n\n### ** Examples\n\n\ndata(Trent_climate)\n# creates a vector with middle days for every month in a year\nquinci <- paste(15,\"/\",1:12,\"/\",2014,sep=\"\")\nposixlt <- strptime(quinci, format=\"%d/%m/%Y\")\nyDay <- posixlt$yday+1 # field yday starts from 0\nlatitude<-46 \n\n# generates 12 values, one for each month\ncoeff_rad<- ExAtRa(DOY=yDay,latitude=latitude, unit=\"mm\")\n\n\n\n"}
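# Aside (sketch, not in the original examples): coeff_rad from ExAtRa() above
# is a vector of 12 monthly values of mean daily extra-atmospheric radiation;
# the RDI() and arid() examples below consume exactly this vector through
# their 'coeff_rad' argument.
round(coeff_rad, 2)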
 {"package":"ClimClass","topic":"RDI","snippet":"### Name: RDI\n### Title: Riou's drought index\n### Aliases: RDI\n\n### ** Examples\n\ndata(Trent_climate)\nRDI(lista_cli[[1]], clim_norm=clima_81_10[[1]], first.yr=1981, last.yr=2010, coeff_rad=coeff_rad)\n\n\n\n"} {"package":"ClimClass","topic":"arid","snippet":"### Name: arid\n### Title: Aridity indices\n### Aliases: arid\n\n### ** Examples\n\n\ndata(Trent_climate)\n# clima_81_10 is a list of data frames having climatic means of temperature and precipitation \n# as required by the aridity indices algorithms, each one referring to one station. \n# It can be the output of function climate.\n# coeff_rad is a monthly vector of average daily extra-atmospheric solar radiation, \n# calculated e.g. by function ExAtRa.\n\n\n\n"} {"package":"ClimClass","topic":"as.datcli","snippet":"### Name: as.datcli\n### Title: as.datcli\n### Aliases: as.datcli\n\n### ** Examples\n\n\n### Not Run!! \n# Install 'climatol' from 'http://www.climatol.eu/' first\n### Then load the package, uncomment and run the following line\n# library(climatol)\nlibrary(stringr)\n data(Trent_climate)\n\n TrentinoClimateDf <- do.call(rbind,clima_81_10)\n names <- rownames(TrentinoClimateDf)\n TrentinoClimateDf$station <- \n unlist(lapply(X=str_split(names,pattern=\"[.]\"),FUN=function(x) {x[1]}))\n \n\n station <- \"T0129\"\ndatcli <- as.datcli(TrentinoClimateDf,station=station)\n\n### Not Run!! \n# Install 'climatol' from 'http://www.climatol.eu/' first\n### Then load the package, uncomment and run the following line\n# diagwl(datcli,est=station,alt=100,per=\"Period\",mlab=\"en\") ## plots a Walter-Lieth's climograph\n\n\n\n"} {"package":"ClimClass","topic":"bagn_gau","snippet":"### Name: bagn_gau\n### Title: Bagnouls - Gaussen graphs\n### Aliases: bagn_gau\n\n### ** Examples\n\n\ndata(Trent_climate)\n# clima_81_10 can be generated from monthly time series by function \"climate\".\npar(ask=TRUE)\nfor(sta in 1:length(clima_81_10)) {\n bagn_gau(clim_norm_sta= clima_81_10 [[sta]], \n main_title=paste(names(clima_81_10[sta]), \" 1981-2010\")\n\t, bar_width=40)\n}\n\n\n\n\n"} {"package":"ClimClass","topic":"climate","snippet":"### Name: climate\n### Title: Climate normals\n### Aliases: climate\n\n### ** Examples\n\n\ndata(Trent_climate)\n\n# clima_81_10 is a list of data frames of the type series, \n# each one referring to one station \n# having climatic means of temperature and precipitation \n\nclima_81_10<-lapply(lista_cli, FUN=climate, first.yr=1981, last.yr=2010, max.perc.missing=15)\n\n\n\n"}
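# Aside (sketch, not in the original examples): a quick structural look at
# the climate() output that most of the remaining ClimClass examples reuse;
# each list element holds one station's monthly climate normals.
names(clima_81_10)
str(clima_81_10[[1]])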
 {"package":"ClimClass","topic":"contin","snippet":"### Name: contin\n### Title: Continentality indices\n### Aliases: contin\n\n### ** Examples\n\n\ndata(Trent_climate)\n\n\n# clima_81_10 is a list of data frames having climatic means of temperature and precipitation as \n# required by the continentality indices algorithms, each one referring to one station. \n# It can be the output of function climate.\n\n# creates a list with all the continentality indices for all stations in clima_81_10\n\nlatit<-coord_elev$North\nelev<-coord_elev$Elevation\n\ncontin_I<-NULL\nfor(i in 1:length(clima_81_10)) {\n contin_I[[i]]<-contin(clima_81_10[[i]], \n latitude=latit[i], \n elevation=elev[i], \n Michalet_correction=TRUE)\n}\nnames(contin_I)<-names(clima_81_10)\n\n\n\n"} {"package":"ClimClass","topic":"koeppen_geiger","snippet":"### Name: koeppen_geiger\n### Title: Koeppen - Geiger's climate classification\n### Aliases: koeppen_geiger\n\n### ** Examples\n\ndata(Trent_climate)\n# clima_81_10 is a list of data frames having climatic means of temperature and precipitation as \n# required by Koeppen - Geiger classification, each one referring to one station. \n# It can be the output of function climate.\nclass_clim_l<-lapply(clima_81_10, FUN=koeppen_geiger, A_B_C_special_sub.classes=TRUE)\n\n\n\n"} {"package":"ClimClass","topic":"oiv_ind","snippet":"### Name: oiv_ind\n### Title: OIV bioclimatic indices for viticulture\n### Aliases: oiv_ind\n\n### ** Examples\n\ndata(Trent_climate)\noiv_ind(daily_Tn=Tn,daily_Tx=Tx, daily_P=P, first.yr=1981, last.yr=2010, subs_missing=FALSE)\n\n\n\n"} {"package":"ClimClass","topic":"peguy","snippet":"### Name: peguy\n### Title: Peguy Climograph\n### Aliases: peguy\n\n### ** Examples\n\n\nlibrary(stringr)\ndata(Trent_climate)\n\n\nTrentinoClimateDf <- do.call(rbind,clima_81_10)\nnames <- rownames(TrentinoClimateDf)\nTrentinoClimateDf$station <- unlist(lapply(X=str_split(names,pattern=\"[.]\"),FUN=function(x) {x[1]}))\n \n\n\ndata <- TrentinoClimateDf[TrentinoClimateDf$station %in% unique(TrentinoClimateDf$station)[1:3],]\np <- peguy(data=data)\n\n\n\n\n"} {"package":"ClimClass","topic":"plot.thornthwaite","snippet":"### Name: plot.thornthwaite\n### Title: Thornthwaite - Mather's quantile plot\n### Aliases: plot.thornthwaite\n\n### ** Examples\n\n\ndata(Trent_climate)\n\n\n# quantiles is the list (\"thornthwaite\" S3 object) of quantile tables generated \n# by function thornthwaite; \n# it is the second element of the output list, \n# which can be split into two separate lists (see function thornthwaite)\nsta <- 1 # 1st station in the list of quantile tables\nq_list=quantiles[[sta]]\nclass(q_list) <- \"thornthwaite\" ## q_list is coerced to a \"thornthwaite\" S3 object\nplot(q_list, \nst_name=names(quantiles)[sta], variables=c(\"Precipitation\", \"Et0\"), \nleg_pos = \"topleft\", col=c(1:6,1), pch=c(1:6,16), \nlty=1, horiz=TRUE, y.intersp=0.1)\n\n\n\n"}
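# Aside (sketch, not in the original examples): the monthly "series" input
# format that thornthwaite() expects in the next example can be checked
# directly on the example data.
head(lista_cli[[1]])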
\n# It can be the output of function \"climate\".\nlibrary(geosphere) # required for function daylength\nthornt_lst<-NULL\nlista_cli <- lista_cli[1:3] ## lista_cli is reduced to diminish elapsed time of execution!\nfor(k in 1 : length(lista_cli[1:3])) {\n thornt_lst[[k]]<-thornthwaite(series=lista_cli[[k]], \n clim_norm=clima_81_10[[k]],\n latitude = 46, first.yr=1981, \n last.yr=2010, snow_melt_coeff=c(0.5,0.5 ) )\n}\nnames(thornt_lst)<-names(lista_cli)\n \n# splits list into two lists\nW_balance<-NULL; quantiles<-NULL\nfor(k in 1 : length(lista_cli))\n{\n W_balance[[k]]<-thornt_lst[[k]]$W_balance\n quantiles[[k]]<-thornt_lst[[k]]$quantiles\n }\n names(W_balance)<-names(thornt_lst); names(quantiles)<-names(thornt_lst)\n \n\n\n"} {"package":"mlrintermbo","topic":"OptimizerInterMBO","snippet":"### Name: OptimizerInterMBO\n### Title: Tuner and Optimizer using mlrMBO\n### Aliases: OptimizerInterMBO mlr_optimizers_intermbo mlr_tuners_intermbo\n### TunerInterMBO\n\n### ** Examples\n\nlibrary(\"paradox\")\nlibrary(\"bbotk\")\n\n# silly example function: minimize x^2 for -1 < x < 1\ndomain <- ParamSet$new(list(ParamDbl$new(\"x\", lower = -1, upper = 1)))\ncodomain <- ParamSet$new(list(ParamDbl$new(\"y\", tags = \"minimize\")))\nobjective <- ObjectiveRFun$new(function(xs) list(y = xs$x^2), domain, codomain)\n\n# initialize instance\ninstance <- OptimInstanceSingleCrit$new(objective, domain, trm(\"evals\", n_evals = 6))\n\n# use intermbo optimizer\noptser <- opt(\"intermbo\")\n\n# optimizer has hyperparameters from mlrMBO\noptser$param_set$values$final.method <- \"best.predicted\"\n\n# optimization happens here.\noptser$optimize(instance)\n\ninstance$result\n\n\n"} {"package":"mlrintermbo","topic":"makeMlr3Surrogate","snippet":"### Name: makeMlr3Surrogate\n### Title: Create Surrogate Learner\n### Aliases: makeMlr3Surrogate\n\n### ** Examples\n\n# DiceKriging Learner:\nmakeMlr3Surrogate()\n\n# mlr3pipelines Graph: imputation %>>% 'ranger' (randomForest):\nmakeMlr3Surrogate(is.numeric = FALSE)\n\n# just the 'ranger' Learner:\nmakeMlr3Surrogate(is.numeric = FALSE, has.dependencies = FALSE)\n\n\n\n"} {"package":"superpc","topic":"superpc.cv","snippet":"### Name: superpc.cv\n### Title: Cross-validation for supervised principal components\n### Aliases: superpc.cv\n### Keywords: regression survival\n\n### ** Examples\n\n## Not run: \n##D set.seed(332)\n##D \n##D #generate some data\n##D x <- matrix(rnorm(50*30), ncol=30)\n##D y <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\n##D censoring.status <- sample(c(rep(1,20), rep(0,10)))\n##D \n##D featurenames <- paste(\"feature\", as.character(1:50), sep=\"\")\n##D data <- list(x=x, \n##D y=y, \n##D censoring.status=censoring.status, \n##D featurenames=featurenames)\n##D \n##D a <- superpc.train(data, type=\"survival\")\n##D aa <- superpc.cv(a, data)\n## End(Not run)\n\n\n"} {"package":"superpc","topic":"superpc.decorrelate","snippet":"### Name: superpc.decorrelate\n### Title: Decorrelate features with respect to competing predictors\n### Aliases: superpc.decorrelate\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ncompeting.predictors <- list(pred1=rnorm(30), \n pred2=as.factor(sample(c(1,2), \n replace=TRUE, \n size=30)))\n\n#decorrelate x. 
 {"package":"superpc","topic":"superpc.decorrelate","snippet":"### Name: superpc.decorrelate\n### Title: Decorrelate features with respect to competing predictors\n### Aliases: superpc.decorrelate\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ncompeting.predictors <- list(pred1=rnorm(30), \n pred2=as.factor(sample(c(1,2), \n replace=TRUE, \n size=30)))\n\n#decorrelate x. Remember to decorrelate test data in the same way, before making predictions.\nfoo <- superpc.decorrelate(x, competing.predictors)\nxnew <- t(foo$res)\n\n#now use xnew in superpc\ndata <- list(x=xnew, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\na <- superpc.train(data, type=\"survival\")\n\n#etc. \n\n\n"} {"package":"superpc","topic":"superpc.fit.to.outcome","snippet":"### Name: superpc.fit.to.outcome\n### Title: Fit predictive model using outcome of supervised principal\n### components\n### Aliases: superpc.fit.to.outcome\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nfit <- superpc.predict(a, \n data, \n data.test, \n threshold=1.0, \n n.components=1, \n prediction.type=\"continuous\")\nsuperpc.fit.to.outcome(a, \n data, \n fit$v.pred)\n\n\n"} {"package":"superpc","topic":"superpc.listfeatures","snippet":"### Name: superpc.listfeatures\n### Title: Return a list of the important predictors\n### Aliases: superpc.listfeatures\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nfit.red <- superpc.predict.red(a, \n data, \n data.test, \n .6)\nsuperpc.listfeatures(data, \n a, \n fit.red, \n num.features=10)\n\n\n"} {"package":"superpc","topic":"superpc.lrtest.curv","snippet":"### Name: superpc.lrtest.curv\n### Title: Compute values of likelihood ratio test from supervised\n### principal components fit\n### Aliases: superpc.lrtest.curv\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\naa <- superpc.lrtest.curv(a, data, data.test)\n#superpc.plot.lrtest(aa)\n\n\n"} {"package":"superpc","topic":"superpc.plot.lrtest","snippet":"### Name: superpc.plot.lrtest\n### Title: Plot likelihood ratio test statistics\n### Aliases: superpc.plot.lrtest\n### Keywords: regression 
survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nbb <- superpc.lrtest.curv(a, \n data, \n data.test)\nsuperpc.plot.lrtest(bb)\n\n\n"} {"package":"superpc","topic":"superpc.plotcv","snippet":"### Name: superpc.plotcv\n### Title: Plot output from superpc.cv\n### Aliases: superpc.plotcv\n### Keywords: regression survival\n\n### ** Examples\n\n## Not run: \n##D set.seed(332)\n##D \n##D #generate some data\n##D x <- matrix(rnorm(50*30), ncol=30)\n##D y <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\n##D censoring.status <- sample(c(rep(1,20), rep(0,10)))\n##D \n##D featurenames <- paste(\"feature\", as.character(1:50), sep=\"\")\n##D data <- list(x=x, \n##D y=y, \n##D censoring.status=censoring.status, \n##D featurenames=featurenames)\n##D \n##D a <- superpc.train(data, type=\"survival\")\n##D aa <- superpc.cv(a,data)\n##D \n##D superpc.plotcv(aa)\n## End(Not run)\n\n\n"} {"package":"superpc","topic":"superpc.plotred.lrtest","snippet":"### Name: superpc.plotred.lrtest\n### Title: Plot likelihood ratio test statistics from supervised principal\n### components predictor\n### Aliases: superpc.plotred.lrtest\n### Keywords: regression survival\n\n### ** Examples\n\n## Not run: \n##D set.seed(332)\n##D \n##D #generate some data\n##D x <- matrix(rnorm(50*30), ncol=30)\n##D y <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\n##D ytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\n##D censoring.status <- sample(c(rep(1,20), rep(0,10)))\n##D censoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n##D \n##D featurenames <- paste(\"feature\", as.character(1:50), sep=\"\")\n##D data <- list(x=x, \n##D y=y, \n##D censoring.status=censoring.status, \n##D featurenames=featurenames)\n##D data.test <- list(x=x, \n##D y=ytest, \n##D censoring.status=censoring.status.test, \n##D featurenames=featurenames)\n##D \n##D a <- superpc.train(data, type=\"survival\")\n##D aa <- superpc.cv(a, data)\n##D fit.red <- superpc.predict.red(a, \n##D data, \n##D data.test, \n##D .6)\n##D fit.redcv <- superpc.predict.red.cv(fit.red, \n##D aa, \n##D data, \n##D .6)\n##D superpc.plotred.lrtest(fit.redcv)\n## End(Not run)\n\n\n"} {"package":"superpc","topic":"superpc.predict","snippet":"### Name: superpc.predict\n### Title: Form principal components predictor from a trained superpc\n### object\n### Aliases: superpc.predict\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nfit <- 
superpc.predict(a, \n data, \n data.test, \n threshold=1.0, \n n.components=1)\nplot(fit$v.pred, ytest)\n\n\n"} {"package":"superpc","topic":"superpc.predict.red","snippet":"### Name: superpc.predict.red\n### Title: Feature selection for supervised principal components\n### Aliases: superpc.predict.red\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x,\n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nfit.red <- superpc.predict.red(a,\n data, \n data.test, \n threshold=.6)\nsuperpc.plotred.lrtest(fit.red)\n\n\n"} {"package":"superpc","topic":"superpc.predict.red.cv","snippet":"### Name: superpc.predict.red.cv\n### Title: Cross-validation of feature selection for supervised principal\n### components\n### Aliases: superpc.predict.red.cv\n### Keywords: regression survival\n\n### ** Examples\n\n## Not run: \n##D set.seed(332)\n##D \n##D #generate some data\n##D x <- matrix(rnorm(50*20), ncol=20)\n##D y <- 10 + svd(x[1:10,])$v[,1] + .1*rnorm(20)\n##D ytest <- 10 + svd(x[1:10,])$v[,1] + .1*rnorm(20)\n##D censoring.status <- sample(c(rep(1,15), rep(0,5)))\n##D censoring.status.test <- sample(c(rep(1,15), rep(0,5)))\n##D \n##D featurenames <- paste(\"feature\", as.character(1:50), sep=\"\")\n##D data <- list(x=x, \n##D y=y, \n##D censoring.status=censoring.status, \n##D featurenames=featurenames)\n##D data.test <- list(x=x,\n##D y=ytest, \n##D censoring.status=censoring.status.test, \n##D featurenames=featurenames)\n##D \n##D a <- superpc.train(data, type=\"survival\")\n##D aa <- superpc.cv(a, data)\n##D fit.red <- superpc.predict.red(a,\n##D data, \n##D data.test, \n##D threshold=.6)\n##D fit.redcv <- superpc.predict.red.cv(fit.red, \n##D aa, \n##D data, \n##D threshold=.6)\n## End(Not run)\n\n\n"} {"package":"superpc","topic":"superpc.predictionplot","snippet":"### Name: superpc.predictionplot\n### Title: Plot outcome predictions from superpc\n### Aliases: superpc.predictionplot\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\nsuperpc.predictionplot(a, \n data, \n data.test, \n threshold=1)\n\n\n"} {"package":"superpc","topic":"superpc.rainbowplot","snippet":"### Name: superpc.rainbowplot\n### Title: Make rainbow plot of superpc and competing predictors\n### Aliases: superpc.rainbowplot\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + 
.1*rnorm(30)\nytest <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\ncensoring.status.test <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ncompeting.predictors.test <- list(pred1=rnorm(30), \n pred2=as.factor(sample(c(1,2),\n replace=TRUE,\n size=30)))\n \ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\ndata.test <- list(x=x, \n y=ytest, \n censoring.status=censoring.status.test, \n featurenames=featurenames)\nsample.labels <- paste(\"te\", as.character(1:20), sep=\"\")\n\na <- superpc.train(data, type=\"survival\")\npred <- superpc.predict(a, \n data, \n data.test, \n threshold=.25, \n n.components=1)$v.pred\nsuperpc.rainbowplot(data, \n pred, \n sample.labels, \n competing.predictors=competing.predictors.test)\n\n\n"} {"package":"superpc","topic":"superpc.train","snippet":"### Name: superpc.train\n### Title: Prediction by supervised principal components\n### Aliases: superpc.train\n### Keywords: regression survival\n\n### ** Examples\n\nset.seed(332)\n\n#generate some data\nx <- matrix(rnorm(50*30), ncol=30)\ny <- 10 + svd(x[1:50,])$v[,1] + .1*rnorm(30)\ncensoring.status <- sample(c(rep(1,20), rep(0,10)))\n\nfeaturenames <- paste(\"feature\", as.character(1:50), sep=\"\")\ndata <- list(x=x, \n y=y, \n censoring.status=censoring.status, \n featurenames=featurenames)\n\na <- superpc.train(data, type=\"survival\")\n\n\n"} {"package":"spatsoc","topic":"DT","snippet":"### Name: DT\n### Title: Movement of 10 \"Newfoundland Bog Cows\"\n### Aliases: DT\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n\n"} {"package":"spatsoc","topic":"build_lines","snippet":"### Name: build_lines\n### Title: Build Lines\n### Aliases: build_lines\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# EPSG code for example data\nutm <- 32736\n\n# Build lines for each individual\nlines <- build_lines(DT, projection = utm, id = 'ID', coords = c('X', 'Y'),\n sortBy = 'datetime')\n\n# Build lines for each individual by year\nDT[, yr := year(datetime)]\nlines <- build_lines(DT, projection = utm, id = 'ID', coords = c('X', 'Y'),\n sortBy = 'datetime', splitBy = 'yr')\n\n\n"} {"package":"spatsoc","topic":"build_polys","snippet":"### Name: build_polys\n### Title: Build Polygons\n### Aliases: build_polys\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# EPSG code for example data\nutm <- 32736\n\n# Build polygons for each individual using kernelUD and getverticeshr\nbuild_polys(DT, projection = utm, hrType = 'kernel',\n hrParams = list(grid = 60, percent = 95),\n id = 'ID', coords = c('X', 'Y'))\n\n# Build polygons for each individual by year\nDT[, yr := year(datetime)]\nbuild_polys(DT, projection = utm, hrType = 'mcp',\n hrParams = list(percent = 95),\n id = 'ID', 
coords = c('X', 'Y'), splitBy = 'yr')\n\n\n"} {"package":"spatsoc","topic":"dyad_id","snippet":"### Name: dyad_id\n### Title: Dyad ID\n### Aliases: dyad_id\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '20 minutes')\n\n# Edge list generation\nedges <- edge_dist(\n DT,\n threshold = 100,\n id = 'ID',\n coords = c('X', 'Y'),\n timegroup = 'timegroup',\n returnDist = TRUE,\n fillNA = TRUE\n )\n\n# Generate dyad IDs\ndyad_id(edges, 'ID1', 'ID2')\n\n\n"} {"package":"spatsoc","topic":"edge_dist","snippet":"### Name: edge_dist\n### Title: Distance based edge lists\n### Aliases: edge_dist\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '20 minutes')\n\n# Edge list generation\nedges <- edge_dist(\n DT,\n threshold = 100,\n id = 'ID',\n coords = c('X', 'Y'),\n timegroup = 'timegroup',\n returnDist = TRUE,\n fillNA = TRUE\n )\n\n\n"} {"package":"spatsoc","topic":"edge_nn","snippet":"### Name: edge_nn\n### Title: Nearest neighbour based edge lists\n### Aliases: edge_nn\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Select only individuals A, B, C for this example\nDT <- DT[ID %in% c('A', 'B', 'C')]\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '20 minutes')\n\n# Edge list generation\nedges <- edge_nn(DT, id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup')\n\n# Edge list generation using maximum distance threshold\nedges <- edge_nn(DT, id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup', threshold = 100)\n\n# Edge list generation, returning distance between nearest neighbours\nedge_nn(DT, id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup', threshold = 100,\n returnDist = TRUE)\n\n\n\n"} {"package":"spatsoc","topic":"get_gbi","snippet":"### Name: get_gbi\n### Title: Generate group by individual matrix\n### Aliases: get_gbi\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\nDT[, yr := year(datetime)]\n\n# EPSG code for example data\nutm <- 'EPSG:32736'\n\ngroup_polys(DT, area = FALSE, hrType = 'mcp',\n hrParams = list(percent = 95),\n projection = utm, id = 'ID', coords = c('X', 'Y'),\n splitBy = 'yr')\n\ngbiMtrx <- get_gbi(DT = DT, group = 'group', id = 'ID')\n\n\n\n"} {"package":"spatsoc","topic":"group_lines","snippet":"### Name: group_lines\n### Title: Group Lines\n### Aliases: group_lines\n\n### ** Examples\n\n# Load 
data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Subset only individuals A, B, and C\nDT <- DT[ID %in% c('A', 'B', 'C')]\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# EPSG code for example data\nutm <- 32736\n\ngroup_lines(DT, threshold = 50, projection = utm, sortBy = 'datetime',\n id = 'ID', coords = c('X', 'Y'))\n\n## Daily movement tracks\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '1 day')\n\n# Subset only the first 25 days\nDT <- DT[timegroup < 25]\n\n# Spatial grouping\ngroup_lines(DT, threshold = 50, projection = utm,\n id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup', sortBy = 'datetime')\n\n## Daily movement tracks by population\ngroup_lines(DT, threshold = 50, projection = utm,\n id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup', sortBy = 'datetime',\n splitBy = 'population')\n\n\n"} {"package":"spatsoc","topic":"group_polys","snippet":"### Name: group_polys\n### Title: Group Polygons\n### Aliases: group_polys\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# EPSG code for example data\nutm <- 32736\n\ngroup_polys(DT, area = FALSE, hrType = 'mcp',\n hrParams = list(percent = 95), projection = utm,\n id = 'ID', coords = c('X', 'Y'))\n\nareaDT <- group_polys(DT, area = TRUE, hrType = 'mcp',\n hrParams = list(percent = 95), projection = utm,\n id = 'ID', coords = c('X', 'Y'))\nprint(areaDT)\n\n\n"} {"package":"spatsoc","topic":"group_pts","snippet":"### Name: group_pts\n### Title: Group Points\n### Aliases: group_pts\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Select only individuals A, B, C for this example\nDT <- DT[ID %in% c('A', 'B', 'C')]\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '20 minutes')\n\n# Spatial grouping with timegroup\ngroup_pts(DT, threshold = 5, id = 'ID',\n coords = c('X', 'Y'), timegroup = 'timegroup')\n\n# Spatial grouping with timegroup and splitBy on population\ngroup_pts(DT, threshold = 5, id = 'ID', coords = c('X', 'Y'),\n timegroup = 'timegroup', splitBy = 'population')\n\n\n"} {"package":"spatsoc","topic":"group_times","snippet":"### Name: group_times\n### Title: Group Times\n### Aliases: group_times\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Cast the character column to POSIXct\nDT[, datetime := as.POSIXct(datetime, tz = 'UTC')]\n\ngroup_times(DT, datetime = 'datetime', threshold = '5 minutes')\n\ngroup_times(DT, datetime = 'datetime', threshold = '2 hours')\n\ngroup_times(DT, datetime = 'datetime', threshold = '10 days')\n\n\n\n"}
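# Aside (sketch, not in the original examples): spatsoc's grouping functions
# modify DT by reference (data.table semantics), so each group_times() call
# above overwrites the previously added grouping columns rather than creating
# a new table. A quick tally of fixes per time group after the last call:
DT[, .N, by = timegroup][order(timegroup)][1:5]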
 {"package":"spatsoc","topic":"randomizations","snippet":"### Name: randomizations\n### Title: Data-stream randomizations\n### Aliases: randomizations\n\n### ** Examples\n\n# Load data.table\nlibrary(data.table)\n## Don't show: \ndata.table::setDTthreads(1)\n## End(Don't show)\n\n# Read example data\nDT <- fread(system.file(\"extdata\", \"DT.csv\", package = \"spatsoc\"))\n\n# Select only individuals A, B, C for this example\nDT <- DT[ID %in% c('A', 'B', 'C')]\n\n# Date time columns\nDT[, datetime := as.POSIXct(datetime)]\nDT[, yr := year(datetime)]\n\n# Temporal grouping\ngroup_times(DT, datetime = 'datetime', threshold = '5 minutes')\n\n# Spatial grouping with timegroup\ngroup_pts(DT, threshold = 5, id = 'ID', coords = c('X', 'Y'), timegroup = 'timegroup')\n\n# Randomization: step\nrandStep <- randomizations(\n DT,\n type = 'step',\n id = 'ID',\n group = 'group',\n datetime = 'timegroup',\n splitBy = 'yr',\n iterations = 2\n)\n\n# Randomization: daily\nrandDaily <- randomizations(\n DT,\n type = 'daily',\n id = 'ID',\n group = 'group',\n datetime = 'datetime',\n splitBy = 'yr',\n iterations = 2\n)\n\n# Randomization: trajectory\nrandTraj <- randomizations(\n DT,\n type = 'trajectory',\n id = 'ID',\n group = NULL,\n coords = c('X', 'Y'),\n datetime = 'datetime',\n splitBy = 'yr',\n iterations = 2\n)\n\n\n\n"} {"package":"tracerer","topic":"calc_act","snippet":"### Name: calc_act\n### Title: Calculate the auto-correlation time, alternative implementation\n### Aliases: calc_act\n\n### ** Examples\n\ntrace <- sin(seq(from = 0.0, to = 2.0 * pi, length.out = 100))\n# 38.18202\ncalc_act(trace = trace, sample_interval = 1)\n\n\n"} {"package":"tracerer","topic":"calc_act_r","snippet":"### Name: calc_act_r\n### Title: Calculate the auto-correlation time using only R. Consider using\n### calc_act instead, as it is orders of magnitude faster\n### Aliases: calc_act_r\n\n### ** Examples\n\ntrace <- sin(seq(from = 0.0, to = 2.0 * pi, length.out = 100))\ncalc_act_r(trace = trace, sample_interval = 1) # 38.18202\n\n\n"} {"package":"tracerer","topic":"calc_ess","snippet":"### Name: calc_ess\n### Title: Calculates the Effective Sample Size\n### Aliases: calc_ess\n\n### ** Examples\n\nfilename <- get_tracerer_path(\"beast2_example_output.log\")\nestimates <- parse_beast_tracelog_file(filename)\ncalc_ess(estimates$posterior, sample_interval = 1000)\n\n\n"} {"package":"tracerer","topic":"calc_esses","snippet":"### Name: calc_esses\n### Title: Calculates the Effective Sample Sizes from a parsed BEAST2 log\n### file\n### Aliases: calc_esses\n\n### ** Examples\n\n# Parse an example log file\nestimates <- parse_beast_tracelog_file(\n get_tracerer_path(\"beast2_example_output.log\")\n)\n\n# Calculate the effective sample sizes of all parameter estimates\ncalc_esses(estimates, sample_interval = 1000)\n\n\n"} {"package":"tracerer","topic":"calc_hpd_interval","snippet":"### Name: calc_hpd_interval\n### Title: Calculate the Highest Probability Density of an MCMC trace that\n### has its burn-in removed\n### Aliases: calc_hpd_interval\n\n### ** Examples\n\nestimates <- parse_beast_tracelog_file(\n get_tracerer_path(\"beast2_example_output.log\")\n)\ntree_height_trace <- remove_burn_in(\n estimates$TreeHeight,\n burn_in_fraction = 0.1\n)\n\n# Values will be 0.453 and 1.816\ncalc_hpd_interval(tree_height_trace, proportion = 0.95)\n\n\n"} {"package":"tracerer","topic":"calc_mode","snippet":"### Name: calc_mode\n### Title: Calculate the mode of values. If the distribution is bi- or\n### multimodal, or uniform, NA is returned\n### Aliases: calc_mode\n\n### ** Examples\n\n# In a unimodal distribution, find the value that occurs most\ncalc_mode(c(1, 2, 2))\ncalc_mode(c(1, 1, 2))\n\n# For a uniform distribution, NA is returned\ntracerer:::calc_mode(c(1, 2))\n\n\n"}
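# Aside (illustrative sketch, not from the tracerer examples): for the
# autocorrelated sine trace used in the calc_stderr_mean() example below,
# the naive standard error that assumes independent samples is far smaller
# than the reported 0.4347425; the gap reflects the effective sample size.
trace <- sin(seq(from = 0.0, to = 2.0 * pi, length.out = 100))
sd(trace) / sqrt(length(trace))  # naive standard error, about 0.07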
 {"package":"tracerer","topic":"calc_stderr_mean","snippet":"### Name: calc_stderr_mean\n### Title: Calculate the standard error of the mean\n### Aliases: calc_stderr_mean\n\n### ** Examples\n\ntrace <- sin(seq(from = 0.0, to = 2.0 * pi, length.out = 100))\ncalc_stderr_mean(trace) # 0.4347425\n\n\n"} {"package":"tracerer","topic":"calc_summary_stats","snippet":"### Name: calc_summary_stats\n### Title: Calculates summary statistics of one estimated\n### variable's trace.\n### Aliases: calc_summary_stats\n\n### ** Examples\n\nestimates_all <- parse_beast_tracelog_file(\n get_tracerer_path(\"beast2_example_output.log\")\n)\nestimates <- remove_burn_ins(estimates_all, burn_in_fraction = 0.1)\n\n# From a single variable's trace\ncalc_summary_stats(\n estimates$posterior,\n sample_interval = 1000\n)\n\n# From all variables' traces\ncalc_summary_stats(\n estimates,\n sample_interval = 1000\n)\n\n\n"} {"package":"tracerer","topic":"calc_summary_stats_trace","snippet":"### Name: calc_summary_stats_trace\n### Title: Calculates summary statistics of one estimated\n### variable's trace.\n### Aliases: calc_summary_stats_trace\n\n### ** Examples\n\nestimates_all <- parse_beast_tracelog_file(\n get_tracerer_path(\"beast2_example_output.log\")\n)\nestimates <- remove_burn_ins(estimates_all, burn_in_fraction = 0.1)\n\ncalc_summary_stats_trace(\n estimates$posterior,\n sample_interval = 1000\n)\n\n\n"} {"package":"tracerer","topic":"calc_summary_stats_traces","snippet":"### Name: calc_summary_stats_traces\n### Title: Calculates summary statistics of the traces of multiple\n### estimated variables.\n### Aliases: calc_summary_stats_traces\n\n### ** Examples\n\nestimates_all <- parse_beast_tracelog_file(\n get_tracerer_path(\"beast2_example_output.log\")\n)\nestimates <- remove_burn_ins(estimates_all, burn_in_fraction = 0.1)\n\ncalc_summary_stats_traces(\n estimates,\n sample_interval = 1000\n)\n\n\n"} {"package":"tracerer","topic":"check_trace","snippet":"### Name: check_trace\n### Title: Check if the trace is valid. 
Will stop if not\n### Aliases: check_trace\n\n### ** Examples\n\ncheck_trace(seq(1, 2))\n\n\n"} {"package":"tracerer","topic":"get_tracerer_path","snippet":"### Name: get_tracerer_path\n### Title: Get the full path of a file in the 'inst/extdata' folder\n### Aliases: get_tracerer_path\n\n### ** Examples\n\nget_tracerer_path(\"beast2_example_output.log\")\nget_tracerer_path(\"beast2_example_output.trees\")\nget_tracerer_path(\"beast2_example_output.xml\")\nget_tracerer_path(\"beast2_example_output.xml.state\")\n\n\n"} {"package":"tracerer","topic":"get_tracerer_paths","snippet":"### Name: get_tracerer_paths\n### Title: Get the full paths of files in the 'inst/extdata' folder\n### Aliases: get_tracerer_paths\n\n### ** Examples\n\nget_tracerer_paths(\n c(\n \"beast2_example_output.log\",\n \"beast2_example_output.trees\",\n \"beast2_example_output.xml\",\n \"beast2_example_output.xml.state\"\n )\n)\n\n\n"} {"package":"tracerer","topic":"is_posterior","snippet":"### Name: is_posterior\n### Title: Determines if the input is a BEAST2 posterior\n### Aliases: is_posterior\n\n### ** Examples\n\ntrees_filename <- get_tracerer_path(\"beast2_example_output.trees\")\ntracelog_filename <- get_tracerer_path(\"beast2_example_output.log\")\nposterior <- parse_beast_posterior(\n trees_filename = trees_filename,\n tracelog_filename = tracelog_filename\n)\nis_posterior(posterior)\n\n\n"} {"package":"tracerer","topic":"is_trees_file","snippet":"### Name: is_trees_file\n### Title: Measure if a file is a valid BEAST2 '.trees' file\n### Aliases: is_trees_file\n\n### ** Examples\n\n# TRUE\nis_trees_file(get_tracerer_path(\"beast2_example_output.trees\"))\nis_trees_file(get_tracerer_path(\"unplottable_anthus_aco.trees\"))\nis_trees_file(get_tracerer_path(\"anthus_2_4_a.trees\"))\nis_trees_file(get_tracerer_path(\"anthus_2_4_b.trees\"))\n# FALSE\nis_trees_file(get_tracerer_path(\"mcbette_issue_8.trees\"))\n\n\n"} {"package":"tracerer","topic":"parse_beast_log","snippet":"### Name: parse_beast_log\n### Title: Deprecated function to parse a BEAST2 '.log' output file. 
Use\n### parse_beast_tracelog_file instead\n### Aliases: parse_beast_log\n\n### ** Examples\n\n# Deprecated\nparse_beast_log(\n tracelog_filename = get_tracerer_path(\"beast2_example_output.log\")\n)\n# Use the function 'parse_beast_tracelog_file' instead\nparse_beast_tracelog_file(\n tracelog_filename = get_tracerer_path(\"beast2_example_output.log\")\n)\n\n\n"} {"package":"tracerer","topic":"parse_beast_output_files","snippet":"### Name: parse_beast_output_files\n### Title: Parse all BEAST2 output files\n### Aliases: parse_beast_output_files\n\n### ** Examples\n\ntrees_filenames <- get_tracerer_path(\"beast2_example_output.trees\")\nlog_filename <- get_tracerer_path(\"beast2_example_output.log\")\nstate_filename <- get_tracerer_path(\"beast2_example_output.xml.state\")\nparse_beast_output_files(\n log_filename = log_filename,\n trees_filenames = trees_filenames,\n state_filename = state_filename\n)\n\n\n"} {"package":"tracerer","topic":"parse_beast_posterior","snippet":"### Name: parse_beast_posterior\n### Title: Parses BEAST2 output files to a posterior\n### Aliases: parse_beast_posterior\n\n### ** Examples\n\ntrees_filenames <- get_tracerer_path(\"beast2_example_output.trees\")\ntracelog_filename <- get_tracerer_path(\"beast2_example_output.log\")\nposterior <- parse_beast_posterior(\n trees_filenames = trees_filenames,\n tracelog_filename = tracelog_filename\n)\n\n\n"} {"package":"tracerer","topic":"parse_beast_state_operators","snippet":"### Name: parse_beast_state_operators\n### Title: Parses a BEAST2 state '.xml.state' output file to get only the\n### operators acceptances\n### Aliases: parse_beast_state_operators\n\n### ** Examples\n\nparse_beast_state_operators(\n state_filename = get_tracerer_path(\"beast2_example_output.xml.state\")\n)\n\n\n"} {"package":"tracerer","topic":"parse_beast_tracelog_file","snippet":"### Name: parse_beast_tracelog_file\n### Title: Parses a BEAST2 tracelog '.log' output file\n### Aliases: parse_beast_tracelog_file\n\n### ** Examples\n\nparse_beast_tracelog_file(\n tracelog_filename = get_tracerer_path(\"beast2_example_output.log\")\n)\n\n\n"} {"package":"tracerer","topic":"parse_beast_trees","snippet":"### Name: parse_beast_trees\n### Title: Parses a BEAST2 .trees output file\n### Aliases: parse_beast_trees\n\n### ** Examples\n\ntrees_filename <- get_tracerer_path(\"beast2_example_output.trees\")\nparse_beast_trees(trees_filename)\n\n\n"} {"package":"tracerer","topic":"remove_burn_in","snippet":"### Name: remove_burn_in\n### Title: Remove the burn-in from a trace\n### Aliases: remove_burn_in\n\n### ** Examples\n\n# Create a trace from one up to and including ten\nv <- seq(1, 10)\n\n# Remove the first ten percent of its values,\n# in this case removes the first value, which is one\nw <- remove_burn_in(trace = v, burn_in_fraction = 0.1)\n\n\n"} {"package":"tidyft","topic":"arrange","snippet":"### Name: arrange\n### Title: Arrange entries in data.frame\n### Aliases: arrange\n\n### ** Examples\n\n\na = as.data.table(iris)\na %>% arrange(Sepal.Length)\na\na %>% arrange(cols = c(\"Sepal.Width\",\"Petal.Length\"))\na\n\n\n\n"} {"package":"tidyft","topic":"as_fst","snippet":"### Name: as_fst\n### Title: Save a data.frame as a fst table\n### Aliases: as_fst\n\n### ** Examples\n\n\n## No test: \n iris %>%\n as_fst() -> iris_fst\n iris_fst\n## End(No test)\n\n\n"}
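# Aside (sketch, not in the original examples): most tidyft verbs modify the
# underlying data.table by reference, which is why printing 'a' after the
# arrange() calls above shows the new row order. Take an explicit copy first
# when the original table must stay untouched.
a <- as.data.table(iris)
b <- copy(a)  # 'b' is unaffected by later in-place edits of 'a'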
 {"package":"tidyft","topic":"complete","snippet":"### Name: complete\n### Title: Complete a data frame with missing combinations of data\n### Aliases: complete\n\n### ** Examples\n\ndf <- data.table(\n group = c(1:2, 1),\n item_id = c(1:2, 2),\n item_name = c(\"a\", \"b\", \"b\"),\n value1 = 1:3,\n value2 = 4:6\n)\n\ndf %>% complete(item_id,item_name)\ndf %>% complete(item_id,item_name,fill = 0)\ndf %>% complete(\"item\")\ndf %>% complete(item_id=1:3)\ndf %>% complete(item_id=1:3,group=1:2)\ndf %>% complete(item_id=1:3,group=1:3,item_name=c(\"a\",\"b\",\"c\"))\n\n\n\n"} {"package":"tidyft","topic":"count","snippet":"### Name: count\n### Title: Count observations by group\n### Aliases: count add_count\n\n### ** Examples\n\na = as.data.table(mtcars)\ncount(a,cyl)\ncount(a,cyl,sort = TRUE)\na\n\nb = as.data.table(iris)\nb %>% add_count(Species,name = \"N\")\nb\n\n\n"} {"package":"tidyft","topic":"cummean","snippet":"### Name: cummean\n### Title: Cumulative mean\n### Aliases: cummean\n\n### ** Examples\n\ncummean(1:10)\n\n\n\n"} {"package":"tidyft","topic":"distinct","snippet":"### Name: distinct\n### Title: Select distinct/unique rows in data.table\n### Aliases: distinct\n\n### ** Examples\n\n\n a = as.data.table(iris)\n b = as.data.table(mtcars)\n a %>% distinct(Species)\n b %>% distinct(cyl,vs,.keep_all = TRUE)\n\n\n\n\n"} {"package":"tidyft","topic":"drop_na","snippet":"### Name: drop_na\n### Title: Drop or delete data by rows or columns\n### Aliases: drop_na delete_na\n\n### ** Examples\n\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx\nx %>% delete_na(2,0.75)\n\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx %>% delete_na(2,0.5)\n\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx %>% delete_na(2,0.24)\n\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx %>% delete_na(2,2)\n\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx %>% delete_na(1,0.6)\nx = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5),z = rep(NA,4))\nx %>% delete_na(1,2)\n\n\n\n\n"} {"package":"tidyft","topic":"dummy","snippet":"### Name: dummy\n### Title: Fast creation of dummy variables\n### Aliases: dummy\n\n### ** Examples\n\n\niris = as.data.table(iris)\niris %>% dummy(Species)\niris %>% dummy(Species,longname = FALSE)\n\nmtcars = as.data.table(mtcars)\nmtcars %>% head() %>% dummy(vs,am)\nmtcars %>% head() %>% dummy(\"cyl|gear\")\n\n\n\n"} {"package":"tidyft","topic":"fill","snippet":"### Name: fill\n### Title: Fill in missing values with previous or next value\n### Aliases: fill shift_fill\n\n### ** Examples\n\n\ndf <- data.table(Month = 1:12, Year = c(2000, rep(NA, 10),2001))\ndf\ndf %>% fill(Year)\n\ndf <- data.table(Month = 1:12, Year = c(2000, rep(NA, 10),2001))\ndf %>% fill(Year,direction = \"up\")\n\n\n\n"} {"package":"tidyft","topic":"filter","snippet":"### Name: filter\n### Title: Filter entries in data.frame\n### Aliases: filter\n\n### ** Examples\n\niris = as.data.table(iris)\niris %>% filter(Sepal.Length > 7)\niris %>% filter(Sepal.Length > 7,Sepal.Width > 3)\niris %>% filter(Sepal.Length > 7 & Sepal.Width > 3)\niris %>% filter(Sepal.Length == max(Sepal.Length))\n\n\n"}
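# Aside (sketch, not from the original examples; semantics inferred from the
# delete_na() calls above): the first argument is the margin (1 = rows,
# 2 = columns) and the second the tolerated amount of NA, read as a
# proportion when below 1 and as a count otherwise. The NA share per column
# of the example table makes the outcomes above easy to predict:
x = data.table(x = c(1, 2, NA, 3), y = c(NA, NA, 4, 5), z = rep(NA, 4))
sapply(x, function(col) mean(is.na(col)))  # 0.25, 0.50, 1.00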
slice_fst(c(1,3))\n\n ft %>% select_fst(Sepal.Length)\n ft %>% select_fst(Sepal.Length,Sepal.Width)\n ft %>% select_fst(\"Sepal.Length\")\n ft %>% select_fst(1:3)\n ft %>% select_fst(1,3)\n ft %>% select_fst(\"Se\")\n\n # returns a warning with a message\n ## No test: \n ft %>% select_fst(\"nothing\")\n \n## End(No test)\n\n ft %>% select_fst(\"Se|Sp\")\n ft %>% select_fst(cols = names(iris)[2:3])\n\n ft %>% filter_fst(Sepal.Width > 3)\n ft %>% filter_fst(Sepal.Length > 6 , Species == \"virginica\")\n ft %>% filter_fst(Sepal.Length > 6 & Species == \"virginica\" & Sepal.Width < 3)\n\n\n\n"} {"package":"tidyft","topic":"export_fst","snippet":"### Name: export_fst\n### Title: Read and write fst files\n### Aliases: export_fst import_fst\n\n### ** Examples\n\n\n## No test: \nexport_fst(iris,\"iris_fst_test.fst\")\niris_dt = import_fst(\"iris_fst_test.fst\")\niris_dt\nunlink(\"iris_fst_test.fst\")\n## End(No test)\n\n\n\n"} {"package":"tidyft","topic":"group_by","snippet":"### Name: group_by\n### Title: Group by one or more variables\n### Aliases: group_by group_exe groups ungroup\n\n### ** Examples\n\na = as.data.table(iris)\na\na %>%\n group_by(Species) %>%\n group_exe(\n head(3)\n )\ngroups(a)\nungroup(a)\ngroups(a)\n\n\n"} {"package":"tidyft","topic":"inner_join","snippet":"### Name: inner_join\n### Title: Join tables\n### Aliases: inner_join left_join right_join full_join anti_join semi_join\n\n### ** Examples\n\n\nworkers = fread(\"\n name company\n Nick Acme\n John Ajax\n Daniela Ajax\n\")\n\npositions = fread(\"\n name position\n John designer\n Daniela engineer\n Cathie manager\n\")\n\nworkers %>% inner_join(positions)\nworkers %>% left_join(positions)\nworkers %>% right_join(positions)\nworkers %>% full_join(positions)\n\n# filtering joins\nworkers %>% anti_join(positions)\nworkers %>% semi_join(positions)\n\n# To suppress the message, supply 'by' argument\nworkers %>% left_join(positions, by = \"name\")\n\n# Use a named 'by' if the join variables have different names\npositions2 = setNames(positions, c(\"worker\", \"position\")) # rename first column in 'positions'\nworkers %>% inner_join(positions2, by = c(\"name\" = \"worker\"))\n\n# the syntax of 'on' is slightly different\nworkers %>% inner_join(positions2,on = \"name==worker\")\n\n\n\n\n"} {"package":"tidyft","topic":"lead","snippet":"### Name: lead\n### Title: Fast lead/lag for vectors\n### Aliases: lead lag\n\n### ** Examples\n\nlead(1:5)\nlag(1:5)\nlead(1:5,2)\nlead(1:5,n = 2,fill = 0)\n\n\n"} {"package":"tidyft","topic":"longer","snippet":"### Name: longer\n### Title: Pivot data between long and wide\n### Aliases: longer wider\n\n### ** Examples\n\n\nstocks <- data.table(\n time = as.Date('2009-01-01') + 0:9,\n X = rnorm(10, 0, 1),\n Y = rnorm(10, 0, 2),\n Z = rnorm(10, 0, 4)\n)\n\nstocks %>% longer(time)\nstocks %>% longer(-(2:4)) # same\nstocks %>% longer(-\"X|Y|Z\") # same\nlong_stocks = longer(stocks,\"ti\") # same as above except for assignment\n\nlong_stocks %>% wider(time,name = \"name\",value = \"value\")\n\n# the unchanged (id) column can be omitted if all remaining columns are used\nlong_stocks %>% wider(name = \"name\",value = \"value\")\n\n\n\n"} {"package":"tidyft","topic":"mutate","snippet":"### Name: mutate\n### Title: Create or transform variables\n### Aliases: mutate transmute mutate_when mutate_vars\n\n### ** Examples\n\n # Newly created variables are available immediately\n a = as.data.table(mtcars)\n copy(a) %>% mutate(cyl2 = cyl * 2)\n a\n\n # modify by reference (changes 'a' permanently)\n a %>% mutate(cyl2 = cyl * 2)\n a\n\n # You can also 
use mutate() to remove variables and\n # modify existing variables\n a %>% mutate(\n mpg = NULL,\n disp = disp * 0.0163871 # convert to litres\n )\n\n a %>% transmute(cyl,one = 1)\n a\n\n\n iris[3:8,] %>%\n as.data.table() %>%\n mutate_when(Petal.Width == .2,\n one = 1,Sepal.Length=2)\n\n iris[3:8,] %>%\n as.data.table() %>%\n mutate_vars(\"Pe\",scale)\n\n\n\n"} {"package":"tidyft","topic":"nest","snippet":"### Name: nest\n### Title: Nest and unnest\n### Aliases: nest unnest squeeze chop unchop\n\n### ** Examples\n\n\nmtcars = as.data.table(mtcars)\niris = as.data.table(iris)\n\n# examples for nest\n\n# nest by which columns?\n mtcars %>% nest(cyl)\n mtcars %>% nest(\"cyl\")\n mtcars %>% nest(cyl,vs)\n mtcars %>% nest(vs:am)\n mtcars %>% nest(\"cyl|vs\")\n mtcars %>% nest(c(\"cyl\",\"vs\"))\n\n# nest two columns directly\niris %>% nest(mcols = list(petal=\"^Pe\",sepal=\"^Se\"))\n\n# nest more flexibly\niris %>% nest(mcols = list(ndt1 = 1:3,\n ndt2 = \"Pe\",\n ndt3 = Sepal.Length:Sepal.Width))\n\n# examples for unnest\n# unnest which column?\n mtcars %>% nest(\"cyl|vs\") %>%\n unnest(ndt)\n mtcars %>% nest(\"cyl|vs\") %>%\n unnest(\"ndt\")\n\ndf <- data.table(\n a = list(c(\"a\", \"b\"), \"c\"),\n b = list(c(TRUE,TRUE),FALSE),\n c = list(3,c(1,2)),\n d = c(11, 22)\n)\n\ndf\ndf %>% unnest(a)\ndf %>% unnest(2)\ndf %>% unnest(\"c\")\ndf %>% unnest(cols = names(df)[3])\n\n# You can unnest multiple columns simultaneously\ndf %>% unnest(1:3)\ndf %>% unnest(a,b,c)\ndf %>% unnest(\"a|b|c\")\n\n# examples for squeeze\n# squeeze which columns?\niris %>% squeeze(1:2)\niris %>% squeeze(\"Se\")\niris %>% squeeze(Sepal.Length:Petal.Width)\n\n# examples for chop\ndf <- data.table(x = c(1, 1, 1, 2, 2, 3), y = 1:6, z = 6:1)\ndf %>% chop(y,z)\ndf %>% chop(y,z) %>% unchop(y,z)\n\n\n"} {"package":"tidyft","topic":"nth","snippet":"### Name: nth\n### Title: Extract the nth value from a vector\n### Aliases: nth\n\n### ** Examples\n\n\nx = 1:10\nnth(x, 1)\nnth(x, 5)\nnth(x, -2)\n\n\n\n"} {"package":"tidyft","topic":"object_size","snippet":"### Name: object_size\n### Title: Nice printing of the Space Allocated for an Object\n### Aliases: object_size\n\n### ** Examples\n\n\niris %>% object_size()\n\n\n\n"} {"package":"tidyft","topic":"pull","snippet":"### Name: pull\n### Title: Pull out a single variable\n### Aliases: pull\n\n### ** Examples\n\nmtcars %>% pull(2)\nmtcars %>% pull(cyl)\nmtcars %>% pull(\"cyl\")\n\n\n"} {"package":"tidyft","topic":"relocate","snippet":"### Name: relocate\n### Title: Change column order\n### Aliases: relocate\n\n### ** Examples\n\ndf <- data.table(a = 1, b = 1, c = 1, d = \"a\", e = \"a\", f = \"a\")\ndf\ndf %>% relocate(f)\ndf %>% relocate(a,how = \"last\")\n\ndf %>% relocate(is.character)\ndf %>% relocate(is.numeric, how = \"last\")\ndf %>% relocate(\"[aeiou]\")\n\ndf %>% relocate(a, how = \"after\",where = f)\ndf %>% relocate(f, how = \"before\",where = a)\ndf %>% relocate(f, how = \"before\",where = c)\ndf %>% relocate(f, how = \"after\",where = c)\n\ndf2 <- data.table(a = 1, b = \"a\", c = 1, d = \"a\")\ndf2 %>% relocate(is.numeric,\n how = \"after\",\n where = is.character)\ndf2 %>% relocate(is.numeric,\n how=\"before\",\n where = is.character)\n\n\n"} {"package":"tidyft","topic":"replace_vars","snippet":"### Name: replace_vars\n### Title: Fast value replacement in data frame\n### Aliases: replace_vars\n\n### ** Examples\n\n iris %>% as.data.table() %>%\n mutate(Species = as.character(Species))-> new_iris\n\n new_iris %>%\n replace_vars(Species, from = \"setosa\",to = 
\"SS\")\n new_iris %>%\n replace_vars(Species,from = c(\"setosa\",\"virginica\"),to = \"sv\")\n new_iris %>%\n replace_vars(Petal.Width, from = .2,to = 2)\n new_iris %>%\n replace_vars(from = .2,to = NA)\n new_iris %>%\n replace_vars(is.numeric, from = function(x) x > 3, to = 9999 )\n\n\n\n"} {"package":"tidyft","topic":"rowwise_mutate","snippet":"### Name: rowwise_mutate\n### Title: Computation by rows\n### Aliases: rowwise_mutate rowwise_summarise\n\n### ** Examples\n\n# without rowwise\ndf <- data.table(x = 1:2, y = 3:4, z = 4:5)\ndf %>% mutate(m = mean(c(x, y, z)))\n# with rowwise\ndf <- data.table(x = 1:2, y = 3:4, z = 4:5)\ndf %>% rowwise_mutate(m = mean(c(x, y, z)))\n\n\n# # rowwise is also useful when doing simulations\nparams = fread(\" sim n mean sd\n 1 1 1 1\n 2 2 2 4\n 3 3 -1 2\")\n\nparams %>%\n rowwise_summarise(sim,z = rnorm(n,mean,sd))\n\n\n\n"} {"package":"tidyft","topic":"select","snippet":"### Name: select\n### Title: Select/rename variables by name\n### Aliases: select select_vars select_dt select_mix rename\n\n### ** Examples\n\n\n a = as.data.table(iris)\n a %>% select(1:3)\n a\n\n a = as.data.table(iris)\n a %>% select_vars(is.factor,\"Se\")\n a\n\n a = as.data.table(iris)\n a %>% select(\"Se\") %>%\n rename(sl = Sepal.Length,\n sw = Sepal.Width)\n a\n\n\nDT = data.table(a=1:2,b=3:4,c=5:6)\nDT\nDT %>% rename(B=b)\n\n\n\n"} {"package":"tidyft","topic":"separate","snippet":"### Name: separate\n### Title: Separate a character column into two columns using a regular\n### expression separator\n### Aliases: separate\n\n### ** Examples\n\ndf <- data.table(x = c(NA, \"a.b\", \"a.d\", \"b.c\"))\ndf %>% separate(x, c(\"A\", \"B\"))\n# equals to\ndf <- data.table(x = c(NA, \"a.b\", \"a.d\", \"b.c\"))\ndf %>% separate(\"x\", c(\"A\", \"B\"))\n\n\n"} {"package":"tidyft","topic":"slice","snippet":"### Name: slice\n### Title: Subset rows using their positions\n### Aliases: slice slice_head slice_tail slice_max slice_min slice_sample\n\n### ** Examples\n\n\na = as.data.table(iris)\nslice(a,1,2)\nslice(a,2:3)\nslice_head(a,5)\nslice_head(a,0.1)\nslice_tail(a,5)\nslice_tail(a,0.1)\nslice_max(a,Sepal.Length,10)\nslice_max(a,Sepal.Length,10,with_ties = FALSE)\nslice_min(a,Sepal.Length,10)\nslice_min(a,Sepal.Length,10,with_ties = FALSE)\nslice_sample(a,10)\nslice_sample(a,0.1)\n\n\n\n"} {"package":"tidyft","topic":"summarise","snippet":"### Name: summarise\n### Title: Summarise columns to single values\n### Aliases: summarise summarise_when summarise_vars\n\n### ** Examples\n\n\na = as.data.table(iris)\na %>% summarise(sum = sum(Sepal.Length),avg = mean(Sepal.Length))\n\n\na %>%\n summarise_when(Sepal.Length > 5, avg = mean(Sepal.Length), by = Species)\n\na %>%\n summarise_vars(is.numeric, min, by = Species)\n\n\n\n\n"} {"package":"tidyft","topic":"sys_time_print","snippet":"### Name: sys_time_print\n### Title: Convenient print of time taken\n### Aliases: sys_time_print\n\n### ** Examples\n\n\nsys_time_print(Sys.sleep(1))\n\na = as.data.table(iris)\nsys_time_print({\n res = a %>%\n mutate(one = 1)\n})\nres\n\n\n"} {"package":"tidyft","topic":"mat_df","snippet":"### Name: mat_df\n### Title: Conversion between tidy table and named matrix\n### Aliases: mat_df df_mat\n\n### ** Examples\n\n\nmm = matrix(c(1:8,NA),ncol = 3,dimnames = list(letters[1:3],LETTERS[1:3]))\nmm\ntdf = mat_df(mm)\ntdf\nmat = df_mat(tdf,row,col,value)\nsetequal(mm,mat)\n\ntdf %>%\n setNames(c(\"A\",\"B\",\"C\")) %>%\n df_mat(A,B,C)\n\n\n\n"} {"package":"tidyft","topic":"uncount","snippet":"### Name: uncount\n### Title: 
\"Uncount\" a data frame\n### Aliases: uncount\n\n### ** Examples\n\n\ndf <- data.table(x = c(\"a\", \"b\"), n = c(1, 2))\nuncount(df, n)\nuncount(df,n,FALSE)\n\n\n"} {"package":"tidyft","topic":"unite","snippet":"### Name: unite\n### Title: Unite multiple columns into one by pasting strings together\n### Aliases: unite\n\n### ** Examples\n\ndf <- CJ(x = c(\"a\", NA), y = c(\"b\", NA))\ndf\n\n# Treat missing value as NA, default\ndf %>% unite(\"z\", x:y, remove = FALSE)\n# Treat missing value as character \"NA\"\ndf %>% unite(\"z\", x:y, na2char = TRUE, remove = FALSE)\n# the unite has memory, \"z\" would not be removed in new operations\n# here we remove the original columns (\"x\" and \"y\")\ndf %>% unite(\"xy\", x:y,remove = TRUE)\n\n# Select all columns\niris %>% as.data.table %>% unite(\"merged_name\",\".\")\n\n\n"} {"package":"tidyft","topic":"utf8_encoding","snippet":"### Name: utf8_encoding\n### Title: Use UTF-8 for character encoding in a data frame\n### Aliases: utf8_encoding\n\n### ** Examples\n\niris %>%\n as.data.table() %>%\n utf8_encoding(Species) # could also use `is.factor`\n\n\n"} {"package":"Tariff","topic":"RandomVA3","snippet":"### Name: RandomVA3\n### Title: 400 records of Sample Input\n### Aliases: RandomVA3\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(RandomVA3)\nhead(RandomVA3$train)\nhead(RandomVA3$test)\n\n\n"} {"package":"Tariff","topic":"SampleCategory3","snippet":"### Name: SampleCategory3\n### Title: Grouping of causes in RandomVA3\n### Aliases: SampleCategory3\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(SampleCategory3)\nSampleCategory3\n\n\n"} {"package":"Tariff","topic":"plot.tariff","snippet":"### Name: plot.tariff\n### Title: Plot CSMF of the results obtained from Tariff algorithm\n### Aliases: plot.tariff\n\n### ** Examples\n\n\n## No test: \ndata(\"RandomVA3\")\ntest <- RandomVA3[1:200, ]\ntrain <- RandomVA3[201:400, ]\nallcauses <- unique(train$cause)\nfit <- tariff(causes.train = \"cause\", symps.train = train, \n\t\t\t\tsymps.test = test, causes.table = allcauses)\nplot(fit, top = 10, main = \"Top 5 population COD distribution\")\nplot(fit, min.prob = 0.05, main = \"Ppulation COD distribution (at least 5%)\")\n## End(No test)\n\n\n"} {"package":"Tariff","topic":"summary.tariff","snippet":"### Name: summary.tariff\n### Title: Summary of the results obtained from Tariff algorithm\n### Aliases: summary.tariff\n### Keywords: Tariff\n\n### ** Examples\n\n\n## No test: \ndata(\"RandomVA3\")\ntest <- RandomVA3[1:200, ]\ntrain <- RandomVA3[201:400, ]\nallcauses <- unique(train$cause)\nfit <- tariff(causes.train = \"cause\", symps.train = train, \n\t\t\tsymps.test = test, causes.table = allcauses)\ncorrect <- which(fit$causes.test[,2] == test$cause)\naccuracy <- length(correct) / dim(test)[1]\nsummary(fit)\nsummary(fit, top = 10)\nsummary(fit, id = \"p849\", top = 3)\n## End(No test)\n\n\n\n"} {"package":"Tariff","topic":"tariff","snippet":"### Name: tariff\n### Title: Replicate Tariff methods\n### Aliases: tariff\n### Keywords: Tariff\n\n### ** Examples\n\n## No test: \ndata(\"RandomVA3\")\ntest <- RandomVA3[1:200, ]\ntrain <- RandomVA3[201:400, ]\nallcauses <- unique(train$cause)\nfit <- tariff(causes.train = \"cause\", symps.train = train, \n\t\t\t\tsymps.test = test, causes.table = allcauses)\ncorrect <- which(fit$causes.test[,2] == test$cause)\naccuracy <- length(correct) / dim(test)[1]\n## End(No test)\n\n\n"} {"package":"quadtree","topic":"add_legend","snippet":"### Name: add_legend\n### Title: Add a gradient legend to a plot\n### 
Aliases: add_legend\n\n### ** Examples\n\nlibrary(terra)\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\nqt <- quadtree(habitat, .2)\n\nold_par <- par(mar = c(5, 4, 4, 5))\nplot(qt, legend = FALSE)\nleg <- terra::minmax(habitat)[1:2]\nquadtree::add_legend(leg, rev(terrain.colors(100)))\npar(old_par)\n# this example simply illustrates how it COULD be used, but as stated in the\n# 'Details' section, it shouldn't be called separately from 'plot()' - if\n# customizations to the legend are desired, use the 'legend_args' parameter\n# of 'plot()'.\n\n\n"} {"package":"quadtree","topic":"as_sf","snippet":"### Name: as_sf\n### Title: Convert to other R spatial objects\n### Aliases: as_sf as_vect as_character\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\nsf <- as(qt, \"sf\")\nsr <- as(qt, \"SpatRaster\")\nsv <- as(qt, \"SpatVector\")\nch <- as(qt, \"character\")\n\n\n"} {"package":"quadtree","topic":"as_data_frame","snippet":"### Name: as_data_frame\n### Title: Convert a 'Quadtree' to a data frame\n### Aliases: as_data_frame as_data_frame,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\n\nmat <- rbind(c(1, 1, 0, 1),\n c(1, 1, 1, 0),\n c(1, 0, 1, 1),\n c(0, 1, 1, 1))\nqt <- quadtree(mat, .1)\nplot(qt)\nas_data_frame(qt)\n\n\n"} {"package":"quadtree","topic":"as_raster","snippet":"### Name: as_raster\n### Title: Create a raster from a 'Quadtree'\n### Aliases: as_raster as_raster,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt <- quadtree(habitat, split_threshold = .1, split_method = \"sd\")\n\nrst1 <- as_raster(qt) # use the default raster\nrst2 <- as_raster(qt, habitat) # use another raster as a template\n\nold_par <- par(mfrow = c(2, 2))\nplot(habitat, main = \"original raster\")\nplot(qt, main = \"quadtree\")\nplot(rst1, main = \"raster from quadtree\")\nplot(rst2, main = \"raster from quadtree\")\npar(old_par)\n\n\n"} {"package":"quadtree","topic":"as_vector","snippet":"### Name: as_vector\n### Title: Get all 'Quadtree' cell values as a vector\n### Aliases: as_vector as_vector,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .2)\nhead(as_vector(qt), 20)\nhead(as_vector(qt, FALSE), 20)\n\n\n"} {"package":"quadtree","topic":"copy","snippet":"### Name: copy\n### Title: Create a deep copy of a 'Quadtree'\n### Aliases: copy copy,Quadtree-method copy.Quadtree\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree, then create a shallow copy and a deep copy\nqt1 <- quadtree(habitat, split_threshold = .1)\nplot(qt1)\n\nqt2 <- qt1 # SHALLOW copy\nqt3 <- copy(qt1) # DEEP copy\n\n# change the values of qt1 so we can observe how this affects qt2 and qt3\ntransform_values(qt1, function(x) 1 - x)\n\n# plot it out to see what happened\nold_par <- par(mfrow = c(1, 3))\nplot(qt1, main = \"qt1\", border_col = 
\"transparent\")\nplot(qt2, main = \"qt2\", border_col = \"transparent\")\nplot(qt3, main = \"qt3\", border_col = \"transparent\")\npar(old_par)\n# qt2 was modified but qt3 was not\n\n\n"} {"package":"quadtree","topic":"extent","snippet":"### Name: extent\n### Title: Get the extent of a 'Quadtree'\n### Aliases: extent extent,Quadtree-method extent.Quadtree\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\n\n# retrieve the extent and the original extent\next <- extent(qt)\next_orig <- extent(qt, original = TRUE)\n\next\next_orig\n\n# plot them\nplot(qt)\nrect(ext[1], ext[3], ext[2], ext[4], border = \"blue\", lwd = 4)\nrect(ext_orig[1], ext_orig[3], ext_orig[2], ext_orig[4],\n border = \"red\", lwd = 4)\n\n\n"} {"package":"quadtree","topic":"extract","snippet":"### Name: extract\n### Title: Extract 'Quadtree' values\n### Aliases: extract extract,Quadtree,ANY-method extract.Quadtree\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create quadtree\nqt1 <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\nplot(qt1)\n\n# create points at which we'll extract values\ncoords <- seq(-1000, 40010, length.out = 10)\npts <- cbind(coords,coords)\n\n# extract the cell values\nvals <- extract(qt1, pts)\n\n# plot the quadtree and the points\nplot(qt1, border_col = \"gray50\", border_lwd = .4)\npoints(pts, pch = 16, cex = .6)\ntext(pts, labels = round(vals, 2), pos = 4)\n\n# we can also extract the cell extents in addition to the values\nextract(qt1, pts, extents = TRUE)\n\n\n"} {"package":"quadtree","topic":"find_lcp","snippet":"### Name: find_lcp\n### Title: Find the LCP between two points on a 'Quadtree'\n### Aliases: find_lcp find_lcp,Quadtree-method find_lcp.Quadtree\n### find_lcp,LcpFinder-method find_lcp.LcpFinder\n\n### ** Examples\n\n####### NOTE #######\n# see the \"quadtree-lcp\" vignette for more details and examples:\n# vignette(\"quadtree-lcp\", package = \"quadtree\")\n####################\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\nplot(qt, crop = TRUE, na_col = NULL, border_lwd = .4)\n\n# define our start and end points\nstart_pt <- c(6989, 34007)\nend_pt <- c(33015, 38162)\n\n# create the LCP finder object and find the LCP\nlcpf <- lcp_finder(qt, start_pt)\npath <- find_lcp(lcpf, end_pt)\n\n# plot the LCP\nplot(qt, crop = TRUE, na_col = NULL, border_col = \"gray30\", border_lwd = .4)\npoints(rbind(start_pt, end_pt), pch = 16, col = \"red\")\nlines(path[, 1:2], col = \"black\")\n\n# note that the above path can also be found as follows:\npath <- find_lcp(qt, start_pt, end_pt)\n\n\n"} {"package":"quadtree","topic":"find_lcps","snippet":"### Name: find_lcps\n### Title: Find LCPs to surrounding points\n### Aliases: find_lcps find_lcps,LcpFinder-method\n\n### ** Examples\n\n####### NOTE #######\n# see the \"quadtree-lcp\" vignette for more details and examples:\n# vignette(\"quadtree-lcp\", package = \"quadtree\")\n####################\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\n\nstart_pt <- c(19000, 25000)\n\n# finds LCPs to all 
cells\nlcpf1 <- lcp_finder(qt, start_pt)\npaths1 <- find_lcps(lcpf1, limit = NULL)\n\n# limit LCPs by cost-distance\nlcpf2 <- lcp_finder(qt, start_pt)\npaths2 <- find_lcps(lcpf2, limit = 5000)\n\n# Now plot the reachable cells\nplot(qt, crop = TRUE, na_col = NULL, border_lwd = .3)\npoints(lcpf1, col = \"black\", pch = 16, cex = 1)\npoints(lcpf2, col = \"red\", pch = 16, cex = .7)\npoints(start_pt[1], start_pt[2], bg = \"skyblue\", col = \"black\", pch = 24,\n cex = 1.5)\n\n\n"} {"package":"quadtree","topic":"get_neighbors","snippet":"### Name: get_neighbors\n### Title: Get the neighbors of a 'Quadtree' cell\n### Aliases: get_neighbors get_neighbors,Quadtree,numeric-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\n\n# get the cell's neighbors\npt <- c(27000, 10000)\nnbs <- get_neighbors(qt, pt)\n\n# plot the neighbors\nplot(qt, border_lwd = .3)\npoints(pt[1], pt[2], col = \"black\", bg = \"lightblue\", pch = 21)\nwith(data.frame(nbs),\n rect(xmin, ymin, xmax, ymax, col = \"red\", border = \"black\", lwd = 2))\n\n\n"} {"package":"quadtree","topic":"lcp_finder","snippet":"### Name: lcp_finder\n### Title: Create a 'LcpFinder'\n### Aliases: lcp_finder lcp_finder,Quadtree-method\n\n### ** Examples\n\n####### NOTE #######\n# see the \"quadtree-lcp\" vignette for more details and examples:\n# vignette(\"quadtree-lcp\", package = \"quadtree\")\n####################\n\nlibrary(quadtree)\n\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\n\n# find the LCP between two points\nstart_pt <- c(6989, 34007)\nend_pt <- c(33015, 38162)\n\n# create the LCP finder object and find the LCP\nlcpf <- lcp_finder(qt, start_pt)\npath <- find_lcp(lcpf, end_pt)\n\n# plot the LCP\nplot(qt, crop = TRUE, na_col = NULL, border_lwd = .3)\npoints(rbind(start_pt, end_pt), pch = 16, col = \"red\")\nlines(path[, 1:2], col = \"black\")\n\n\n\n\n"} {"package":"quadtree","topic":"n_cells","snippet":"### Name: n_cells\n### Title: Get the number of cells in a 'Quadtree'\n### Aliases: n_cells n_cells,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\nn_cells(qt)\nn_cells(qt, terminal_only = FALSE)\n\n\n"} {"package":"quadtree","topic":"plot.LcpFinder","snippet":"### Name: plot.LcpFinder\n### Title: Plot a 'LcpFinder' object\n### Aliases: plot.LcpFinder points.LcpFinder points,LcpFinder-method\n### lines.LcpFinder lines,LcpFinder-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\n\nstart_point <- c(6989, 34007)\nend_point <- c(12558, 27602)\nlcpf <- lcp_finder(qt, start_point)\nlcp <- find_lcp(lcpf, end_point)\n\nplot(qt, crop = TRUE, border_lwd = .3, na_col = NULL)\npoints(lcpf, col = \"red\", pch = 16, cex = .4)\nlines(lcpf)\n\n\n"} {"package":"quadtree","topic":"plot","snippet":"### Name: plot\n### Title: Plot a 'Quadtree'\n### Aliases: plot plot,Quadtree,missing-method plot.Quadtree\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create quadtree\nqt <- quadtree(habitat, split_threshold = .1, adj_type = 
\"expand\")\n\n#####################################\n# DEFAULT\n#####################################\n\n# default - no additional parameters provided\nplot(qt)\n\n#####################################\n# CHANGE PLOT EXTENT\n#####################################\n\n# note that additional parameters like 'main', 'xlab', 'ylab', etc. will be\n# passed to the default 'plot()' function\n\n# crop extent to the original extent of the raster\nplot(qt, crop = TRUE, main = \"cropped\")\n\n# crop and don't plot NA cells\nplot(qt, crop = TRUE, na_col = NULL, main = \"cropped\")\n\n# use 'xlim' and 'ylim' to zoom in on an area\nplot(qt, xlim = c(10000, 20000), ylim = c(20000, 30000), main = \"zoomed in\")\n\n#####################################\n# COLORS AND BORDERS\n#####################################\n\n# change border color and width\nplot(qt, border_col = \"transparent\") # no borders\nplot(qt, border_col = \"gray60\") # gray borders\nplot(qt, border_lwd = .3) # change line thickness of borders\n\n# change color palette\nplot(qt, col = c(\"blue\", \"yellow\", \"red\"))\nplot(qt, col = hcl.colors(100))\nplot(qt, col = c(\"black\", \"white\"))\n\n# change color transparency\nplot(qt, alpha = .5)\nplot(qt, col = c(\"blue\", \"yellow\", \"red\"), alpha = .5)\n\n# change color of NA cells\nplot(qt, na_col = \"lavender\")\n\n# don't plot NA cells at all\nplot(qt, na_col = NULL)\n\n# change 'zlim'\nplot(qt, zlim = c(0, 5))\nplot(qt, zlim = c(.2, .7))\n\n#####################################\n# SHOW NEIGHBOR CONNECTIONS\n#####################################\n\n# plot all neighbor connections\nplot(qt, nb_line_col = \"black\", border_col = \"gray60\")\n\n# don't plot connections to NA cells\nplot(qt, nb_line_col = \"black\", border_col = \"gray60\", na_col = NULL)\n\n#####################################\n# LEGEND\n#####################################\n\n# no legend\nplot(qt, legend = FALSE)\n\n# increase right margin size\nplot(qt, adj_mar_auto = 10)\n\n# use 'legend_args' to customize the legend\nplot(qt, adj_mar_auto = 10,\n legend_args = list(lgd_ht_pct = .8, bar_wd_pct = .4))\n\n\n"} {"package":"quadtree","topic":"projection","snippet":"### Name: projection\n### Title: Retrieve the projection of a 'Quadtree'\n### Aliases: projection projection,Quadtree-method projection<-\n### projection<-,Quadtree,ANY-method projection<-,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\nquadtree::projection(qt) <- \"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\"\nquadtree::projection(qt)\n\n\n"} {"package":"quadtree","topic":"quadtree","snippet":"### Name: quadtree\n### Title: Create a 'Quadtree' from a raster or matrix\n### Aliases: quadtree quadtree,ANY-method\n\n### ** Examples\n\n####### NOTE #######\n# see the \"quadtree-creation\" vignette for more details and examples of all\n# the different parameter options:\n# vignette(\"quadtree-creation\", package = \"quadtree\")\n####################\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .15)\nplot(qt)\n# we can make it look nicer by customizing the plotting parameters\nplot(qt, crop = TRUE, na_col = NULL, border_lwd = .3)\n\n# try a different splitting method\nqt <- quadtree(habitat, .05, \"sd\")\nplot(qt)\n\n# ---- using a custom split function ----\n\n# split a cell if any of the values are below a given value\nsplit_fun = 
function(vals, args) {\n if (any(is.na(vals))) { # check for NAs first\n return(TRUE) # if there are any NAs we'll split automatically\n } else {\n return(any(vals < args$threshold))\n }\n}\n\nqt <- quadtree(habitat, split_method = \"custom\", split_fun = split_fun,\n split_args = list(threshold = .8))\nplot(qt)\n\n\n"} {"package":"quadtree","topic":"read_quadtree","snippet":"### Name: read_quadtree\n### Title: Read/write a 'Quadtree'\n### Aliases: read_quadtree write_quadtree read_quadtree,character-method\n### write_quadtree,character-method\n### write_quadtree,character,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\n\npath <- tempfile(fileext = \".qtree\")\nwrite_quadtree(path, qt)\nqt2 <- read_quadtree(path)\n\n\n"} {"package":"quadtree","topic":"set_values","snippet":"### Name: set_values\n### Title: Change values of 'Quadtree' cells\n### Aliases: set_values set_values,Quadtree,ANY,numeric-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt <- quadtree(habitat, split_threshold = .1)\n# keep a deep copy of the original, since set_values() modifies 'qt' in place\nqt_orig <- copy(qt)\n\n# generate some random points, then change the values at those points\next <- extent(qt)\npts <- cbind(runif(100, ext[1], ext[2]), runif(100, ext[3], ext[4]))\nset_values(qt, pts, rep(10, 100))\n\n# plot it out to see what happened\nold_par <- par(mfrow = c(1, 2))\nplot(qt_orig, main = \"original\")\nplot(qt, main = \"after modification\")\npar(old_par)\n\n\n"} {"package":"quadtree","topic":"summarize_lcps","snippet":"### Name: summarize_lcps\n### Title: Get a matrix summarizing all LCPs found by a 'LcpFinder'\n### Aliases: summarize_lcps summarize_lcps,LcpFinder-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, split_threshold = .1, adj_type = \"expand\")\n\nstart_pt <- c(19000, 25000)\nend_pt <- c(33015, 38162)\n\n# find LCP from 'start_pt' to 'end_pt'\nlcpf <- lcp_finder(qt, start_pt)\nlcp <- find_lcp(lcpf, end_pt)\n\n# retrieve ALL the paths that have been calculated\npaths <- summarize_lcps(lcpf)\nhead(paths)\n\n\n"} {"package":"quadtree","topic":"summary.LcpFinder","snippet":"### Name: summary.LcpFinder\n### Title: Show a summary of a 'LcpFinder'\n### Aliases: summary.LcpFinder summary,LcpFinder-method show.LcpFinder\n### show,LcpFinder-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\n\nstart_point <- c(6989, 34007)\nend_point <- c(33015, 38162)\n\nlcpf <- lcp_finder(qt, start_point)\nlcp <- find_lcp(lcpf, end_point)\n\nsummary(lcpf)\n\n\n"} {"package":"quadtree","topic":"summary.Quadtree","snippet":"### Name: summary.Quadtree\n### Title: Show a summary of a 'Quadtree'\n### Aliases: summary.Quadtree summary,Quadtree-method show.Quadtree\n### show,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\nsummary(qt)\n\n\n"} {"package":"quadtree","topic":"transform_values","snippet":"### Name: transform_values\n### Title: Transform the values of all 'Quadtree' cells\n### Aliases: transform_values transform_values,Quadtree,function-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", 
\"habitat.tif\", package=\"quadtree\"))\n\n# create a quadtree\nqt1 <- quadtree(habitat, split_threshold = .1)\n\n# copy the quadtree so that we have a copy of the original (since using\n#'transform_values' modifies the quadtree object)\nqt2 <- copy(qt1)\nqt3 <- copy(qt1)\nqt4 <- copy(qt1)\n\ntransform_values(qt2, function(x) 1 - x)\ntransform_values(qt3, function(x) x^3)\ntransform_values(qt4, function(x) {\n if (is.na(x)) return(NA) # make sure to handle NA's\n if (x < .7) return(0)\n return(1)\n})\n\nold_par <- par(mfrow = c(2, 2))\nplot(qt1, main = \"original\", crop = TRUE, na_col = NULL,\n border_lwd = .3, zlim = c(0, 1))\nplot(qt2, main = \"1 - value\", crop = TRUE, na_col = NULL,\n border_lwd = .3, zlim = c(0, 1))\nplot(qt3, main = \"values cubed\", crop = TRUE, na_col = NULL,\n border_lwd = .3, zlim = c(0, 1))\nplot(qt4, main = \"values converted to 0/1\", crop = TRUE, na_col = NULL,\n border_lwd = .3, zlim = c(0, 1))\npar(old_par)\n\n\n"} {"package":"quadtree","topic":"write_quadtree_ptr","snippet":"### Name: write_quadtree_ptr\n### Title: Read/write a 'Quadtree'\n### Aliases: write_quadtree_ptr\n### write_quadtree_ptr,character,Quadtree-method\n\n### ** Examples\n\nlibrary(quadtree)\nhabitat <- terra::rast(system.file(\"extdata\", \"habitat.tif\", package=\"quadtree\"))\n\nqt <- quadtree(habitat, .1)\n\npath <- tempfile(fileext = \"qtree\")\nwrite_quadtree_ptr(path, qt)\n\n\n"} {"package":"texteffect","topic":"infer_Z","snippet":"### Name: infer_Z\n### Title: Infer Treatments on the Test Set\n### Aliases: infer_Z\n\n### ** Examples\n\n##Load the Wikipedia biography data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Fit an sIBP on the training data\nsibp.fit <- sibp(X, Y, K = 2, alpha = 4, sigmasq.n = 0.8, \n\t\t\t\t train.ind = train.ind)\n\n# Infer the latent treatments in the test set\ninfer_Z(sibp.fit, X)\n\n\n"} {"package":"texteffect","topic":"sibp","snippet":"### Name: sibp\n### Title: Supervised Indian Buffet Process (sibp) for Discovering\n### Treatments\n### Aliases: sibp\n\n### ** Examples\n\n##Load the Wikipedia biography data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Search sIBP for several parameter configurations; fit each to the training set\nsibp.search <- sibp_param_search(X, Y, K = 2, alphas = c(2,4), sigmasq.ns = c(0.8, 1), \n\t\t\t\t\t\t\t\t iters = 1, train.ind = train.ind)\n\t\t\t\t\t\t\t\t \n## Not run: \n##D # Get metric for evaluating most promising parameter configurations\n##D sibp_rank_runs(sibp.search, X, 10)\n##D \n##D # Qualitatively look at the top candidates\n##D sibp_top_words(sibp.search[[\"4\"]][[\"0.8\"]][[1]], colnames(X), 10, verbose = TRUE)\n##D sibp_top_words(sibp.search[[\"4\"]][[\"1\"]][[1]], colnames(X), 10, verbose = TRUE)\n##D \n##D # Select the most interest treatments to investigate\n##D sibp.fit <- sibp.search[[\"4\"]][[\"0.8\"]][[1]]\n##D \n##D # Estimate the AMCE using the test set\n##D amce<-sibp_amce(sibp.fit, X, Y)\n##D # Plot 95% confidence intervals for the AMCE of each treatment\n##D sibp_amce_plot(amce)\n## End(Not run)\n\n\n"} {"package":"texteffect","topic":"sibp_amce","snippet":"### Name: sibp_amce\n### Title: Infer Treatments on the Test Set\n### Aliases: sibp_amce sibp_amce_plot\n\n### ** Examples\n\n##Load the sample of Wikipedia biography 
data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Fit an sIBP on the training data\nsibp.fit <- sibp(X, Y, K = 2, alpha = 4, sigmasq.n = 0.8, \n\t\t\t\t train.ind = train.ind)\n\t\t\t\t \nsibp.amce <- sibp_amce(sibp.fit, X, Y)\nsibp_amce_plot(sibp.amce)\n\n\n"} {"package":"texteffect","topic":"sibp_exclusivity","snippet":"### Name: sibp_exclusivity\n### Title: Calculate Exclusivity Metric\n### Aliases: sibp_exclusivity sibp_rank_runs\n\n### ** Examples\n\n##Load the sample of Wikipedia biography data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Search sIBP for several parameter configurations; fit each to the training set\nsibp.search <- sibp_param_search(X, Y, K = 2, alphas = c(2,4),\n sigmasq.ns = c(0.8, 1), iters = 1,\n\t\t\t\t\t\t\t train.ind = train.ind)\n# Get metric for evaluating most promising parameter configurations\nsibp_rank_runs(sibp.search, X, 10)\n\n\n"} {"package":"texteffect","topic":"sibp_param_search","snippet":"### Name: sibp_param_search\n### Title: Search Parameter Configurations for Supervised Indian Buffet\n### Process (sibp)\n### Aliases: sibp_param_search\n\n### ** Examples\n\n##Load the sample of Wikipedia biography data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Search sIBP for several parameter configurations; fit each to the training set\nsibp.search <- sibp_param_search(X, Y, K = 2, alphas = c(2,4),\n sigmasq.ns = c(0.8, 1), iters = 1,\n\t\t\t\t\t\t\t train.ind = train.ind)\n\t\t\t\t\t\t\t\t \n## Not run: \n##D # Get metric for evaluating most promising parameter configurations\n##D sibp_rank_runs(sibp.search, X, 10)\n##D \n##D # Qualitatively look at the top candidates\n##D sibp_top_words(sibp.search[[\"4\"]][[\"0.8\"]][[1]], colnames(X), 10, verbose = TRUE)\n##D sibp_top_words(sibp.search[[\"4\"]][[\"1\"]][[1]], colnames(X), 10, verbose = TRUE)\n## End(Not run)\n\n\n"} {"package":"texteffect","topic":"sibp_top_words","snippet":"### Name: sibp_top_words\n### Title: Report Words Most Associated with each Treatment\n### Aliases: sibp_top_words\n\n### ** Examples\n\n##Load the Wikipedia biography data\ndata(BioSample)\n\n# Divide into training and test sets\nY <- BioSample[,1]\nX <- BioSample[,-1]\nset.seed(1)\ntrain.ind <- sample(1:nrow(X), size = 0.5*nrow(X), replace = FALSE)\n\n# Fit an sIBP on the training data\nsibp.fit <- sibp(X, Y, K = 2, alpha = 4, sigmasq.n = 0.8, \n\t\t\t\t train.ind = train.ind)\n\nsibp_top_words(sibp.fit, colnames(X))\n\n\n"} {"package":"countTransformers","topic":"es","snippet":"### Name: es\n### Title: A Simulated Data Set\n### Aliases: es\n### Keywords: datasets\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# phenotype data\npDat = pData(es)\nprint(dim(pDat))\nprint(pDat[1:2,])\n\n# feature data\nfDat = fData(es)\nprint(dim(fDat))\nprint(fDat[1:2,])\n\n\n\n"} {"package":"countTransformers","topic":"getJaccard","snippet":"### Name: getJaccard\n### Title: Calculate Jaccard Index for Two Binary Vectors\n### Aliases: getJaccard\n### Keywords: method\n\n### ** Examples\n\n n = 10\n set.seed(1234567)\n\n # generate two random 
binary vectors of size n\n cl1 = sample(c(1,0), size = n, prob = c(0.5, 0.5), replace = TRUE)\n cl2 = sample(c(1,0), size = n, prob = c(0.5, 0.5), replace = TRUE)\n cat(\"\\n2x2 contingency table >>\\n\")\n print(table(cl1, cl2))\n\n JI = getJaccard(cl1, cl2)\n cat(\"Jaccard index = \", JI, \"\\n\")\n\n\n\n\n"} {"package":"countTransformers","topic":"l2Transformer","snippet":"### Name: l2Transformer\n### Title: Log Based Count Transformation Minimizing Sum of Sample-Specific\n### Squared Difference\n### Aliases: l2Transformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = l2Transformer(mat = ex)\n\n# estimated model parameter\nprint(res$delta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n\n\n"} {"package":"countTransformers","topic":"lTransformer","snippet":"### Name: lTransformer\n### Title: Log-based transformation\n### Aliases: lTransformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = lTransformer(mat = ex)\n\n# estimated model parameter\nprint(res$delta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"lv2Transformer","snippet":"### Name: lv2Transformer\n### Title: Log and VOOM Based Count Transformation Minimizing Sum of\n### Sample-Specific Squared Difference\n### Aliases: lv2Transformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = lv2Transformer(mat = ex)\n\n# estimated model parameter\nprint(res$delta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"lvTransformer","snippet":"### Name: lvTransformer\n### Title: Log and VOOM Transformation\n### Aliases: lvTransformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = lvTransformer(mat = ex)\n\n# estimated model parameter\nprint(res$delta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"r2Transformer","snippet":"### Name: r2Transformer\n### Title: Root Based Count Transformation Minimizing 
Sum of\n### Sample-Specific Squared Difference\n### Aliases: r2Transformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = r2Transformer(mat = ex)\n\n# estimated model parameter\nprint(res$eta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"rTransformer","snippet":"### Name: rTransformer\n### Title: Root Based Transformation\n### Aliases: rTransformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = rTransformer(mat = ex)\n\n# estimated model parameter\nprint(res$eta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"rv2Transformer","snippet":"### Name: rv2Transformer\n### Title: Root and VOOM Based Count Transformation Minimizing Sum of\n### Sample-Specific Squared Difference\n### Aliases: rv2Transformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = rv2Transformer(mat = ex)\n\n# estimated model parameter\nprint(res$eta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"countTransformers","topic":"rvTransformer","snippet":"### Name: rvTransformer\n### Title: Root and VOOM Transformation\n### Aliases: rvTransformer\n### Keywords: method\n\n### ** Examples\n\nlibrary(Biobase)\n\ndata(es)\nprint(es)\n\n# expression set\nex = exprs(es)\nprint(dim(ex))\nprint(ex[1:3,1:2])\n\n# mean-median before transformation\nvec = c(ex)\nm = mean(vec)\nmd = median(vec)\ndiff = m - md\ncat(\"m=\", m, \", md=\", md, \", diff=\", diff, \"\\n\")\n\nres = rvTransformer(mat = ex)\n\n# estimated model parameter\nprint(res$eta)\n\n# mean-median after transformation\nvec2 = c(res$mat2)\nm2 = mean(vec2)\nmd2 = median(vec2)\ndiff2 = m2 - md2\ncat(\"m2=\", m2, \", md2=\", md2, \", diff2=\", diff2, \"\\n\")\n\n\n"} {"package":"beans","topic":"beans","snippet":"### Name: beans\n### Title: Dry beans\n### Aliases: beans\n### Keywords: datasets\n\n### ** Examples\n\ndata(beans)\nstr(beans)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"AddSickleJrMetadata","snippet":"### Name: AddSickleJrMetadata\n### Title: Add metadata to an object of class SickleJr\n### Aliases: AddSickleJrMetadata\n\n### ** Examples\n\nSimSickleJrSmall<-AddSickleJrMetadata(SimSickleJrSmall,\nSimData$cell_type,\"cell_types_full_data\")\n\n\n"} {"package":"jrSiCKLSNMF","topic":"BuildKNNGraphLaplacians","snippet":"### Name: BuildKNNGraphLaplacians\n### 
Title: Build KNN graphs and generate their graph Laplacians\n### Aliases: BuildKNNGraphLaplacians\n\n### ** Examples\n\nSimSickleJrSmall<-BuildKNNGraphLaplacians(SimSickleJrSmall)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"BuildSNNGraphLaplacians","snippet":"### Name: BuildSNNGraphLaplacians\n### Title: Build SNN graphs and generate their graph Laplacians\n### Aliases: BuildSNNGraphLaplacians\n\n### ** Examples\n\nSimSickleJrSmall<-BuildSNNGraphLaplacians(SimSickleJrSmall)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"CalculateUMAPSickleJr","snippet":"### Name: CalculateUMAPSickleJr\n### Title: Calculate the UMAP for an object of class SickleJr\n### Aliases: CalculateUMAPSickleJr\n\n### ** Examples\n\n#Since this example has only 10 observations,\n#we need to modify the number of neighbors from the default of 15\numap.settings=umap::umap.defaults\numap.settings$n_neighbors=2\nSimSickleJrSmall<-CalculateUMAPSickleJr(SimSickleJrSmall,\numap.settings=umap.settings)\nSimSickleJrSmall<-CalculateUMAPSickleJr(SimSickleJrSmall,\numap.settings=umap.settings,modality=1)\nSimSickleJrSmall<-CalculateUMAPSickleJr(SimSickleJrSmall,\numap.settings=umap.settings,modality=2)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"ClusterSickleJr","snippet":"### Name: ClusterSickleJr\n### Title: Cluster the \\mathbf{H} matrix\n### Aliases: ClusterSickleJr\n\n### ** Examples\n\nSimSickleJrSmall<-ClusterSickleJr(SimSickleJrSmall,3)\nSimSickleJrSmall<-ClusterSickleJr(SimSickleJrSmall,method=\"louvain\",neighbors=5)\nSimSickleJrSmall<-ClusterSickleJr(SimSickleJrSmall,method=\"spectral\",neighbors=5,numclusts=3)\n#DO NOT DO THIS FOR REAL DATA; this is just to illustrate max clustering\nSimSickleJrSmall<-SetLambdasandRowReg(SimSickleJrSmall,rowReg=\"L2Norm\")\nSimSickleJrSmall<-ClusterSickleJr(SimSickleJrSmall,method=\"max\")\n\n\n"} {"package":"jrSiCKLSNMF","topic":"CreateSickleJr","snippet":"### Name: CreateSickleJr\n### Title: Create an object of class SickleJr\n### Aliases: CreateSickleJr\n\n### ** Examples\n\nExampleSickleJr<-CreateSickleJr(SimData$Xmatrices)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"DetermineClusters","snippet":"### Name: DetermineClusters\n### Title: Perform clustering diagnostics\n### Aliases: DetermineClusters\n\n### ** Examples\n\n#Since these data are too small, the clValid diagnostics do not run\n#properly. 
See the vignette for an example with the clValid diagnostics\nSimSickleJrSmall<-DetermineClusters(SimSickleJrSmall,numclusts=2:5,runclValidDiagnostics=FALSE)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"DetermineDFromIRLBA","snippet":"### Name: DetermineDFromIRLBA\n### Title: Create elbow plots of the singular values derived from IRLBA to\n### determine D for large datasets\n### Aliases: DetermineDFromIRLBA\n\n### ** Examples\n\nSimSickleJrSmall<-DetermineDFromIRLBA(SimSickleJrSmall,d=5)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"GenerateWmatricesandHmatrix","snippet":"### Name: GenerateWmatricesandHmatrix\n### Title: Initialize the \\mathbf{W} matrices in each modality and the\n### shared \\mathbf{H} matrix\n### Aliases: GenerateWmatricesandHmatrix\n\n### ** Examples\n\nSimSickleJrSmall<-SetLambdasandRowReg(SimSickleJrSmall,\nlambdaWlist=list(10,50),lambdaH=500,rowReg=\"None\")\nSimSickleJrSmall<-GenerateWmatricesandHmatrix(SimSickleJrSmall,d=5,usesvd=TRUE)\n\n\n\n"} {"package":"jrSiCKLSNMF","topic":"MinibatchDiagnosticPlot","snippet":"### Name: MinibatchDiagnosticPlot\n### Title: Plot a diagnostic plot for the mini-batch algorithm\n### Aliases: MinibatchDiagnosticPlot\n\n### ** Examples\n\nSimSickleJrSmall<-MinibatchDiagnosticPlot(SimSickleJrSmall)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"NormalizeCountMatrices","snippet":"### Name: NormalizeCountMatrices\n### Title: Normalize the count matrices and set whether to use the Poisson\n### KL divergence or the Frobenius norm\n### Aliases: NormalizeCountMatrices\n\n### ** Examples\n\nSimSickleJrSmall<-NormalizeCountMatrices(SimSickleJrSmall)\nSimSickleJrSmall<-NormalizeCountMatrices(SimSickleJrSmall, diffFunc=\"fr\",scaleFactor=1e6)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"PlotLossvsLatentFactors","snippet":"### Name: PlotLossvsLatentFactors\n### Title: Create plots to help determine the number of latent factors\n### Aliases: PlotLossvsLatentFactors\n\n### ** Examples\n\nSimSickleJrSmall@latent.factor.elbow.values<-data.frame(NULL,NULL)\nSimSickleJrSmall<-PlotLossvsLatentFactors(SimSickleJrSmall,d_vector=c(2:5),\nrounds=5,parallel=FALSE)\n#Next, we compute 2 of these in parallel.\n## Not run: \n##D SimSickleJrSmall<-PlotLossvsLatentFactors(SimSickleJrSmall,\n##D d_vector=c(6:7),rounds=5,parallel=TRUE,nCores=2)\n## End(Not run)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"PlotSickleJrUMAP","snippet":"### Name: PlotSickleJrUMAP\n### Title: Generate UMAP plots for an object of class SickleJr\n### Aliases: PlotSickleJrUMAP\n\n### ** Examples\n\nSimSickleJrSmall<-PlotSickleJrUMAP(SimSickleJrSmall,\ntitle=\"K-Means Example\")\nSimSickleJrSmall<-PlotSickleJrUMAP(SimSickleJrSmall,umap.modality=1)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"RunjrSiCKLSNMF","snippet":"### Name: RunjrSiCKLSNMF\n### Title: Run jrSiCKLSNMF on an object of class SickleJr\n### Aliases: RunjrSiCKLSNMF\n\n### ** Examples\n\nSimSickleJrSmall<-RunjrSiCKLSNMF(SimSickleJrSmall,rounds=5)\n\n\n"} {"package":"jrSiCKLSNMF","topic":"SetLambdasandRowReg","snippet":"### Name: SetLambdasandRowReg\n### Title: Set lambda values and type of row regularization for an object\n### of class SickleJr\n### Aliases: SetLambdasandRowReg\n\n### ** Examples\n\nSimSickleJrSmall<-SetLambdasandRowReg(SimSickleJrSmall,\nlambdaWlist=list(10,50),lambdaH=500,rowReg=\"None\")\nSimSickleJrSmall<-SetLambdasandRowReg(SimSickleJrSmall,\nlambdaWlist=list(3,15),lambdaH=0,rowReg=\"L2Norm\")\n\n\n"} {"package":"jrSiCKLSNMF","topic":"SetWandHfromWHinitials","snippet":"### Name: SetWandHfromWHinitials\n### Title: Set 
\mathbf{W} matrices and \mathbf{H} matrix from\n### pre-calculated values\n### Aliases: SetWandHfromWHinitials\n\n### ** Examples\n\nSimSickleJrSmall<-SetWandHfromWHinitials(SimSickleJrSmall,d=5)\n\n\n"} {"package":"MetaComp","topic":"load_edge_assignment","snippet":"### Name: load_edge_assignment\n### Title: Efficiently loads an EDGE-produced taxonomic assignment from a\n### file. Since EDGE tables are generated in an automated fashion, they\n### are assumed to be properly formatted, so the code checks only that\n### the file exists. Note, however, that entries unassigned to taxa are\n### removed. This implementation relies fully on the fread function from\n### the data.table package, gaining performance over traditional R\n### techniques.\n### Aliases: load_edge_assignment\n\n### ** Examples\n\npa_fpath <- system.file(\"extdata\", \"HMP_even//allReads-pangia.list.txt\", package=\"MetaComp\")\npangia_assignment = load_edge_assignment(pa_fpath, type = \"pangia\")\n\ntable(pangia_assignment$LEVEL)\n\npangia_assignment[pangia_assignment$LEVEL == \"phylum\",]\n\n\n\n"} {"package":"MetaComp","topic":"load_edge_assignments","snippet":"### Name: load_edge_assignments\n### Title: Efficiently loads BWA (or other EDGE-like) taxonomic assignment\n### tables from a list of files. Outputs a named list of\n### assignments.\n### Aliases: load_edge_assignments\n\n### ** Examples\n\nhmp_even_fp <- system.file(\"extdata\", \"HMP_even\", package=\"MetaComp\")\nhmp_stagger_fp <- system.file(\"extdata\", \"HMP_stagger\", package=\"MetaComp\")\ndata_files <- data.frame(V1 = c(\"HMP_even\", \"HMP_stagger\"),\n V2 = c(file.path(hmp_even_fp, \"allReads-gottcha2-speDB-b.list.txt\"),\n file.path(hmp_stagger_fp, \"allReads-gottcha2-speDB-b.list.txt\")))\nwrite.table(data_files, file.path(tempdir(), \"assignments.txt\"),\n row.names = FALSE, col.names = FALSE)\ngottcha2_assignments = load_edge_assignments(file.path(tempdir(), \"assignments.txt\"),\n type = \"gottcha2\")\n\nnames(gottcha2_assignments)\ntable(gottcha2_assignments[[1]]$LEVEL)\n\n\n\n"} {"package":"MetaComp","topic":"merge_edge_assignments","snippet":"### Name: merge_edge_assignments\n### Title: Merges two or more EDGE-like taxonomic assignments. 
The input\n### data frames are assumed to have the following columns: LEVEL, TAXA,\n### and ABUNDANCE - these will be used in the merge procedure; all other\n### columns will be ignored.\n### Aliases: merge_edge_assignments\n\n### ** Examples\n\n## Not run: \n##D hmp_even_fp <- system.file(\"extdata\", \"HMP_even\", package=\"MetaComp\")\n##D hmp_stagger_fp <- system.file(\"extdata\", \"HMP_stagger\", package=\"MetaComp\")\n##D data_files <- data.frame(V1 = c(\"HMP_even\", \"HMP_stagger\"),\n##D V2 = c(file.path(hmp_even_fp, \"allReads-gottcha2-speDB-b.list.txt\"),\n##D file.path(hmp_stagger_fp, \"allReads-gottcha2-speDB-b.list.txt\")))\n##D write.table(data_files, file.path(tempdir(), \"assignments.txt\"),\n##D row.names = FALSE, col.names = FALSE)\n##D gottcha2_assignments = merge_edge_assignments(\n##D load_edge_assignments(\n##D file.path(tempdir(), \"assignments.txt\"), type = \"gottcha2\"))\n## End(Not run)\n\n\n\n"} {"package":"MetaComp","topic":"plot_edge_assignment","snippet":"### Name: plot_edge_assignment\n### Title: Generates a single column ggplot for a taxonomic assignment\n### table and also outputs a PDF.\n### Aliases: plot_edge_assignment\n\n### ** Examples\n\npa_fpath <- system.file(\"extdata\", \"HMP_even//allReads-pangia.list.txt\", package=\"MetaComp\")\npangia_assignment = load_edge_assignment(pa_fpath, type = \"pangia\")\n\nplot_edge_assignment(pangia_assignment, \"phylum\", \"Pangia\", \"HMP Even\",\n file.path(tempdir(), \"assignment.pdf\"))\n\n\n\n"} {"package":"MetaComp","topic":"plot_merged_assignment","snippet":"### Name: plot_merged_assignment\n### Title: Generates a single column ggplot for a taxonomic assignment\n### table.\n### Aliases: plot_merged_assignment\n\n### ** Examples\n\n## Not run: \n##D hmp_even_fp <- system.file(\"extdata\", \"HMP_even\", package=\"MetaComp\")\n##D hmp_stagger_fp <- system.file(\"extdata\", \"HMP_stagger\", package=\"MetaComp\")\n##D data_files <- data.frame(V1 = c(\"HMP_even\", \"HMP_stagger\"),\n##D V2 = c(file.path(hmp_even_fp, \"allReads-gottcha2-speDB-b.list.txt\"),\n##D file.path(hmp_stagger_fp, \"allReads-gottcha2-speDB-b.list.txt\")))\n##D write.table(data_files, file.path(tempdir(), \"assignments.txt\"),\n##D row.names = FALSE, col.names = FALSE)\n##D gottcha2_assignments = merge_edge_assignments(\n##D load_edge_assignments(\n##D file.path(tempdir(), \"assignments.txt\"), type = \"gottcha2\"))\n##D plot_merged_assignment(gottcha2_assignments, \"family\", 'alphabetical', 100, 0,\n##D \"HMP side-to-side\", file.path(tempdir(), \"assignment.pdf\"))\n## End(Not run)\n\n\n\n"} {"package":"manifestoR","topic":"ManifestoAvailability","snippet":"### Name: ManifestoAvailability\n### Title: Manifesto Availability Information class\n### Aliases: ManifestoAvailability\n\n### ** Examples\n\n## Not run: \n##D wanted <- data.frame(party=c(41320, 41320), date=c(200909, 200509))\n##D mp_availability(wanted)\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"ManifestoCorpus","snippet":"### Name: ManifestoCorpus\n### Title: Manifesto Corpus class\n### Aliases: ManifestoCorpus\n\n### ** Examples\n\n## Not run: corpus <- mp_corpus(subset(mp_maindataset(), countryname == \"Russia\"))\n\n\n"} {"package":"manifestoR","topic":"ManifestoDocument","snippet":"### Name: ManifestoDocument\n### Title: Manifesto Document\n### Aliases: ManifestoDocument\n\n### ** Examples\n\n## Not run: \n##D corpus <- mp_corpus(subset(mp_maindataset(), countryname == \"New Zealand\"))\n##D doc <- corpus[[1]]\n##D print(doc)\n## End(Not run)\n\n\n"} 
{"package":"manifestoR","topic":"mp_availability","snippet":"### Name: mp_availability\n### Title: Availability information for election programmes\n### Aliases: mp_availability\n\n### ** Examples\n\n## Not run: \n##D mp_availability(countryname == \"New Zealand\")\n##D \n##D wanted <- data.frame(party=c(41320, 41320), date=c(200909, 200509))\n##D mp_availability(wanted)\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"mp_coreversions","snippet":"### Name: mp_coreversions\n### Title: List the available versions of the Manifesto Project's Main\n### Dataset\n### Aliases: mp_coreversions\n\n### ** Examples\n\n## Not run: mp_coreversions()\n\n\n"} {"package":"manifestoR","topic":"mp_corpus","snippet":"### Name: mp_corpus\n### Title: Get documents from the Manifesto Corpus Database\n### Aliases: mp_corpus\n\n### ** Examples\n\n## Not run: \n##D corpus <- mp_corpus(party == 61620 & rile > 10)\n##D \n##D wanted <- data.frame(party=c(41320, 41320), date=c(200909, 201309))\n##D mp_corpus(wanted)\n##D \n##D mp_corpus(subset(mp_maindataset(), countryname == \"France\"))\n##D \n##D partially_available <- data.frame(party=c(41320, 41320), date=c(200909, 200509))\n##D mp_corpus(partially_available)\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"mp_interpolate","snippet":"### Name: mp_interpolate\n### Title: Interpolate values within election periods\n### Aliases: mp_interpolate\n\n### ** Examples\n\n## Not run: \n##D mp_interpolate(mp_maindataset(), method = \"constant\")\n##D mp_interpolate(mp_maindataset(), approx = na.spline, maxgap = 3)\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"mp_load_cache","snippet":"### Name: mp_load_cache\n### Title: Load manifestoR's cache\n### Aliases: mp_load_cache\n\n### ** Examples\n\n## Not run: mp_load_cache() ## loads cache from file \"mp_cache.RData\"\n\n\n"} {"package":"manifestoR","topic":"mp_maindataset","snippet":"### Name: mp_maindataset\n### Title: Access the Manifesto Project's Main Dataset\n### Aliases: mp_maindataset mp_southamerica_dataset\n\n### ** Examples\n\n## Not run: \n##D mpds <- mp_maindataset()\n##D head(mpds)\n##D median(subset(mpds, countryname == \"Switzerland\")$rile, na.rm = TRUE)\n## End(Not run)\n## Not run: \n##D mp_maindataset(download_format = \"dta\") %>% read_dta() ## requires package haven\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"mp_metadata","snippet":"### Name: mp_metadata\n### Title: Get meta data for election programmes\n### Aliases: mp_metadata\n\n### ** Examples\n\n## Not run: \n##D mp_metadata(party == 21221)\n##D \n##D wanted <- data.frame(party=c(41320, 41320), date=c(200909, 200509))\n##D mp_metadata(wanted)\n## End(Not run)\n\n\n"} {"package":"manifestoR","topic":"mp_save_cache","snippet":"### Name: mp_save_cache\n### Title: Save manifestoR's cache\n### Aliases: mp_save_cache\n\n### ** Examples\n\n## Not run: mp_save_cache() ## save to \"mp_cache.RData\" in current working directory\n\n\n"} {"package":"manifestoR","topic":"mp_view_originals","snippet":"### Name: mp_view_originals\n### Title: View original documents from the Manifesto Corpus Database\n### Aliases: mp_view_originals\n\n### ** Examples\n\n## Not run: \n##D mp_view_originals(party == 41320 & date == 200909)\n## End(Not run)\n\n\n"} {"package":"parquetize","topic":"check_parquet","snippet":"### Name: check_parquet\n### Title: Check if parquet file or dataset is readable and return basic\n### informations\n### Aliases: check_parquet\n\n### ** Examples\n\n\n# check a parquet 
file\ncheck_parquet(parquetize_example(\"iris.parquet\"))\n\n# check a parquet dataset\ncheck_parquet(parquetize_example(\"iris_dataset\"))\n\n\n"} {"package":"parquetize","topic":"csv_to_parquet","snippet":"### Name: csv_to_parquet\n### Title: Convert a csv file to parquet format\n### Aliases: csv_to_parquet\n\n### ** Examples\n\n\n# Conversion from a local csv file to a single parquet file :\n\ncsv_to_parquet(\n path_to_file = parquetize_example(\"region_2022.csv\"),\n path_to_parquet = tempfile(fileext=\".parquet\")\n)\n\n# Conversion from a local csv file to a single parquet file and select only\n# a few columns :\n\ncsv_to_parquet(\n path_to_file = parquetize_example(\"region_2022.csv\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n columns = c(\"REG\",\"LIBELLE\")\n)\n\n# Conversion from a local csv file to a partitioned parquet file :\n\ncsv_to_parquet(\n path_to_file = parquetize_example(\"region_2022.csv\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n partition = \"yes\",\n partitioning = c(\"REG\")\n)\n\n# Conversion from a URL and a zipped file :\n\ncsv_to_parquet(\n path_to_file = \"https://www.nomisweb.co.uk/output/census/2021/census2021-ts007.zip\",\n filename_in_zip = \"census2021-ts007-ctry.csv\",\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n\n"} {"package":"parquetize","topic":"dbi_to_parquet","snippet":"### Name: dbi_to_parquet\n### Title: Convert a SQL query on a DBI connection to parquet format\n### Aliases: dbi_to_parquet\n\n### ** Examples\n\n\n# Conversion from a sqlite dbi connection to a single parquet file :\n\ndbi_connection <- DBI::dbConnect(RSQLite::SQLite(),\n system.file(\"extdata\",\"iris.sqlite\",package = \"parquetize\"))\n\n# Reading iris table from local sqlite database\n# and conversion to one parquet file :\n\ndbi_to_parquet(\n conn = dbi_connection,\n sql_query = \"SELECT * FROM iris\",\n path_to_parquet = tempfile(fileext=\".parquet\")\n)\n\n# Reading iris table from local sqlite database by chunk (using\n# `max_memory` argument) and conversion to multiple parquet files\n\ndbi_to_parquet(\n conn = dbi_connection,\n sql_query = \"SELECT * FROM iris\",\n path_to_parquet = tempdir(),\n max_memory = 2 / 1024\n)\n\n# Using chunk and partition together is not possible directly but easy to do :\n# Reading iris table from local sqlite database by chunk (using\n# `max_memory` argument) and conversion to arrow dataset partitioned by\n# species\n\n# get unique values of column \"Species\" from table \"iris\"\npartitions <- get_partitions(dbi_connection, table = \"iris\", column = \"Species\")\n\n# loop over those values\nfor (species in partitions) {\n dbi_to_parquet(\n conn = dbi_connection,\n # use glue_sql to create the query filtering the partition\n sql_query = glue::glue_sql(\"SELECT * FROM iris where Species = {species}\",\n .con = dbi_connection),\n # add the partition name in the output dir to respect parquet partition schema\n path_to_parquet = file.path(tempdir(), \"iris\", paste0(\"Species=\", species)),\n max_memory = 2 / 1024\n )\n}\n\n# If you need a more complicated query to get your partitions, you can use\n# dbGetQuery directly :\ncol_to_partition <- DBI::dbGetQuery(dbi_connection, \"SELECT distinct(`Species`) FROM `iris`\")[,1]\n\n\n\n"} {"package":"parquetize","topic":"download_extract","snippet":"### Name: download_extract\n### Title: download and uncompress file if needed\n### Aliases: download_extract\n\n### ** Examples\n\n\n# 1. unzip a local zip file\n# 2. 
parquetize it\n\nfile_path <- download_extract(system.file(\"extdata\",\"mtcars.csv.zip\", package = \"readr\"))\ncsv_to_parquet(\n file_path,\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# 1. download a remote file\n# 2. extract the file census2021-ts007-ctry.csv\n# 3. parquetize it\n\nfile_path <- download_extract(\n \"https://www.nomisweb.co.uk/output/census/2021/census2021-ts007.zip\",\n filename_in_zip = \"census2021-ts007-ctry.csv\"\n)\ncsv_to_parquet(\n file_path,\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# the file is local and not zipped so :\n# 1. parquetize it\n\nfile_path <- download_extract(parquetize_example(\"region_2022.csv\"))\ncsv_to_parquet(\n file_path,\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n\n\n"} {"package":"parquetize","topic":"fst_to_parquet","snippet":"### Name: fst_to_parquet\n### Title: Convert a fst file to parquet format\n### Aliases: fst_to_parquet\n\n### ** Examples\n\n\n# Conversion from a local fst file to a single parquet file :\n\nfst_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.fst\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from a local fst file to a partitioned parquet file :\n\nfst_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.fst\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n partition = \"yes\",\n partitioning = c(\"Species\")\n)\n\n\n"} {"package":"parquetize","topic":"get_partitions","snippet":"### Name: get_partitions\n### Title: get unique values from table's column\n### Aliases: get_partitions\n\n### ** Examples\n\ndbi_connection <- DBI::dbConnect(RSQLite::SQLite(),\n system.file(\"extdata\",\"iris.sqlite\",package = \"parquetize\"))\n\nget_partitions(dbi_connection, \"iris\", \"Species\")\n\n\n"} {"package":"parquetize","topic":"json_to_parquet","snippet":"### Name: json_to_parquet\n### Title: Convert a json file to parquet format\n### Aliases: json_to_parquet\n\n### ** Examples\n\n\n# Conversion from a local json file to a single parquet file :\n\njson_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.json\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from a local ndjson file to a single parquet file :\n\njson_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.ndjson\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n format = \"ndjson\"\n)\n\n\n"} {"package":"parquetize","topic":"parquetize_example","snippet":"### Name: parquetize_example\n### Title: Get path to parquetize example\n### Aliases: parquetize_example\n\n### ** Examples\n\nparquetize_example()\nparquetize_example(\"region_2022.csv\")\nparquetize_example(\"iris_dataset\")\n\n\n"} {"package":"parquetize","topic":"rbind_parquet","snippet":"### Name: rbind_parquet\n### Title: Function to bind multiple parquet files by row\n### Aliases: rbind_parquet\n\n### ** Examples\n\n## Not run: \n##D library(arrow)\n##D if (!file.exists(\"output\")) {\n##D dir.create(\"output\")\n##D }\n##D \n##D file.create(\"output/test_data1-4.parquet\")\n##D write_parquet(data.frame(\n##D x = c(\"a\",\"b\",\"c\"),\n##D y = c(1L,2L,3L)\n##D ),\n##D \"output/test_data1-4.parquet\")\n##D \n##D file.create(\"output/test_data4-6.parquet\")\n##D write_parquet(data.frame(\n##D x = c(\"d\",\"e\",\"f\"),\n##D y = c(4L,5L,6L)\n##D ), \"output/test_data4-6.parquet\")\n##D \n##D test_data <- 
rbind_parquet(folder = \"output\",\n##D output_name = \"test_data\",\n##D delete_initial_files = FALSE)\n## End(Not run)\n\n\n"} {"package":"parquetize","topic":"rds_to_parquet","snippet":"### Name: rds_to_parquet\n### Title: Convert a rds file to parquet format\n### Aliases: rds_to_parquet\n\n### ** Examples\n\n\n# Conversion from a local rds file to a single parquet file :\n\nrds_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.rds\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from a local rds file to a partitioned parquet file :\n\nrds_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.rds\",package = \"parquetize\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n partition = \"yes\",\n partitioning = c(\"Species\")\n)\n\n\n"} {"package":"parquetize","topic":"sqlite_to_parquet","snippet":"### Name: sqlite_to_parquet\n### Title: Convert a sqlite file to parquet format\n### Aliases: sqlite_to_parquet\n\n### ** Examples\n\n\n# Conversion from a local sqlite file to a single parquet file :\n\nsqlite_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.sqlite\",package = \"parquetize\"),\n table_in_sqlite = \"iris\",\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from a local sqlite file to a partitioned parquet file :\n\nsqlite_to_parquet(\n path_to_file = system.file(\"extdata\",\"iris.sqlite\",package = \"parquetize\"),\n table_in_sqlite = \"iris\",\n path_to_parquet = tempfile(),\n partition = \"yes\",\n partitioning = c(\"Species\")\n)\n\n\n"} {"package":"parquetize","topic":"table_to_parquet","snippet":"### Name: table_to_parquet\n### Title: Convert an input file to parquet format\n### Aliases: table_to_parquet\n\n### ** Examples\n\n# Conversion from a SAS file to a single parquet file :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sas7bdat\", package = \"haven\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from an SPSS file to a single parquet file :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sav\", package = \"haven\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Conversion from a Stata file to a single parquet file without progress bar :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.dta\", package = \"haven\"),\n path_to_parquet = tempfile(fileext = \".parquet\")\n)\n\n# Reading SPSS file by chunk (using `max_rows` argument)\n# and conversion to multiple parquet files :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sav\", package = \"haven\"),\n path_to_parquet = tempfile(),\n max_rows = 50\n)\n\n# Reading SPSS file by chunk (using `max_memory` argument)\n# and conversion to multiple parquet files of 5 Kb when loaded (5 Mb / 1024)\n# (in real files, you should use a bigger value that fits in memory, like 3000\n# or 4000) :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sav\", package = \"haven\"),\n path_to_parquet = tempfile(),\n max_memory = 5 / 1024\n)\n\n# Reading SAS file by chunk of 50 lines with encoding\n# and conversion to multiple files :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sas7bdat\", package = \"haven\"),\n path_to_parquet = tempfile(),\n max_rows = 50,\n encoding = \"utf-8\"\n)\n\n# Conversion from a SAS file to a single parquet file and select only\n# a few columns :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sas7bdat\", package = 
\"haven\"),\n path_to_parquet = tempfile(fileext = \".parquet\"),\n columns = c(\"Species\",\"Petal_Length\")\n)\n\n# Conversion from a SAS file to a partitioned parquet file :\n\ntable_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sas7bdat\", package = \"haven\"),\n path_to_parquet = tempfile(),\n partition = \"yes\",\n partitioning = c(\"Species\") # vector use as partition key\n)\n\n# Reading SAS file by chunk of 50 lines\n# and conversion to multiple files with zstd, compression level 10\n\nif (isTRUE(arrow::arrow_info()$capabilities[['zstd']])) {\n table_to_parquet(\n path_to_file = system.file(\"examples\",\"iris.sas7bdat\", package = \"haven\"),\n path_to_parquet = tempfile(),\n max_rows = 50,\n compression = \"zstd\",\n compression_level = 10\n )\n}\n\n\n"} {"package":"parquetize","topic":"write_parquet_at_once","snippet":"### Name: write_parquet_at_once\n### Title: write parquet file or dataset based on partition argument\n### Aliases: write_parquet_at_once\n\n### ** Examples\n\n\nwrite_parquet_at_once(iris, tempfile())\n\nwrite_parquet_at_once(iris, tempfile(), partition = \"yes\", partitioning = c(\"Species\"))\n\n\n"} {"package":"parquetize","topic":"write_parquet_by_chunk","snippet":"### Name: write_parquet_by_chunk\n### Title: read input by chunk on function and create dataset\n### Aliases: write_parquet_by_chunk\n\n### ** Examples\n\n\n# example with a dataframe\n\n# we create the function to loop over the data.frame\n\nread_method <- function(input, skip = 0L, n_max = Inf) {\n # if we are after the end of the input we return an empty data.frame\n if (skip+1 > nrow(input)) { return(data.frame()) }\n\n # return the n_max row from skip + 1\n input[(skip+1):(min(skip+n_max, nrow(input))),]\n}\n\n# we use it\n\nwrite_parquet_by_chunk(\n read_method = read_method,\n input = mtcars,\n path_to_parquet = tempfile(),\n max_rows = 10,\n)\n\n\n#\n# Example with haven::read_sas\n#\n\n# we need to pass two argument beside the 3 input, skip and n_max.\n# We will use a closure :\n\nmy_read_closure <- function(encoding, columns) {\n function(input, skip = OL, n_max = Inf) {\n haven::read_sas(data_file = input,\n n_max = n_max,\n skip = skip,\n encoding = encoding,\n col_select = all_of(columns))\n }\n}\n\n# we initialize the closure\n\nread_method <- my_read_closure(encoding = \"WINDOWS-1252\", columns = c(\"Species\", \"Petal_Width\"))\n\n# we use it\nwrite_parquet_by_chunk(\n read_method = read_method,\n input = system.file(\"examples\",\"iris.sas7bdat\", package = \"haven\"),\n path_to_parquet = tempfile(),\n max_rows = 75,\n)\n\n\n\n"} {"package":"birankr","topic":"bipartite_rank","snippet":"### Name: bipartite_rank\n### Title: Bipartite Ranks\n### Aliases: bipartite_rank\n### Keywords: BGRM BiRank Bipartite CoHITS HITS centrality rank\n\n### ** Examples\n\n#create edge list between patients and providers\n df <- data.table(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n\n#estimate CoHITS ranks\n CoHITS <- bipartite_rank(data = df, normalizer = \"CoHITS\")\n\n\n"} {"package":"birankr","topic":"br_bgrm","snippet":"### Name: br_bgrm\n### Title: BGRM Ranks\n### Aliases: br_bgrm\n### Keywords: BGRM Bipartite centrality rank\n\n### ** Examples\n\n#create edge list between patients and providers\n df <- data.table(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n\n#estimate BGRM ranks\n BGRM <- 
br_bgrm(data = df)\n\n\n"} {"package":"birankr","topic":"br_birank","snippet":"### Name: br_birank\n### Title: BiRanks\n### Aliases: br_birank\n### Keywords: BiRank Bipartite centrality rank\n\n### ** Examples\n\n#create edge list between patients and providers\n df <- data.table(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n\n#estimate BiRank ranks\n BiRank <- br_birank(data = df)\n\n\n"} {"package":"birankr","topic":"br_cohits","snippet":"### Name: br_cohits\n### Title: CoHITS Ranks\n### Aliases: br_cohits\n### Keywords: Bipartite CoHITS centrality rank\n\n### ** Examples\n\n#create edge list between patients and providers\n df <- data.table(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n\n#estimate CoHITS ranks\n CoHITS <- br_cohits(data = df)\n\n\n"} {"package":"birankr","topic":"br_hits","snippet":"### Name: br_hits\n### Title: HITS Ranks\n### Aliases: br_hits\n### Keywords: Bipartite HITS centrality rank\n\n### ** Examples\n\n#create edge list between patients and providers\n df <- data.table(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n\n#estimate HITS ranks\n HITS <- br_hits(data = df)\n\n\n"} {"package":"birankr","topic":"pagerank","snippet":"### Name: pagerank\n### Title: Estimate PageRank\n### Aliases: pagerank\n### Keywords: Bipartite PageRank centrality rank\n\n### ** Examples\n\n#Prepare one-mode data\n df_one_mode <- data.frame(\n sender = sample(x = 1:10000, size = 10000, replace = TRUE),\n receiver = sample(x = 1:10000, size = 10000, replace = TRUE)\n )\n\n#Add self-loops for all nodes\n unique_ids <- unique(c(df_one_mode$sender, df_one_mode$receiver))\n df_one_mode <- rbind(df_one_mode, data.frame(sender = unique_ids,\n receiver = unique_ids))\n\n#Estimate PageRank in one-mode data\n PageRank <- pagerank(data = df_one_mode, is_bipartite = FALSE)\n\n#Estimate PageRank in two-mode data\n df_two_mode <- data.frame(\n patient_id = sample(x = 1:10000, size = 10000, replace = TRUE),\n provider_id = sample(x = 1:5000, size = 10000, replace = TRUE)\n )\n PageRank <- pagerank(data = df_two_mode)\n\n\n"} {"package":"birankr","topic":"project_to_one_mode","snippet":"### Name: project_to_one_mode\n### Title: Create a one-mode projection of a two mode graph\n### Aliases: project_to_one_mode\n### Keywords: dgCMatrix matrix\n\n### ** Examples\n\n#make matrix\n my_matrix <- sparseMatrix(i = c(1, 1, 2, 3, 4, 4, 5, 6, 7, 7), \n j = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), x = 1\n )\n#project to one mode\n project_to_one_mode(adj_mat = my_matrix, mode = \"rows\")\n\n\n"} {"package":"birankr","topic":"sparsematrix_from_edgelist","snippet":"### Name: sparsematrix_from_edgelist\n### Title: Convert edge list to sparse matrix\n### Aliases: sparsematrix_from_edgelist\n### Keywords: dgCMatrix\n\n### ** Examples\n\n#make edge.list\n df <- data.frame(\n id1 = sample(x = 1:20, size = 100, replace = TRUE),\n id2 = sample(x = 1:10, size = 100, replace = TRUE),\n weight = sample(x = 1:10, size = 100, replace = TRUE)\n )\n#convert to sparsematrix\n sparsematrix_from_edgelist(data = df)\n\n\n"} {"package":"birankr","topic":"sparsematrix_from_matrix","snippet":"### Name: sparsematrix_from_matrix\n### Title: Convert matrix to sparse matrix\n### Aliases: sparsematrix_from_matrix\n### Keywords: dgCMatrix matrix\n\n### ** Examples\n\n#make matrix\n 
my_matrix <- rep(0, 100)\n my_matrix[c(1, 11, 22, 33, 44, 54, 65, 76, 87, 97)] <- 1\n my_matrix <- matrix(data = my_matrix, nrow = 10, ncol = 10)\n#convert to sparsematrix\n sparsematrix_from_matrix(adj_mat = my_matrix)\n\n\n"} {"package":"birankr","topic":"sparsematrix_rm_weights","snippet":"### Name: sparsematrix_rm_weights\n### Title: Remove sparse matrix edge weights\n### Aliases: sparsematrix_rm_weights\n### Keywords: dgCMatrix matrix\n\n### ** Examples\n\n#make matrix\n my_matrix <- sparseMatrix(\n i = c(1, 1, 2, 3, 4, 4, 5, 6, 7, 7), \n j = c(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), \n x = c(1, 1, 3, 1, 2, 1, 1, 1, 2, 1)\n )\n#remove weights\n sparsematrix_rm_weights(my_matrix)\n\n\n"} {"package":"bpDir","topic":"AxialBoxplot","snippet":"### Name: AxialBoxplot\n### Title: AxialBoxplot\n### Aliases: AxialBoxplot\n\n### ** Examples\n\nset.seed(1)\n#install.packages(\"circular\")\nrequire(circular)\n#install.packages(\"plotrix\")\nrequire(plotrix)\ntheta <- circular::rvonmises(100, circular(pi/2), 3, control.circular=list(units=\"radians\"))\naxialTheta <- circular(theta, modulo = \"pi\")\nAxialBoxplot(axialTheta, template = \"radians\", mirror = FALSE)\n\n\n"} {"package":"bpDir","topic":"CircularBoxplot","snippet":"### Name: CircularBoxplot\n### Title: Circular Box-and-Whisker Plot\n### Aliases: CircularBoxplot\n### Keywords: device\n\n### ** Examples\n\n# Circular Boxplot on vanishing directions of homing pigeons\n#install.packages(\"circular\")\n#install.packages(\"plotrix\")\nrequire(circular)\nrequire(plotrix)\n#help(fisherB12c)\ndata(fisherB12c)\nCircularBoxplot(fisherB12c, template=\"geographics\")\n\n\n"} {"package":"bpDir","topic":"CircularTukeyDepth","snippet":"### Name: CircularTukeyDepth\n### Title: Circular Tukey Depth\n### Aliases: CircularTukeyDepth\n### Keywords: depth\n\n### ** Examples\n\n#install.packages(\"circular\")\nrequire(circular)\ndata1 <- rvonmises(50, circular(pi/2), 5)\nCircularTukeyDepth(data1)\n\n\n"} {"package":"loadeR","topic":"carga.datos","snippet":"### Name: carga.datos\n### Title: Load data from a text file.\n### Aliases: carga.datos\n\n### ** Examples\n\ntf <- tempfile()\nwrite.table(iris, tf, sep = \";\", dec = \",\", row.names = FALSE)\ncarga.datos(ruta = tf, nombre.filas = FALSE, preview = TRUE)\n\n\n\n"} {"package":"loadeR","topic":"carga.datos.excel","snippet":"### Name: carga.datos.excel\n### Title: Load data from Excel.\n### Aliases: carga.datos.excel\n\n### ** Examples\n\n## No test: \n tf <- tempfile()\n writexl::write_xlsx(iris, paste0(tf, \".xlsx\"), TRUE)\n carga.datos.excel(ruta = paste0(tf, \".xlsx\"), row_names = FALSE, preview = TRUE)\n## End(No test)\n\n\n\n"} {"package":"loadeR","topic":"codigo.monokai","snippet":"### Name: codigo.monokai\n### Title: HTML to show code in a shiny application.\n### Aliases: codigo.monokai\n\n### ** Examples\n\ncodigo.monokai(\"id\", \"70vh\")\n\n\n\n"} {"package":"loadeR","topic":"datos.disyuntivos","snippet":"### Name: datos.disyuntivos\n### Title: Add disjunctive columns to a data.frame.\n### Aliases: datos.disyuntivos\n\n### ** Examples\n\ndatos.disyuntivos(iris, \"Species\")\n\n\n\n"} {"package":"loadeR","topic":"devolver.disyuntivos","snippet":"### Name: devolver.disyuntivos\n### Title: Convert disjunctive columns back to the original.\n### Aliases: devolver.disyuntivos\n\n### ** Examples\n\nr <- datos.disyuntivos(iris, \"Species\")\ndevolver.disyuntivos(r, \"Species\")\n\n\n\n"} {"package":"loadeR","topic":"dfnormal","snippet":"### Name: dfnormal\n### Title: Data.frame with normal test values.\n### Aliases: 
dfnormal\n\n### ** Examples\n\ndfnormal(iris[, -5])\n\n\n\n"} {"package":"loadeR","topic":"e_cor","snippet":"### Name: e_cor\n### Title: Correlation plot\n### Aliases: e_cor\n\n### ** Examples\n\np <- round(cor(iris[, -5]), 3)\ne_cor(p)\n\n\n\n"} {"package":"loadeR","topic":"e_histboxplot","snippet":"### Name: e_histboxplot\n### Title: Histogram + boxplot\n### Aliases: e_histboxplot\n\n### ** Examples\n\ne_histboxplot(iris$Sepal.Width, \"Sepal.Width\")\n\n\n\n"} {"package":"loadeR","topic":"e_histnormal","snippet":"### Name: e_histnormal\n### Title: Normal plot\n### Aliases: e_histnormal\n\n### ** Examples\n\ne_histnormal(iris$Sepal.Length)\n\n\n\n"} {"package":"loadeR","topic":"e_qq","snippet":"### Name: e_qq\n### Title: Qplot + Qline\n### Aliases: e_qq\n\n### ** Examples\n\ne_qq(iris$Sepal.Length)\n\n\n\n"} {"package":"loadeR","topic":"infoBoxPROMiDAT","snippet":"### Name: infoBoxPROMiDAT\n### Title: Information box.\n### Aliases: infoBoxPROMiDAT\n\n### ** Examples\n\nlibrary(shiny)\ninfoBoxPROMiDAT(\"Title\", \"Value\", icon(\"info\"))\n\n\n\n"} {"package":"loadeR","topic":"labelInput","snippet":"### Name: labelInput\n### Title: Create a label that can be used to show text.\n### Aliases: labelInput\n\n### ** Examples\n\nlabelInput(\"id\", \"data\")\n\n\n\n"} {"package":"loadeR","topic":"labels_loadeR","snippet":"### Name: labels_loadeR\n### Title: Returns a vector of keys to translate with tr.\n### Aliases: labels_loadeR\n\n### ** Examples\n\nlabels_loadeR()\n\n\n\n"} {"package":"loadeR","topic":"menu.idioma","snippet":"### Name: menu.idioma\n### Title: HTML for language menu.\n### Aliases: menu.idioma\n\n### ** Examples\n\nmenu.idioma()\n\n\n\n"} {"package":"loadeR","topic":"options.run","snippet":"### Name: options.run\n### Title: Creates a button to use in an options menu.\n### Aliases: options.run\n\n### ** Examples\n\noptions.run(\"id\")\n\n\n\n"} {"package":"loadeR","topic":"tabBoxPrmdt","snippet":"### Name: tabBoxPrmdt\n### Title: Tabset panel with options menu.\n### Aliases: tabBoxPrmdt\n\n### ** Examples\n\nlibrary(shiny)\ntabBoxPrmdt(id = \"id\", title = \"title\", tabPanel(\"Tab1\"))\n\n\n\n"} {"package":"loadeR","topic":"tabsOptions","snippet":"### Name: tabsOptions\n### Title: Options menu in footer for tabBoxPrmdt (tabsetPanel).\n### Aliases: tabsOptions\n\n### ** Examples\n\ntabsOptions()\n\n\n\n"} {"package":"loadeR","topic":"tr","snippet":"### Name: tr\n### Title: Returns a translated text (user defined).\n### Aliases: tr\n\n### ** Examples\n\ntr(\"data\", \"en\")\n\n\n\n"} {"package":"loadeR","topic":"translation.loadeR","snippet":"### Name: translation.loadeR\n### Title: Returns a list of sentences with their translation in different\n### languages.\n### Aliases: translation.loadeR\n\n### ** Examples\n\ntranslation.loadeR()\n\n\n\n"} {"package":"loadeR","topic":"var.categoricas","snippet":"### Name: var.categoricas\n### Title: Filter categorical variables of a data.frame.\n### Aliases: var.categoricas\n\n### ** Examples\n\nvar.categoricas(iris)\n\n\n\n"} {"package":"loadeR","topic":"var.numericas","snippet":"### Name: var.numericas\n### Title: Filter numeric variables of a data.frame.\n### Aliases: var.numericas\n\n### ** Examples\n\nvar.numericas(iris)\n\n\n\n"} {"package":"literanger","topic":"predict.literanger","snippet":"### Name: predict.literanger\n### Title: Literanger prediction\n### Aliases: predict.literanger\n\n### ** Examples\n\n## Classification forest\ntrain_idx <- sample(nrow(iris), 2/3 * nrow(iris))\niris_train <- iris[ train_idx, ]\niris_test <- 
iris[-train_idx, ]\nrf_iris <- train(data=iris_train, response_name=\"Species\")\npred_iris_bagged <- predict(rf_iris, newdata=iris_test,\n prediction_type=\"bagged\")\npred_iris_inbag <- predict(rf_iris, newdata=iris_test,\n prediction_type=\"inbag\")\n# compare bagged vs actual test values\ntable(iris_test$Species, pred_iris_bagged$values)\n# compare bagged prediction vs in-bag draw\ntable(pred_iris_bagged$values, pred_iris_inbag$values)\n\n\n\n"} {"package":"literanger","topic":"train","snippet":"### Name: train\n### Title: Train forest using ranger for multiple imputation algorithms.\n### Aliases: train\n\n### ** Examples\n\n## Classification forest with default settings\ntrain(data=iris, response_name=\"Species\")\n\n## Prediction\ntrain_idx <- sample(nrow(iris), 2/3 * nrow(iris))\niris_train <- iris[train_idx, ]\niris_test <- iris[-train_idx, ]\nrg_iris <- train(data=iris_train, response_name=\"Species\")\npred_iris <- predict(rg_iris, newdata=iris_test)\ntable(iris_test$Species, pred_iris$values)\n\n\n\n"} {"package":"egor","topic":"EI","snippet":"### Name: EI\n### Title: Calculate EI-Index of ego networks\n### Aliases: EI\n### Keywords: ego-centered network sna\n\n### ** Examples\n\ndata(\"egor32\")\nEI(egor32, \"sex\")\n\n\n"} {"package":"egor","topic":"activate.egor","snippet":"### Name: activate.egor\n### Title: Activate ego, alter or alter-alter tie data level of an egor\n### dataset\n### Aliases: activate.egor\n### Keywords: ego-centered network\n\n### ** Examples\n\ne <- make_egor(5,50)\ne %>% \n activate(\"aatie\") %>% \n mutate(weight2 = 2 + weight) %>% \n activate(\"alter\") %>% \n mutate(age.years = age.years^3)\n\n\n"} {"package":"egor","topic":"alts_diversity_count","snippet":"### Name: alts_diversity_count\n### Title: Calculate diversity measures on an 'egor' object.\n### Aliases: alts_diversity_count alts_diversity_entropy\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\nalts_diversity_count(egor32, \"age\")\nalts_diversity_entropy(egor32, \"age\")\n\n\n"} {"package":"egor","topic":"append_egor","snippet":"### Name: append_egor\n### Title: Append rows/columns to ego, alter or aatie data\n### Aliases: append_egor append_rows append_cols\n\n### ** Examples\n\ne <- make_egor(12, 15)\n\n# Adding a column to the ego level\nadditional_ego_columns <-\n tibble(x = sample(1:3, 12, replace = TRUE))\n \nappend_cols(e, additional_ego_columns)\n\n# Adding rows to the ego and alter level\nadditional_ego_rows <-\n list(\n .egoID = 13,\n sex = \"w\",\n age = factor(\"56 - 65\"),\n age.years = 60,\n country = \"Australia\"\n ) %>%\n as_tibble()\n \nadditional_alter_rows <-\n list(\n .altID = 1:5,\n .egoID = rep(13, 5),\n sex = sample(c(\"f\", \"m\"), 5, replace = TRUE)\n ) %>%\n as_tibble()\n \nappend_rows(e, additional_ego_rows) %>%\n activate(alter) %>%\n append_rows(additional_alter_rows)\n\n\n"} {"package":"egor","topic":"as_igraph","snippet":"### Name: as_igraph\n### Title: Convert 'egor' object to 'network' or 'igraph' objects\n### Aliases: as_igraph as_igraph.nested_egor as.igraph.egor as_network\n### as.network.egor\n\n### ** Examples\n\ne <- make_egor(3, 22)\nas_igraph(e)\n\n\n"} {"package":"egor","topic":"as_tibble.egor","snippet":"### Name: as_tibble.egor\n### Title: Extract ego, alter, and alter-alter tables from an 'egor'\n### object.\n### Aliases: as_tibble.egor as_survey.egor as_egos_df as_alters_df\n### as_aaties_df as_egos_survey as_alters_survey as_aaties_survey\n\n### ** Examples\n\n# Load example 
data\ndata(egor32)\n\nas_tibble(egor32) # Ego table.\n\negor32 %>%\n activate(\"alter\") %>%\n as_tibble(include.ego.vars=TRUE) # Alter table, but also with ego variables.\n\nlibrary(srvyr)\nas_survey(egor32) # Ego table with survey design.\n\n# Despite alter table being active, obtain the ego table.\n(egor32 <- activate(egor32, \"alter\"))\nas_egos_df(egor32)\n\n# Create global alter table\nas_alters_df(egor32)\n\n# Create global alter-alter relations table\nas_aaties_df(egor32)\n\n# ... adding alter variables\nas_aaties_df(egor32, include.alter.vars = TRUE)\nas_egos_survey(egor32)\nas_alters_survey(egor32) # Notice the resulting cluster design.\n\n\n"} {"package":"egor","topic":"clustered_graphs","snippet":"### Name: clustered_graphs\n### Title: Cluster ego-centered networks by a grouping factor\n### Aliases: clustered_graphs clustered_graphs.list clustered_graphs.egor\n### clustered_graphs.data.frame\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\n\n# Simplify networks to clustered graphs, stored as igraph objects\ngraphs <- clustered_graphs(egor32, \"country\") \n\n# Visualise\npar(mfrow = c(2,3))\nvis_clustered_graphs(\n graphs[1:5]\n)\npar(mfrow = c(1,1))\n\n\n"} {"package":"egor","topic":"comp_ei","snippet":"### Name: comp_ei\n### Title: Calculate the EI-Indices of an 'egor' object as a measurement of\n### ego-alter homophily\n### Aliases: comp_ei\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\ncomp_ei(egor32, \"age\", \"age\")\n\n\n"} {"package":"egor","topic":"comp_ply","snippet":"### Name: comp_ply\n### Title: Calculate custom compositional measures on an 'egor' object\n### Aliases: comp_ply\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndf <- make_egor(10, 32)\ncomp_ply(df, \"age.years\", sd, na.rm = TRUE)\n\n\n"} {"package":"egor","topic":"composition","snippet":"### Name: composition\n### Title: Calculate the composition of alter attributes in an 'egor'\n### object\n### Aliases: composition\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\ncomposition(egor32, \"sex\")\n\n\n"} {"package":"egor","topic":"count_dyads","snippet":"### Name: count_dyads\n### Title: Count attribute combinations of dyads in ego-centered networks\n### Aliases: count_dyads\n\n### ** Examples\n\ndata(egor32)\ncount_dyads(object = egor32,\n alter_var_name = \"country\")\n\n# Return result as long tibble.\ncount_dyads(object = egor32,\n alter_var_name = \"country\",\n return_as = \"long\")\n\n\n"} {"package":"egor","topic":"ego_constraint","snippet":"### Name: ego_constraint\n### Title: Calculate Burt constraint for the egos of ego-centered networks\n### Aliases: ego_constraint\n\n### ** Examples\n\ndata(egor32)\nego_constraint(egor32)\n\n\n"} {"package":"egor","topic":"ego_density","snippet":"### Name: ego_density\n### Title: Calculate the relationship density in ego-centered networks\n### Aliases: ego_density ego_density.egor\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\nego_density(egor32)\n\n\n"} {"package":"egor","topic":"as.egor","snippet":"### Name: as.egor\n### Title: egor - a data class for ego-centered network data.\n### Aliases: as.egor as.egor.nested_egor as.egor.list egor\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egos32\")\ndata(\"alters32\")\ndata(\"aaties32\") \n\negor(alters32, \n egos32, \n aaties32,\n ID.vars = list(ego = \".EGOID\", \n alter = \".ALTID\", \n source = \".SRCID\",\n target = 
\".TGTID\"))\n\n\n"} {"package":"egor","topic":"egor_vis_app","snippet":"### Name: egor_vis_app\n### Title: 'egor' Network Visualization App\n### Aliases: egor_vis_app\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\n#if(interactive()){\n# data(\"egor32\")\n# egor_vis_app(egor32)\n#}\n\n\n"} {"package":"egor","topic":"onefile_to_egor","snippet":"### Name: onefile_to_egor\n### Title: Import ego-centered network data from 'one file format'\n### Aliases: onefile_to_egor\n### Keywords: import\n\n### ** Examples\n\npath_to_one_file_8 <- system.file(\"extdata\", \"one_file_8.csv\", package = \"egor\")\negos_8 <- read.csv2(path_to_one_file_8)\n\nonefile_to_egor(\n egos = egos_8, netsize = egos_8$netsize,\n attr.start.col = \"alter.sex.1\",\n attr.end.col = \"alter.age.8\",\n aa.first.var = \"X1.to.2\",\n max.alters = 8)\n\n\n"} {"package":"egor","topic":"plot_egograms","snippet":"### Name: plot_egograms\n### Title: Plotting _egor_ objects\n### Aliases: plot_egograms plot_ego_graphs plot_egor plot.egor\n\n### ** Examples\n\ne <- make_egor(net.count = 5, max.alters = 12)\nplot_egograms(x = e,\n ego_no = 2,\n venn_var = \"sex\",\n pie_var = \"country\",\n vertex_size_var = \"age\")\nplot(e)\n\n\n"} {"package":"egor","topic":"read_egonet","snippet":"### Name: read_egonet\n### Title: Read ego-centered network data exported with EgoNet software as\n### an 'egor' object\n### Aliases: read_egonet\n### Keywords: ego-centered import\n\n### ** Examples\n\negos.file <- system.file(\"extdata\", \"egos_32.csv\", package = \"egor\")\nalters.folder <- system.file(\"extdata\", \"alters_32\", package = \"egor\")\nedge.folder <- system.file(\"extdata\", \"edges_32\", package = \"egor\")\n\nef <- read_egonet(egos.file = egos.file, \n alter.folder = alters.folder, \n edge.folder = edge.folder, \n csv.sep = \";\")\n\n\n"} {"package":"egor","topic":"rowlist","snippet":"### Name: rowlist\n### Title: Convert a table to a list of rows\n### Aliases: rowlist\n\n### ** Examples\n\n\nlibrary(tibble)\n(df <- tibble(x=2:1, y=list(list(1:3), list(3:4))))\nrowlist(df)\n\n\n\n"} {"package":"egor","topic":"subset.egor","snippet":"### Name: subset.egor\n### Title: Filter and Subset Ego-centered Datasets\n### Aliases: subset.egor [.egor\n\n### ** Examples\n\n\n# Generate a small sample dataset\n(e <- make_egor(5,4))\n\n# First three egos in the dataset\ne[1:3,]\n\n# Using an external vector\n# (though normally, we would use e[.keep,] here)\n.keep <- rep(c(TRUE, FALSE), length.out=nrow(e$ego))\nsubset(e, .keep)\n\n # Filter egos\nsubset(x = egor32, subset = egor32$ego$variables$sex == \"m\", unit=\"ego\")\nsubset(x = egor32, sex == \"m\")\n\n# Filter alters\nsubset(x = egor32, sex == \"m\", unit = \"alter\")\n\n# Filter aaties\nsubset(x = egor32, weight != 0, unit = \"aatie\")\n\n# Filter egos by alter variables (keep only egos that have more than 13 alters)\nsubset(x = egor32, nrow(alter) > 13, unit = \"alter\")\n\n# Filter alters by ego variables (keep only alters that have egos from Poland)\nsubset(x = egor32, ego$country == \"Poland\", unit = \"ego\")\n\n# Filter edges by alter variables (keep only edges between alters where `sex == \"m\"`)\nsubset(x = egor32, all(alter$sex == \"m\"), unit = \"aatie\")\n\n\n"} {"package":"egor","topic":"threefiles_to_egor","snippet":"### Name: threefiles_to_egor\n### Title: Read/ import ego-centered network data from the three files\n### format, EgoWeb2.0 or openeddi.\n### Aliases: threefiles_to_egor read_egoweb read_openeddi\n\n### ** Examples\n\n# The data for 
threefiles_to_egor() needs to be loaded with read.csv2(),\n# for it to be converted to an egor object.\negos.file <- system.file(\"extdata\", \"egos_32.csv\", package = \"egor\")\nalters.file <- system.file(\"extdata\", \"alters_32.csv\", package = \"egor\")\nedges.file <- system.file(\"extdata\", \"edges_32.csv\", package = \"egor\")\n\negos <- read.csv2(egos.file)\nalters <- read.csv2(alters.file)\nedges <- read.csv2(edges.file)\n\ntf <- threefiles_to_egor(egos = egos, alters.df = alters, edges = edges)\n\n# read_egoweb() and read_openeddi() read the files directly from the disk.\n## No test: \n# Fetch current working directory\nwd <- getwd()\n\nsetwd(system.file(\"extdata\", \"openeddi\", package = \"egor\"))\noe <- read_openeddi()\n\nsetwd(system.file(\"extdata\", \"egoweb\", package = \"egor\"))\new <- read_egoweb(alter.file = \"alters_32.csv\", edges.file = \"edges_32.csv\", \n egos.file = \"egos_32.csv\")\n \n# Restore working directory \nsetwd(wd)\n## End(No test)\n\n\n"} {"package":"egor","topic":"twofiles_to_egor","snippet":"### Name: twofiles_to_egor\n### Title: Import ego-centered network data from the two files format\n### Aliases: twofiles_to_egor\n### Keywords: import\n\n### ** Examples\n\npath_to_alters_8.csv <- system.file(\"extdata\", \"alters_8.csv\", package = \"egor\")\npath_to_one_file_8 <- system.file(\"extdata\", \"one_file_8.csv\", package = \"egor\")\n\n# read data from disk\negos_8 <- read.csv2(path_to_one_file_8)\nalters_8 <- read.csv2(path_to_alters_8.csv)\n\n# convert to egor object\n twofiles_to_egor(\n egos = egos_8,\n alters = alters_8,\n max.alters = 8,\n aa.first.var = \"X1.to.2\")\n\n\n"} {"package":"egor","topic":"vis_clustered_graphs","snippet":"### Name: vis_clustered_graphs\n### Title: Visualize clustered graphs\n### Aliases: vis_clustered_graphs\n### Keywords: analysis ego-centered network\n\n### ** Examples\n\ndata(\"egor32\")\n\n# Simplify networks to clustered graphs, stored as igraph objects\ngraphs <- clustered_graphs(egor32, \"country\") \n\n# Visualise\npar(mfrow = c(2,3))\nvis_clustered_graphs(\n graphs[1:5]\n)\npar(mfrow = c(1,1))\n\n\n"} {"package":"fuj","topic":"alias_arithmetic","snippet":"### Name: alias_arithmetic\n### Title: Arithmetic wrappers\n### Aliases: alias_arithmetic add subtract multiply divide raise_power\n### remainder divide_int\n\n### ** Examples\n\n add(7, 2) # +\n subtract(7, 2) # -\n multiply(7, 2) # *\n divide(7, 2) # /\nraise_power(7, 2) # ^\n remainder(7, 2) # %%\n divide_int(7, 2) # %/%\n\n\n"} {"package":"fuj","topic":"alias_extract","snippet":"### Name: alias_extract\n### Title: Extract and replace aliases\n### Aliases: alias_extract subset1 subset2 subset3 subassign1 subassign2\n### subassign3\n\n### ** Examples\n\ndf <- quick_dfl(a = 1:5, b = 6:10)\n# alias of `[`\nsubset1(df, 1)\nsubset1(df, 1, )\nsubset1(df, , 1)\nsubset1(df, , 1, drop = FALSE)\n\n# alias of `[[`\nsubset2(df, 1)\nsubset2(df, 1, 2)\n\n# alias of `$`\nsubset3(df, a)\nsubset3(df, \"b\")\nsubset3(df, \"foo\")\n\n# alias of `[<-`\nsubassign1(df, \"a\", , 2)\n\n\n"} {"package":"fuj","topic":"collapse","snippet":"### Name: collapse\n### Title: Collapse\n### Aliases: collapse\n\n### ** Examples\n\ncollapse(1:10)\ncollapse(list(\"a\", b = 1:2))\ncollapse(quick_dfl(a = 1:3, b = 4:6), sep = \"-\")\n\n\n"} {"package":"fuj","topic":"colons","snippet":"### Name: colons\n### Title: Colons\n### Aliases: colons %::% %:::% %colons%\n\n### ** Examples\n\nidentical(\"base\" %::% \"mean\", base::mean)\n\"fuj\" %:::% \"colons_example\" # unexported value\n\n\n\n"} 
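The egor measures documented above compose naturally on a single simulated dataset. Here is a minimal sketch that combines only calls already shown in these snippets (make_egor(), ego_density(), composition(), comp_ei(), count_dyads()); the argument patterns are copied from those examples and not checked beyond them:

library(egor)

set.seed(1)
e <- make_egor(12, 15)  # 12 egos with up to 15 alters each

ego_density(e)                # relationship density per ego
composition(e, "sex")         # alter composition by sex
comp_ei(e, "age", "age")      # ego-alter homophily (EI-index) on age
count_dyads(object = e, alter_var_name = "country")  # dyad attribute counts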
{"package":"fuj","topic":"exattr","snippet":"### Name: exattr\n### Title: Exact attributes\n### Aliases: exattr %attr%\n\n### ** Examples\n\nfoo <- struct(list(), \"foo\", aa = TRUE)\n attr(foo, \"a\") # TRUE : partial match successful\nexattr(foo, \"a\") # NULL : partial match failed\nexattr(foo, \"aa\") # TRUE : exact match\n\n\n"} {"package":"fuj","topic":"flip","snippet":"### Name: flip\n### Title: Flip\n### Aliases: flip flip.default flip.matrix flip.data.frame\n\n### ** Examples\n\nflip(letters[1:3])\nflip(seq.int(9, -9, by = -3))\nflip(head(iris))\nflip(head(iris), keep_rownames = TRUE)\nflip(head(iris), by = \"col\")\n\n\n\n"} {"package":"fuj","topic":"if_null","snippet":"### Name: if_null\n### Title: Default value for 'NULL' or no length\n### Aliases: if_null %||% %|||% %len%\n\n### ** Examples\n\n# replace NULL\nNULL %||% 1L\n2L %||% 1L\n\n# replace empty\n\"\" %|||% 1L\nNA %|||% 1L\ndouble() %|||% 1L\nNULL %|||% 1L\n\n# replace no length\nlogical() %len% TRUE\n\n\n"} {"package":"fuj","topic":"list0","snippet":"### Name: list0\n### Title: Listing for dots\n### Aliases: list0\n\n### ** Examples\n\ntry(list(1, ))\nlist0(1, )\nlist0(a = 1, )\n\n\n"} {"package":"fuj","topic":"match_ext","snippet":"### Name: match_ext\n### Title: Value matching - Extensions\n### Aliases: match_ext is_in is_out %out% is_within %wi% is_without %wo%\n### no_match any_match\n\n### ** Examples\n\n1:10 %in% c(1, 3, 5, 9)\n1:10 %out% c(1, 3, 5, 9)\nletters[1:5] %wo% letters[3:7]\nletters[1:5] %wi% letters[3:7]\n\n# base functions only return unique values\n\n c(1:6, 7:2) %wo% c(3, 7, 12) # -> keeps duplicates\n setdiff(c(1:6, 7:2), c(3, 7, 12)) # -> unique values\n\n c(1:6, 7:2) %wi% c(3, 7, 12) # -> keeps duplicates\nintersect(c(1:6, 7:2), c(3, 7, 12)) # -> unique values\n\n\n\n"} {"package":"fuj","topic":"muffle","snippet":"### Name: muffle\n### Title: Muffle messages\n### Aliases: muffle wuffle\n\n### ** Examples\n\n\n# load function\nfoo <- function(...) 
{\n message(\"You entered :\", paste0(...))\n c(...)\n}\n\n# wrap around function or muffle the function ti's\nmuffle(foo(1, 2))\nmuffle(fun = foo)(1, 2)\nsapply(1:3, muffle(fun = foo))\n\n# silence warnings\nwuffle(as.integer(\"a\"))\nsapply(list(1, \"a\", \"0\", \".2\"), wuffle(fun = as.integer))\n\n\n"} {"package":"fuj","topic":"names","snippet":"### Name: names\n### Title: Set names\n### Aliases: names set_names remove_names %names% is_named\n\n### ** Examples\n\nset_names(1:5)\nset_names(1:5, c(\"a\", \"b\", \"c\", \"d\", \"e\"))\n\nx <- c(a = 1, b = 2)\nremove_names(x)\nx %names% c(\"c\", \"d\")\nis_named(x)\n\n\n\n"} {"package":"fuj","topic":"new_condition","snippet":"### Name: new_condition\n### Title: New condition\n### Aliases: new_condition\n\n### ** Examples\n\n# empty condition\nx <- new_condition(\"informative error message\", class = \"foo\")\ntry(stop(x))\n\n# with pkg\nx <- new_condition(\"msg\", class = \"foo\", pkg = \"bar\")\n# class contains multiple identifiers, including a \"bar:fooError\"\nclass(x)\n# message contains package information at the end\ntry(stop(x))\n\n\n"} {"package":"fuj","topic":"os","snippet":"### Name: os\n### Title: Determine operating systems\n### Aliases: os is_windows is_macos is_linux\n\n### ** Examples\n\nis_windows()\nis_macos()\nis_linux()\n\n\n"} {"package":"fuj","topic":"quick_df","snippet":"### Name: quick_df\n### Title: Quick DF\n### Aliases: quick_df empty_df quick_dfl\n\n### ** Examples\n\n# unnamed will use make.names()\nx <- list(1:10, letters[1:10])\nquick_df(x)\n\n# named is preferred\nnames(x) <- c(\"numbers\", \"letters\")\nquick_df(x)\n\n# empty data.frame\nempty_df() # or quick_df(NULL)\n\n\n\n"} {"package":"fuj","topic":"require_namespace","snippet":"### Name: require_namespace\n### Title: Require namespace\n### Aliases: require_namespace\n\n### ** Examples\n\nisTRUE(require_namespace(\"base\")) # returns invisibly\ntry(require_namespace(\"1package\")) # (using a purposefully bad name)\n\n\n"} {"package":"fuj","topic":"struct","snippet":"### Name: struct\n### Title: Simple structures\n### Aliases: struct\n\n### ** Examples\n\nx <- list(a = 1, b = 2)\n# structure() retains the $names attribute of x but struct() does not\nstructure(x, class = \"data.frame\", row.names = 1L)\nstruct(x, \"data.frame\", row.names = 1L)\nstruct(x, \"data.frame\", row.names = 1L, names = names(x))\n\n# structure() corrects entries for \"factor\" class\n# but struct() demands the data to be an integer\nstructure(1, class = \"factor\", levels = \"a\")\ntry(struct(1, \"factor\", levels = \"a\"))\nstruct(1L, \"factor\", levels = \"a\")\n\n# When first argument is NULL -- attributes() coerces\ntry(structure(NULL)) # NULL, no call to attributes()\nstruct(NULL, NULL) # list(), without warning\nx <- NULL\nattributes(x) <- NULL\nx # NULL\nattributes(x) <- list() # struct() always grabs ... 
into a list\nx # list()\n\n# Due to the use of class() to assign class, you may experience some\n# other differences between structure() and struct()\nx <- structure(1, class = \"integer\")\ny <- struct(1, \"integer\")\nstr(x)\nstr(y)\n\nall.equal(x, y)\n\n# Be careful about carrying over attributes\nx <- quick_df(list(a = 1:2, b = 3:4))\n# returns empty data.frame\nstruct(x, \"data.frame\", new = 1)\n\n# safely changing names without breaking rownames\nstruct(x, \"data.frame\", names = c(\"c\", \"d\")) # breaks\nstruct(x, \"data.frame\", names = c(\"c\", \"d\"), .keep_attr = TRUE)\nstruct(x, \"data.frame\", names = c(\"c\", \"d\"), .keep_attr = \"row.names\")\n\n# safely adds comments\nstruct(x, \"data.frame\", comment = \"hi\", .keep_attr = TRUE)\nstruct(x, \"data.frame\", comment = \"hi\", .keep_attr = c(\"names\", \"row.names\"))\n\n# assignment in ... overwrites attributes\nstruct(x, \"data.frame\", names = c(\"var1\", \"var2\"), .keep_attr = TRUE)\n\n\n"} {"package":"fuj","topic":"verbose","snippet":"### Name: verbose\n### Title: Verbose\n### Aliases: verbose\n\n### ** Examples\n\nop <- options(verbose = FALSE)\nverbose(\"will not show\")\n\noptions(verbose = TRUE)\nverbose(\"message printed\")\nverbose(\"multiple lines \", \"will be \", \"combined\")\noptions(op)\n\nop <- options(fuj.verbose = function() TRUE)\nverbose(\"function will evaluate\")\nverbose(NULL) # nothing\nverbose(NULL, \"something\")\nverbose(if (FALSE) {\n\"`if` returns `NULL` when not `TRUE`, which makes for additional control\"\n})\noptions(op)\n\n\n"} {"package":"RcppZiggurat","topic":"RcppZiggurat-package","snippet":"### Name: RcppZiggurat-package\n### Title: Collection and comparison of different Ziggurat RNGs\n### Aliases: RcppZiggurat-package RcppZiggurat\n### Keywords: package\n\n### ** Examples\n\n set.seed(42)\n system.time(replicate(500, rnorm(10000)))\n\n zsetseed(42)\n system.time(replicate(500, zrnorm(10000)))\n\n\n"} {"package":"RcppZiggurat","topic":"zrnorm","snippet":"### Name: zrnorm\n### Title: Collection of Ziggurat Normal RNGs\n### Aliases: zrnorm zrnormLZLLV zrnormMT zrnormV1 zrnormV1b zrnormStl\n### zrnormStlV1 zrnormVec zrnormVecV1 zrnormGSL zrnormQL zrnormGl zrnormR\n### zsetseed zsetseedV1 zsetseedV1b zsetseedLZLLV zsetseedMT zsetseedGSL\n### zsetseedQL zsetseedGl zgetseed zgetseedV1 zgetpars zsetpars ziggbin\n### ziggsum ziggtest\n### Keywords: package\n\n### ** Examples\n\n set.seed(42)\n system.time(replicate(500, rnorm(10000)))\n\n zsetseed(42)\n system.time(replicate(500, zrnorm(10000)))\n\n\n"} {"package":"RcppZiggurat","topic":"RcppZiggurat","snippet":"### Name: RcppZiggurat-package\n### Title: Collection and comparison of different Ziggurat RNGs\n### Aliases: RcppZiggurat-package RcppZiggurat\n### Keywords: package\n\n### ** Examples\n\n set.seed(42)\n system.time(replicate(500, rnorm(10000)))\n\n zsetseed(42)\n system.time(replicate(500, zrnorm(10000)))\n\n\n"} {"package":"pheble","topic":"ph_anomaly","snippet":"### Name: ph_anomaly\n### Title: Detect anomalies.\n### Aliases: ph_anomaly\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## No test: \n## Remove anomalies with autoencoder.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"ae\")\n## Alternatively, remove anomalies with extended isolation forest. 
Notice\n## that port is defined, because running H2O sessions one after another\n## can return connection errors.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"iso\",\n port = 50001)\n## End(No test)\n\n\n"} {"package":"pheble","topic":"ph_ctrl","snippet":"### Name: ph_ctrl\n### Title: Parameters for resampling and training a dataset.\n### Aliases: ph_ctrl\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## Echo control object for train function.\nctrl <- ph_ctrl(ph_crocs$Species, resample_method = \"boot\")\n\n\n"} {"package":"pheble","topic":"ph_ensemble","snippet":"### Name: ph_ensemble\n### Title: Classify phenotypes via ensemble learning.\n### Aliases: ph_ensemble\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## No test: \n## Remove anomalies with autoencoder.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"ae\")\n## Preprocess anomaly-free data frame into train, validation, and test sets\n## with PCs as predictors.\npc_dfs <- ph_prep(df = rm_outs$df, ids_col = \"Biosample\",\n class_col = \"Species\", vali_pct = 0.15,\n test_pct = 0.15, method = \"pca\")\n## Echo control object for train function.\nctrl <- ph_ctrl(ph_crocs$Species, resample_method = \"boot\")\n## Train all models for ensemble.\n## Note: Increasing n_cores will dramatically reduce train time.\ntrain_models <- ph_train(train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n methods = \"all\",\n tune_length = 5,\n quiet = FALSE)\n## You can also train just a few, although more is preferable.\n## Note: Increasing n_cores will dramatically reduce train time.\ntrain_models <- ph_train(train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n methods = c(\"lda\", \"mda\",\n \"nnet\", \"pda\", \"sparseLDA\"),\n tune_length = 5,\n quiet = FALSE)\n## Train the ensemble.\n## Note: Increasing n_cores will dramatically reduce train time.\nensemble_model <- ph_ensemble(train_models = train_models$train_models,\n train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n top_models = 3,\n metalearner = \"glmnet\",\n tune_length = 25,\n quiet = FALSE)\n## End(No test)\n\n\n"} {"package":"pheble","topic":"ph_equate","snippet":"### Name: ph_equate\n### Title: Equate factors levels.\n### Aliases: ph_equate\n\n### ** Examples\n\n## Make data frame of predicted classes with different levels.\n## An internal or external column should contain the observed\n## classes with every possible level.\nobs <- as.factor(c(\"A\", \"C\", \"B\", \"D\", \"E\"))\nmethod_a <- c(\"A\", \"B\", \"B\", \"C\", \"D\")\nmethod_b <- c(\"A\", \"C\", \"B\", \"D\", \"C\")\nmethod_c <- c(\"A\", \"C\", \"B\", \"B\", \"C\")\ndf <- data.frame(method_a, method_b, method_c)\ndf <- ph_equate(df = df, class = obs)\n\n\n"} {"package":"pheble","topic":"ph_eval","snippet":"### Name: ph_eval\n### Title: Evaluate a phenotype classification model.\n### Aliases: ph_eval\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## No test: \n## Remove anomalies with autoencoder.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"ae\")\n## Preprocess anomaly-free data frame into train, validation, and test sets\n## with PCs as predictors.\npc_dfs <- 
ph_prep(df = rm_outs$df, ids_col = \"Biosample\",\n class_col = \"Species\", vali_pct = 0.15,\n test_pct = 0.15, method = \"pca\")\n## Echo control object for train function.\nctrl <- ph_ctrl(ph_crocs$Species, resample_method = \"boot\")\n## Train a few models for ensemble, although more is preferable.\n## Note: Increasing n_cores will dramatically reduce train time.\ntrain_models <- ph_train(train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n methods = c(\"lda\", \"mda\",\n \"nnet\", \"pda\", \"sparseLDA\"),\n tune_length = 5,\n quiet = FALSE)\n## Evaluate e.g. the first model.\ntest_pred <- predict(train_models$train_models[[1]], pc_dfs$test_df)\ntest_obs <- as.factor(pc_dfs$test_df$Species)\ntest_cm <- ph_eval(pred = test_pred, obs = test_obs)\n## End(No test)\n\n\n"} {"package":"pheble","topic":"ph_prep","snippet":"### Name: ph_prep\n### Title: Preprocessing for phenotype classification via ensemble\n### learning.\n### Aliases: ph_prep\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## No test: \n## Remove anomalies with autoencoder.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"ae\")\n## Preprocess anomaly-free data frame into train, validation, and test sets\n## with PCs as predictors.\npc_dfs <- ph_prep(df = rm_outs$df, ids_col = \"Biosample\",\n class_col = \"Species\", vali_pct = 0.15,\n test_pct = 0.15, method = \"pca\")\n## Alternatively, preprocess data frame into train, validation, and test\n## sets with latent variables as predictors. Notice that port is defined,\n## because running H2O sessions one after another can cause connection\n## errors.\nae_dfs <- ph_prep(df = rm_outs$df, ids_col = \"Biosample\", class_col = \"Species\",\n vali_pct = 0.15, test_pct = 0.15, method = \"ae\", port = 50001)\n## End(No test)\n\n\n"} {"package":"pheble","topic":"ph_train","snippet":"### Name: ph_train\n### Title: Generate predictions for phenotype ensemble.\n### Aliases: ph_train\n\n### ** Examples\n\n## Import data.\ndata(ph_crocs)\n## No test: \n## Remove anomalies with autoencoder.\nrm_outs <- ph_anomaly(df = ph_crocs, ids_col = \"Biosample\",\n class_col = \"Species\", method = \"ae\")\n## Preprocess anomaly-free data frame into train, validation, and test sets\n## with PCs as predictors.\npc_dfs <- ph_prep(df = rm_outs$df, ids_col = \"Biosample\",\n class_col = \"Species\", vali_pct = 0.15,\n test_pct = 0.15, method = \"pca\")\n## Echo control object for train function.\nctrl <- ph_ctrl(ph_crocs$Species, resample_method = \"boot\")\n## Train all models for ensemble.\n## Note: Increasing n_cores will dramatically reduce train time.\ntrain_models <- ph_train(train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n methods = \"all\",\n tune_length = 5,\n quiet = FALSE)\n## You can also train just a few, although more is preferable.\n## Note: Increasing n_cores will dramatically reduce train time.\ntrain_models <- ph_train(train_df = pc_dfs$train_df,\n vali_df = pc_dfs$vali_df,\n test_df = pc_dfs$test_df,\n class_col = \"Species\",\n ctrl = ctrl,\n task = \"multi\",\n methods = c(\"lda\", \"mda\",\n \"nnet\", \"pda\", \"sparseLDA\"),\n tune_length = 5,\n quiet = FALSE)\n## End(No test)\n\n\n"} {"package":"SigOptR","topic":"create_experiment","snippet":"### Name: create_experiment\n### Title: Create an experiment\n### Aliases: 
create_experiment\n\n### ** Examples\n\nenv <- Sys.getenv(\"NOT_CRAN\")\nif (!identical(env, \"true\")) {\n0\n} else {\ncreate_experiment(list(\n name=\"R test experiment\",\n parameters=list(\n list(name=\"x1\", type=\"double\", bounds=list(min=0, max=100)),\n list(name=\"x2\", type=\"double\", bounds=list(min=0, max=100))\n )\n))}\n\n\n"} {"package":"SigOptR","topic":"create_observation","snippet":"### Name: create_observation\n### Title: Create an observation for an experiment\n### Aliases: create_observation\n\n### ** Examples\n\nenv <- Sys.getenv(\"NOT_CRAN\")\nif (!identical(env, \"true\")) {\n0\n} else {\nexperiment <- create_experiment(list(\n name=\"R test experiment\",\n parameters=list(\n list(name=\"x1\", type=\"double\", bounds=list(min=0, max=100)),\n list(name=\"x2\", type=\"double\", bounds=list(min=0, max=100))\n )\n))\nsuggestion <- create_suggestion(experiment$id)\ncreate_observation(experiment$id, list(suggestion=suggestion$id, value=99.08))\ncreate_observation(experiment$id, list(suggestion=suggestion$id, value=99.58, value_stddev=0.1))}\n\n\n"} {"package":"SigOptR","topic":"create_suggestion","snippet":"### Name: create_suggestion\n### Title: Create a suggestion for an experiment\n### Aliases: create_suggestion\n\n### ** Examples\n\nenv <- Sys.getenv(\"NOT_CRAN\")\nif (!identical(env, \"true\")) {\n0\n} else {\nexperiment <- create_experiment(list(\n name=\"R test experiment\",\n parameters=list(\n list(name=\"x1\", type=\"double\", bounds=list(min=0, max=100)),\n list(name=\"x2\", type=\"double\", bounds=list(min=0, max=100))\n )\n))\ncreate_suggestion(experiment$id)}\n\n\n"} {"package":"SigOptR","topic":"fetch_experiment","snippet":"### Name: fetch_experiment\n### Title: Fetch an experiment\n### Aliases: fetch_experiment\n\n### ** Examples\n\nenv <- Sys.getenv(\"NOT_CRAN\")\nif (!identical(env, \"true\")) {\n0\n} else {\nexperiment <- create_experiment(list(\n name=\"R test experiment\",\n parameters=list(\n list(name=\"x1\", type=\"double\", bounds=list(min=0, max=100)),\n list(name=\"x2\", type=\"double\", bounds=list(min=0, max=100))\n )\n))\nfetch_experiment(experiment$id)}\n\n\n"} {"package":"SigOptR","topic":"franke","snippet":"### Name: franke\n### Title: Franke function - http://www.sfu.ca/~ssurjano/franke2d.html\n### Aliases: franke\n\n### ** Examples\n\nfranke(0,1)\n\n\n"} {"package":"FSelector","topic":"as.simple.formula","snippet":"### Name: as.simple.formula\n### Title: Converting to formulas\n### Aliases: as.simple.formula\n\n### ** Examples\n\n data(iris)\n result <- cfs(Species ~ ., iris)\n f <- as.simple.formula(result, \"Species\")\n\n\n"} {"package":"FSelector","topic":"best.first.search","snippet":"### Name: best.first.search\n### Title: Best-first search\n### Aliases: best.first.search\n\n### ** Examples\n\n library(rpart)\n data(iris)\n \n evaluator <- function(subset) {\n #k-fold cross validation\n k <- 5\n splits <- runif(nrow(iris))\n results = sapply(1:k, function(i) {\n test.idx <- (splits >= (i - 1) / k) & (splits < i / k)\n train.idx <- !test.idx\n test <- iris[test.idx, , drop=FALSE]\n train <- iris[train.idx, , drop=FALSE]\n tree <- rpart(as.simple.formula(subset, \"Species\"), train)\n error.rate = sum(test$Species != predict(tree, test, type=\"c\")) / nrow(test)\n return(1 - error.rate)\n })\n print(subset)\n print(mean(results))\n return(mean(results))\n }\n \n subset <- best.first.search(names(iris)[-5], evaluator)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n \n\n\n"} 
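The k-fold evaluator defined in the best.first.search example above (and reused verbatim by the exhaustive.search, forward.search, and hill.climbing.search examples below) builds its folds by drawing one uniform number per row and testing interval membership. A minimal standalone sketch of just that split logic, using illustrative values k = 5 and n = 10 that are not part of the original snippets:

# Sketch of the runif-based fold assignment used by the evaluator:
# each draw lands in exactly one interval [(i - 1)/k, i/k), so the k
# test sets partition the rows (fold sizes are random, roughly n/k).
k <- 5
n <- 10
splits <- runif(n)
folds <- lapply(1:k, function(i) {
  which(splits >= (i - 1) / k & splits < i / k)
})
# Every row index appears in exactly one fold
stopifnot(setequal(unlist(folds), seq_len(n)))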
{"package":"FSelector","topic":"cfs","snippet":"### Name: cfs\n### Title: CFS filter\n### Aliases: cfs\n\n### ** Examples\n\n data(iris)\n \n subset <- cfs(Species~., iris)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n\n"} {"package":"FSelector","topic":"chi.squared","snippet":"### Name: chi.squared\n### Title: Chi-squared filter\n### Aliases: chi.squared\n\n### ** Examples\n\n library(mlbench)\n data(HouseVotes84)\n\n weights <- chi.squared(Class~., HouseVotes84)\n print(weights)\n subset <- cutoff.k(weights, 5)\n f <- as.simple.formula(subset, \"Class\")\n print(f)\n\n\n"} {"package":"FSelector","topic":"consistency","snippet":"### Name: consistency\n### Title: Consistency-based filter\n### Aliases: consistency\n\n### ** Examples\n\n## Not run: \n##D library(mlbench)\n##D data(HouseVotes84)\n##D \n##D subset <- consistency(Class~., HouseVotes84)\n##D f <- as.simple.formula(subset, \"Class\")\n##D print(f)\n## End(Not run)\n\n\n"} {"package":"FSelector","topic":"linear.correlation","snippet":"### Name: correlation\n### Title: Correlation filter\n### Aliases: linear.correlation rank.correlation\n\n### ** Examples\n\n library(mlbench)\n data(BostonHousing)\n d=BostonHousing[-4] # only numeric variables\n \n weights <- linear.correlation(medv~., d)\n print(weights)\n subset <- cutoff.k(weights, 3)\n f <- as.simple.formula(subset, \"medv\")\n print(f)\n\n weights <- rank.correlation(medv~., d)\n print(weights)\n subset <- cutoff.k(weights, 3)\n f <- as.simple.formula(subset, \"medv\")\n print(f)\n\n\n"} {"package":"FSelector","topic":"cutoff.k","snippet":"### Name: cutoff\n### Title: Cutoffs\n### Aliases: cutoff.k cutoff.k.percent cutoff.biggest.diff\n\n### ** Examples\n\n data(iris)\n\n weights <- information.gain(Species~., iris)\n print(weights)\n\n subset <- cutoff.k(weights, 1)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n subset <- cutoff.k.percent(weights, 0.75)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n subset <- cutoff.biggest.diff(weights)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n \n\n\n"} {"package":"FSelector","topic":"exhaustive.search","snippet":"### Name: exhaustive.search\n### Title: Exhaustive search\n### Aliases: exhaustive.search\n\n### ** Examples\n\n library(rpart)\n data(iris)\n \n evaluator <- function(subset) {\n #k-fold cross validation\n k <- 5\n splits <- runif(nrow(iris))\n results = sapply(1:k, function(i) {\n test.idx <- (splits >= (i - 1) / k) & (splits < i / k)\n train.idx <- !test.idx\n test <- iris[test.idx, , drop=FALSE]\n train <- iris[train.idx, , drop=FALSE]\n tree <- rpart(as.simple.formula(subset, \"Species\"), train)\n error.rate = sum(test$Species != predict(tree, test, type=\"c\")) / nrow(test)\n return(1 - error.rate)\n })\n print(subset)\n print(mean(results))\n return(mean(results))\n }\n \n subset <- exhaustive.search(names(iris)[-5], evaluator)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n \n\n\n"} {"package":"FSelector","topic":"backward.search","snippet":"### Name: greedy.search\n### Title: Greedy search\n### Aliases: backward.search forward.search\n\n### ** Examples\n\n library(rpart)\n data(iris)\n \n evaluator <- function(subset) {\n #k-fold cross validation\n k <- 5\n splits <- runif(nrow(iris))\n results = sapply(1:k, function(i) {\n test.idx <- (splits >= (i - 1) / k) & (splits < i / k)\n train.idx <- !test.idx\n test <- iris[test.idx, , drop=FALSE]\n train <- iris[train.idx, , drop=FALSE]\n tree <- rpart(as.simple.formula(subset, \"Species\"), train)\n 
error.rate = sum(test$Species != predict(tree, test, type=\"c\")) / nrow(test)\n return(1 - error.rate)\n })\n print(subset)\n print(mean(results))\n return(mean(results))\n }\n \n subset <- forward.search(names(iris)[-5], evaluator)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n \n\n\n"} {"package":"FSelector","topic":"hill.climbing.search","snippet":"### Name: hill.climbing.search\n### Title: Hill climbing search\n### Aliases: hill.climbing.search\n\n### ** Examples\n\n library(rpart)\n data(iris)\n \n evaluator <- function(subset) {\n #k-fold cross validation\n k <- 5\n splits <- runif(nrow(iris))\n results = sapply(1:k, function(i) {\n test.idx <- (splits >= (i - 1) / k) & (splits < i / k)\n train.idx <- !test.idx\n test <- iris[test.idx, , drop=FALSE]\n train <- iris[train.idx, , drop=FALSE]\n tree <- rpart(as.simple.formula(subset, \"Species\"), train)\n error.rate = sum(test$Species != predict(tree, test, type=\"c\")) / nrow(test)\n return(1 - error.rate)\n })\n print(subset)\n print(mean(results))\n return(mean(results))\n }\n \n subset <- hill.climbing.search(names(iris)[-5], evaluator)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n \n\n\n"} {"package":"FSelector","topic":"information.gain","snippet":"### Name: entropy.based\n### Title: Entropy-based filters\n### Aliases: information.gain gain.ratio symmetrical.uncertainty\n\n### ** Examples\n\n data(iris)\n\n weights <- information.gain(Species~., iris)\n print(weights)\n subset <- cutoff.k(weights, 2)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n weights <- information.gain(Species~., iris, unit = \"log2\")\n print(weights)\n\n weights <- gain.ratio(Species~., iris)\n print(weights)\n subset <- cutoff.k(weights, 2)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n weights <- symmetrical.uncertainty(Species~., iris)\n print(weights)\n subset <- cutoff.biggest.diff(weights)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n\n\n"} {"package":"FSelector","topic":"oneR","snippet":"### Name: oneR\n### Title: OneR algorithm\n### Aliases: oneR\n\n### ** Examples\n\n library(mlbench)\n data(HouseVotes84)\n \n weights <- oneR(Class~., HouseVotes84)\n print(weights)\n subset <- cutoff.k(weights, 5)\n f <- as.simple.formula(subset, \"Class\")\n print(f)\n\n\n"} {"package":"FSelector","topic":"random.forest.importance","snippet":"### Name: random.forest.importance\n### Title: RandomForest filter\n### Aliases: random.forest.importance\n\n### ** Examples\n\n library(mlbench)\n data(HouseVotes84)\n \n weights <- random.forest.importance(Class~., HouseVotes84, importance.type = 1)\n print(weights)\n subset <- cutoff.k(weights, 5)\n f <- as.simple.formula(subset, \"Class\")\n print(f)\n\n\n"} {"package":"FSelector","topic":"relief","snippet":"### Name: relief\n### Title: RReliefF filter\n### Aliases: relief\n\n### ** Examples\n\n data(iris)\n \n weights <- relief(Species~., iris, neighbours.count = 5, sample.size = 20)\n print(weights)\n subset <- cutoff.k(weights, 2)\n f <- as.simple.formula(subset, \"Species\")\n print(f)\n\n\n"} {"package":"spnaf","topic":"CA","snippet":"### Name: CA\n### Title: Sample migration data by counties in California.\n### Aliases: CA\n### Keywords: datasets\n\n### ** Examples\n\nCA\n\n\n"} {"package":"spnaf","topic":"Gij.polygon","snippet":"### Name: Gij.polygon\n### Title: Calculate spatial autocorrelation with OD data and corresponding\n### polygons.\n### Aliases: Gij.polygon\n\n### ** Examples\n\n# Data manipulation\nCA <- spnaf::CA\nOD <- 
cbind(CA$FIPS.County.Code.of.Geography.B, CA$FIPS.County.Code.of.Geography.A)\nOD <- cbind(OD, CA$Flow.from.Geography.B.to.Geography.A)\nOD <- data.frame(OD)\nnames(OD) <- c(\"oid\", \"did\", \"n\")\nOD$n <- as.numeric(OD$n)\nOD <- OD[order(OD[,1], OD[,2]),]\nhead(OD) # check the input df's format\n\n# Load sf polygon\nCA_polygon <- spnaf::CA_polygon\nhead(CA_polygon) # it has a geometry column\n\n# Execution of Gij.polygon with data above and given parameters\n## No test: \nresult <- Gij.polygon(df = OD, shape = CA_polygon, queen = TRUE, snap = 1,\nmethod = 't', R = 1000)\n## End(No test)\n\n# check the results\n## No test: \nhead(result[[1]])\nhead(result[[2]])\n## End(No test)\n\n\n"} {"package":"bigmemory","topic":"describe,big.matrix-method","snippet":"### Name: describe,big.matrix-method\n### Title: The basic \"big.matrix\" operations for sharing and re-attaching.\n### Aliases: describe,big.matrix-method attach.big.matrix describe\n### attach.resource\n### Keywords: classes methods\n\n### ** Examples\n\n# The example is quite silly, as you wouldn't likely do this in a\n# single R session. But if zdescription were passed to another R session\n# via SNOW, foreach, or even by a simple file read/write,\n# then the attach of the second R process would give access to the\n# same object in memory. Please see the package vignette for real examples.\n\nz <- big.matrix(3, 3, type='integer', init=3)\nz[,]\ndim(z)\nz[1,1] <- 2\nz[,]\nzdescription <- describe(z)\nzdescription\ny <- attach.big.matrix(zdescription)\ny[,]\ny\nz\nzz <- attach.resource(zdescription)\nzz[1,1] <- -100\ny[,]\nz[,]\n\n\n"} {"package":"bigmemory","topic":"big.matrix-class","snippet":"### Name: big.matrix-class\n### Title: Class \"big.matrix\"\n### Aliases: big.matrix-class\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"big.matrix\")\n\n\n"} {"package":"bigmemory","topic":"big.matrix","snippet":"### Name: big.matrix\n### Title: The core \"big.matrix\" operations.\n### Aliases: big.matrix filebacked.big.matrix as.big.matrix is.big.matrix\n### is.big.matrix,big.matrix-method is.big.matrix,ANY-method is.separated\n### is.separated,big.matrix-method is.filebacked\n### is.filebacked,big.matrix-method shared.name\n### shared.name,big.matrix-method file.name file.name,big.matrix-method\n### dir.name dir.name,big.matrix-method is.shared\n### is.shared,big.matrix-method is.readonly is.readonly,big.matrix-method\n### is.nil\n### Keywords: classes methods\n\n### ** Examples\n\nx <- big.matrix(10, 2, type='integer', init=-5)\noptions(bigmemory.allow.dimnames=TRUE)\ncolnames(x) <- c(\"alpha\", \"beta\")\nis.big.matrix(x)\ndim(x)\ncolnames(x)\nrownames(x)\nx[,]\nx[1:8,1] <- 11:18\ncolnames(x) <- NULL\nx[,]\n\n# The following shared memory example is quite silly, as you wouldn't\n# likely do this in a single R session. 
But if zdescription were\n# passed to another R session via SNOW, foreach, or even by a\n# simple file read/write, then the attach.big.matrix() within the\n# second R process would give access to the same object in memory.\n# Please see the package vignette for real examples.\n\nz <- big.matrix(3, 3, type='integer', init=3)\nz[,]\ndim(z)\nz[1,1] <- 2\nz[,]\nzdescription <- describe(z)\nzdescription\ny <- attach.big.matrix(zdescription)\ny[,]\ny\nz\ny[1,1] <- -100\ny[,]\nz[,]\n\n\n"} {"package":"bigmemory","topic":"descriptor-class","snippet":"### Name: descriptor-class\n### Title: Class \"big.matrix.descriptor\"\n### Aliases: descriptor-class big.matrix.descriptor-class\n### sub.big.matrix,big.matrix.descriptor-method\n### attach.resource,character-method\n### attach.resource,big.matrix.descriptor-method\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"big.matrix.descriptor\")\n\n\n"} {"package":"bigmemory","topic":"bigmemory-package","snippet":"### Name: bigmemory-package\n### Title: Manage massive matrices with shared memory and memory-mapped\n### files.\n### Aliases: bigmemory-package bigmemory\n### Keywords: package\n\n### ** Examples\n\n\n\n# Our examples are all trivial in size, rather than burning huge amounts\n# of memory.\n\nx <- big.matrix(5, 2, type=\"integer\", init=0,\n dimnames=list(NULL, c(\"alpha\", \"beta\")))\nx\nx[1:2,]\nx[,1] <- 1:5\nx[,\"alpha\"]\ncolnames(x)\noptions(bigmemory.allow.dimnames=TRUE)\ncolnames(x) <- NULL\nx[,]\n\n\n\n\n"} {"package":"bigmemory","topic":"deepcopy","snippet":"### Name: deepcopy\n### Title: Produces a physical copy of a \"big.matrix\"\n### Aliases: deepcopy\n### Keywords: methods\n\n### ** Examples\n\nx <- as.big.matrix(matrix(1:30, 10, 3))\ny <- deepcopy(x, -1) # Don't include the first column.\nx\ny\nhead(x)\nhead(y)\n\n\n"} {"package":"bigmemory","topic":"flush","snippet":"### Name: flush\n### Title: Updating a big.matrix filebacking.\n### Aliases: flush flush,big.matrix-method\n### Keywords: methods\n\n### ** Examples\n\ntemp_dir = tempdir()\nif (!dir.exists(temp_dir)) dir.create(temp_dir)\nx <- big.matrix(nrow=3, ncol=3, backingfile='flushtest.bin',\n descriptorfile='flushtest.desc', backingpath=temp_dir,\n type='integer')\nx[1,1] <- 0\nflush(x)\n\n\n"} {"package":"bigmemory","topic":"morder","snippet":"### Name: morder\n### Title: Ordering and Permuting functions for \"big.matrix\" and \"matrix\"\n### objects\n### Aliases: morder morderCols mpermute mpermuteCols\n\n### ** Examples\n\nm = matrix(as.double(as.matrix(iris)), nrow=nrow(iris))\nmorder(m, 1)\norder(m[,1])\n\nm[order(m[,1]), 2]\nmpermute(m, cols=1)\nm[,2]\n\n\n"} {"package":"bigmemory","topic":"mwhich","snippet":"### Name: mwhich\n### Title: Expanded \"which\"-like functionality.\n### Aliases: mwhich\n### Keywords: methods\n\n### ** Examples\n\nx <- as.big.matrix(matrix(1:30, 10, 3))\noptions(bigmemory.allow.dimnames=TRUE)\ncolnames(x) <- c(\"A\", \"B\", \"C\")\nx[,]\nx[mwhich(x, 1:2, list(c(2,3), c(11,17)),\n list(c('ge','le'), c('gt', 'lt')), 'OR'),]\n\nx[mwhich(x, c(\"A\",\"B\"), list(c(2,3), c(11,17)), \n list(c('ge','le'), c('gt', 'lt')), 'AND'),]\n\n# These should produce the same answer with a regular matrix:\ny <- matrix(1:30, 10, 3)\ny[mwhich(y, 1:2, list(c(2,3), c(11,17)),\n list(c('ge','le'), c('gt', 'lt')), 'OR'),]\n\ny[mwhich(y, -3, list(c(2,3), c(11,17)),\n list(c('ge','le'), c('gt', 'lt')), 'AND'),]\n\n\nx[1,1] <- NA\nmwhich(x, 1:2, NA, 'eq', 'OR')\nmwhich(x, 1:2, NA, 'neq', 'AND')\n\n# Column 1 equal to 4 and/or column 2 less than or equal to 
16:\nmwhich(x, 1:2, list(4, 16), list('eq', 'le'), 'OR')\nmwhich(x, 1:2, list(4, 16), list('eq', 'le'), 'AND')\n\n# Column 2 less than or equal to 15:\nmwhich(x, 2, 15, 'le')\n\n# No NAs in either column, and column 2 strictly less than 15:\nmwhich(x, c(1:2,2), list(NA, NA, 15), list('neq', 'neq', 'lt'), 'AND')\n\nx <- big.matrix(4, 2, init=1, type=\"double\")\nx[1,1] <- Inf\nmwhich(x, 1, Inf, 'eq')\nmwhich(x, 1, 1, 'gt')\nmwhich(x, 1, 1, 'le')\n\n\n"} {"package":"bigmemory","topic":"is.sub.big.matrix","snippet":"### Name: is.sub.big.matrix\n### Title: Submatrix support\n### Aliases: is.sub.big.matrix is.sub.big.matrix,big.matrix-method\n### sub.big.matrix sub.big.matrix,big.matrix-method\n### Keywords: methods\n\n### ** Examples\n\nx <- big.matrix(10, 5, init=0, type=\"double\")\nx[,] <- 1:50\ny <- sub.big.matrix(x, 2, 9, 2, 3)\ny[,]\ny[1,1] <- -99\nx[,]\nrm(x)\n\n\n"} {"package":"bigmemory","topic":"write.big.matrix","snippet":"### Name: write.big.matrix\n### Title: File interface for a \"big.matrix\"\n### Aliases: write.big.matrix write.big.matrix,big.matrix,character-method\n### read.big.matrix read.big.matrix,character-method\n### Keywords: methods\n\n### ** Examples\n\n# Without specifying the type, this big.matrix x will hold integers.\n\nx <- as.big.matrix(matrix(1:10, 5, 2))\nx[2,2] <- NA\nx[,]\ntemp_dir = tempdir()\nif (!dir.exists(temp_dir)) dir.create(temp_dir)\nwrite.big.matrix(x, file.path(temp_dir, \"foo.txt\"))\n\n# Just for fun, I'll read it back in as character (1-byte integers):\ny <- read.big.matrix(file.path(temp_dir, \"foo.txt\"), type=\"char\")\ny[,]\n\n# Other examples:\nw <- as.big.matrix(matrix(1:10, 5, 2), type='double')\nw[1,2] <- NA\nw[2,2] <- -Inf\nw[3,2] <- Inf\nw[4,2] <- NaN\nw[,]\nwrite.big.matrix(w, file.path(temp_dir, \"bar.txt\"))\nw <- read.big.matrix(file.path(temp_dir, \"bar.txt\"), type=\"double\")\nw[,]\nw <- read.big.matrix(file.path(temp_dir, \"bar.txt\"), type=\"short\")\nw[,]\n\n# Another example using row names (which we don't like).\nx <- as.big.matrix(as.matrix(iris), type='double')\nrownames(x) <- as.character(1:nrow(x))\nhead(x)\nwrite.big.matrix(x, file.path(temp_dir, 'IrisData.txt'), col.names=TRUE, \n row.names=TRUE)\ny <- read.big.matrix(file.path(temp_dir, \"IrisData.txt\"), header=TRUE, \n has.row.names=TRUE)\nhead(y)\n\n# The following would fail with a dimension mismatch:\nif (FALSE) y <- read.big.matrix(file.path(temp_dir, \"IrisData.txt\"), \n header=TRUE)\n\n\n"} {"package":"RcmdrPlugin.aRnova","topic":"OBrienKaiser","snippet":"### Name: OBrienKaiser\n### Title: O'Brien and Kaiser's Repeated-Measures Data\n### Aliases: OBrienKaiser\n### Keywords: datasets\n\n### ** Examples\n\nOBrienKaiser\ncontrasts(OBrienKaiser$treatment)\ncontrasts(OBrienKaiser$gender)\n\n\n"} {"package":"RcmdrPlugin.aRnova","topic":"Pottery","snippet":"### Name: Pottery\n### Title: Chemical Composition of Pottery\n### Aliases: Pottery\n### Keywords: datasets\n\n### ** Examples\n\nPottery\n\n\n"} {"package":"HealthCal","topic":"BFPF","snippet":"### Name: BFPF\n### Title: Body Fat Percentage (BFP) for female\n### Aliases: BFPF\n### Keywords: BFPF HealthCal\n\n### ** Examples\n\ndataset47=c(175,90,25)\nBFPF(dataset47)\n\n\n"} {"package":"HealthCal","topic":"BFPM","snippet":"### Name: BFPM\n### Title: Body Fat Percentage (BFP) for Male\n### Aliases: BFPM\n### Keywords: BFPM HealthCal\n\n### ** Examples\n\ndataset46=c(175,90,25)\nBFPM(dataset46)\n\n\n"} {"package":"HealthCal","topic":"BMI","snippet":"### Name: BMI\n### Title: The Body Mass Index (BMI)\n### 
Aliases: BMI\n### Keywords: BMI HealthCal\n\n### ** Examples\n\ndataset44=c(175,90)\nBMI(dataset44)\n\n\n"} {"package":"HealthCal","topic":"BMR","snippet":"### Name: BMR\n### Title: The Basal Metabolic Rate (BMR)\n### Aliases: BMR\n### Keywords: BMR HealthCal\n\n### ** Examples\n\ndataset45=c(175,90,25)\nBMR(dataset45)\n\n\n"} {"package":"shazam","topic":"calcBaseline","snippet":"### Name: calcBaseline\n### Title: Calculate the BASELINe PDFs (including for regions that include\n### CDR3 and FWR4)\n### Aliases: calcBaseline\n\n### ** Examples\n\n# Load and subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHG\" & sample_id == \"+7d\")\n\n# Collapse clones\ndb <- collapseClones(db, cloneColumn=\"clone_id\", \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n method=\"thresholdedFreq\", minimumFrequency=0.6,\n includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n \n# Calculate BASELINe\nbaseline <- calcBaseline(db, \n sequenceColumn=\"clonal_sequence\",\n germlineColumn=\"clonal_germline\", \n testStatistic=\"focused\",\n regionDefinition=IMGT_V,\n targetingModel=HH_S5F,\n nproc=1)\n \n\n\n"} {"package":"shazam","topic":"calcExpectedMutations","snippet":"### Name: calcExpectedMutations\n### Title: Calculate expected mutation frequencies of a sequence\n### Aliases: calcExpectedMutations\n\n### ** Examples\n\n# Load example data\ndata(ExampleDb, package=\"alakazam\")\n\n# Use the first entry in the example data for input and germline sequence\nin_seq <- ExampleDb[[\"sequence_alignment\"]][1]\ngerm_seq <- ExampleDb[[\"germline_alignment_d_mask\"]][1]\n\n# Identify all mutations in the sequence\ncalcExpectedMutations(germ_seq,in_seq)\n\n# Identify only mutations in the V segment minus CDR3\ncalcExpectedMutations(germ_seq, in_seq, regionDefinition=IMGT_V)\n\n# Define mutations based on hydropathy\ncalcExpectedMutations(germ_seq, in_seq, regionDefinition=IMGT_V,\n mutationDefinition=HYDROPATHY_MUTATIONS)\n\n\n\n"} {"package":"shazam","topic":"calcObservedMutations","snippet":"### Name: calcObservedMutations\n### Title: Count the number of observed mutations in a sequence.\n### Aliases: calcObservedMutations\n\n### ** Examples\n\n# Use an entry in the example data for input and germline sequence\ndata(ExampleDb, package=\"alakazam\")\nin_seq <- ExampleDb[[\"sequence_alignment\"]][100]\ngerm_seq <- ExampleDb[[\"germline_alignment_d_mask\"]][100]\n\n# Identify all mutations in the sequence\nex1_raw <- calcObservedMutations(in_seq, germ_seq, returnRaw=TRUE)\n# Count all mutations in the sequence\nex1_count <- calcObservedMutations(in_seq, germ_seq, returnRaw=FALSE)\nex1_freq <- calcObservedMutations(in_seq, germ_seq, returnRaw=FALSE, frequency=TRUE)\n# Compare this with ex1_count\ntable(ex1_raw$pos$region, ex1_raw$pos$r)[, \"1\"]\ntable(ex1_raw$pos$region, ex1_raw$pos$s)[, \"1\"]\n# Compare this with ex1_freq\ntable(ex1_raw$pos$region, ex1_raw$pos$r)[, \"1\"]/ex1_raw$nonN\ntable(ex1_raw$pos$region, ex1_raw$pos$s)[, \"1\"]/ex1_raw$nonN\n\n# Identify only mutations in the V segment minus CDR3\nex2_raw <- calcObservedMutations(in_seq, germ_seq, \n regionDefinition=IMGT_V, returnRaw=TRUE)\n# Count only mutations in the V segment minus CDR3\nex2_count <- calcObservedMutations(in_seq, germ_seq, \n regionDefinition=IMGT_V, returnRaw=FALSE)\nex2_freq <- calcObservedMutations(in_seq, germ_seq, \n regionDefinition=IMGT_V, returnRaw=FALSE,\n frequency=TRUE)\n# Compare this with ex2_count\ntable(ex2_raw$pos$region, ex2_raw$pos$r)[, 
\"1\"]\ntable(ex2_raw$pos$region, ex2_raw$pos$s)[, \"1\"] \n# Compare this with ex2_freq\ntable(ex2_raw$pos$region, ex2_raw$pos$r)[, \"1\"]/ex2_raw$nonN \ntable(ex2_raw$pos$region, ex2_raw$pos$s)[, \"1\"]/ex2_raw$nonN \n\n# Identify mutations by change in hydropathy class\nex3_raw <- calcObservedMutations(in_seq, germ_seq, regionDefinition=IMGT_V,\n mutationDefinition=HYDROPATHY_MUTATIONS, \n returnRaw=TRUE)\n# Count mutations by change in hydropathy class\nex3_count <- calcObservedMutations(in_seq, germ_seq, regionDefinition=IMGT_V,\n mutationDefinition=HYDROPATHY_MUTATIONS, \n returnRaw=FALSE)\nex3_freq <- calcObservedMutations(in_seq, germ_seq, regionDefinition=IMGT_V,\n mutationDefinition=HYDROPATHY_MUTATIONS, \n returnRaw=FALSE, frequency=TRUE)\n# Compre this with ex3_count\ntable(ex3_raw$pos$region, ex3_raw$pos$r)[, \"1\"]\ntable(ex3_raw$pos$region, ex3_raw$pos$s)[, \"1\"]\n# Compare this with ex3_freq\ntable(ex3_raw$pos$region, ex3_raw$pos$r)[, \"1\"]/ex3_raw$nonN \ntable(ex3_raw$pos$region, ex3_raw$pos$s)[, \"1\"]/ex3_raw$nonN \n \n\n\n"} {"package":"shazam","topic":"calcTargetingDistance","snippet":"### Name: calcTargetingDistance\n### Title: Calculates a 5-mer distance matrix from a TargetingModel object\n### Aliases: calcTargetingDistance\n\n### ** Examples\n\n# Calculate targeting distance of HH_S5F\ndist <- calcTargetingDistance(HH_S5F)\n\n# Calculate targeting distance of HH_S1F\ndist <- calcTargetingDistance(HH_S1F)\n\n\n\n"} {"package":"shazam","topic":"calculateMutability","snippet":"### Name: calculateMutability\n### Title: Calculate total mutability\n### Aliases: calculateMutability\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")\n\n# Calculate mutability of germline sequences using HH_S5F model\nmutability <- calculateMutability(sequences=db[[\"germline_alignment_d_mask\"]], model=HH_S5F)\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"collapseClones","snippet":"### Name: collapseClones\n### Title: Constructs effective clonal sequences for all clones\n### Aliases: collapseClones\n\n### ** Examples\n\n# Subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call %in% c(\"IGHA\", \"IGHG\") & sample_id == \"+7d\" &\n clone_id %in% c(\"3100\", \"3141\", \"3184\"))\n\n# thresholdedFreq method, resolving ties deterministically without using ambiguous characters\nclones <- collapseClones(db, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"thresholdedFreq\", minimumFrequency=0.6,\n includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n\n# mostCommon method, resolving ties deterministically using ambiguous characters\nclones <- collapseClones(db, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"mostCommon\", \n includeAmbiguous=TRUE, breakTiesStochastic=FALSE)\n\n# Make a copy of db that has a mutation frequency column\ndb2 <- observedMutations(db, frequency=TRUE, combine=TRUE)\n\n# mostMutated method, resolving ties stochastically\nclones <- collapseClones(db2, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"mostMutated\", muFreqColumn=\"mu_freq\", \n breakTiesStochastic=TRUE, breakTiesByColumns=NULL)\n \n# mostMutated method, resolving ties deterministically using 
additional columns\nclones <- collapseClones(db2, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"mostMutated\", muFreqColumn=\"mu_freq\", \n breakTiesStochastic=FALSE, \n breakTiesByColumns=list(c(\"duplicate_count\"), c(max)))\n\n# Build consensus for V segment only\n# Capture all nucleotide variations using ambiguous characters \nclones <- collapseClones(db, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"catchAll\", regionDefinition=IMGT_V)\n\n# Return the same number of rows as the input\nclones <- collapseClones(db, cloneColumn=\"clone_id\", sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\",\n method=\"mostCommon\", expandedDb=TRUE)\n\n\n\n"} {"package":"shazam","topic":"consensusSequence","snippet":"### Name: consensusSequence\n### Title: Construct a consensus sequence\n### Aliases: consensusSequence\n\n### ** Examples\n\n# Subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call %in% c(\"IGHA\", \"IGHG\") & sample_id == \"+7d\")\nclone <- subset(db, clone_id == \"3192\")\n\n# First compute mutation frequency for most/leastMutated methods\nclone <- observedMutations(clone, frequency=TRUE, combine=TRUE)\n\n# Manually create a tie\nclone <- rbind(clone, clone[which.max(clone$mu_freq), ])\n\n# ThresholdedFreq method. \n# Resolve ties deterministically without using ambiguous characters\ncons1 <- consensusSequence(clone$sequence_alignment,\n method=\"thresholdedFreq\", minFreq=0.3,\n includeAmbiguous=FALSE, \n breakTiesStochastic=FALSE)\ncons1$cons\n \n\n\n"} {"package":"shazam","topic":"convertNumbering","snippet":"### Name: convertNumbering\n### Title: convertNumbering: IMGT-Kabat number conversion\n### Aliases: convertNumbering\n\n### ** Examples\n\nconvertNumbering(\"IGH\", \"IMGT\", \"KABAT\", c(\"51\", \"23\", \"110\"))\nconvertNumbering(\"IGH\", \"KABAT\", \"IMGT\", c(\"51\", \"23\", \"G\"))\n\n\n"} {"package":"shazam","topic":"createBaseline","snippet":"### Name: createBaseline\n### Title: Creates a Baseline object\n### Aliases: createBaseline\n\n### ** Examples\n\n# Creates an empty Baseline object\ncreateBaseline()\n\n\n\n"} {"package":"shazam","topic":"createMutabilityMatrix","snippet":"### Name: createMutabilityMatrix\n### Title: Builds a mutability model\n### Aliases: createMutabilityMatrix\n\n### ** Examples\n\n## No test: \n# Subset example data to 50 sequences of one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")[1:50,]\n\n# Create model using only silent mutations\nsub_model <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",model=\"s\")\nmut_model <- createMutabilityMatrix(db, sub_model, model=\"s\", \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n minNumSeqMutations=200,\n numSeqMutationsOnly=FALSE)\n \n# View top 5 mutability estimates\nhead(sort(mut_model, decreasing=TRUE), 5)\n\n# View the number of S mutations used for estimating mutabilities\nmut_model@numMutS\n\n# Count the number of mutations in sequences containing each 5-mer\nmut_count <- createMutabilityMatrix(db, sub_model, model=\"s\", \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n 
vCallColumn=\"v_call\",\n numSeqMutationsOnly=TRUE)\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"createMutationDefinition","snippet":"### Name: createMutationDefinition\n### Title: Creates a MutationDefinition\n### Aliases: createMutationDefinition\n\n### ** Examples\n\n# Define hydropathy classes\nsuppressPackageStartupMessages(library(alakazam))\nhydropathy <- list(hydrophobic=c(\"A\", \"I\", \"L\", \"M\", \"F\", \"W\", \"V\"),\n hydrophilic=c(\"R\", \"N\", \"D\", \"C\", \"Q\", \"E\", \"K\"),\n neutral=c(\"G\", \"H\", \"P\", \"S\", \"T\", \"Y\"))\nchars <- unlist(hydropathy, use.names=FALSE)\nclasses <- setNames(translateStrings(chars, hydropathy), chars)\n\n# Create hydropathy mutation definition\nmd <- createMutationDefinition(\"Hydropathy\", classes)\n\n\n\n"} {"package":"shazam","topic":"createRegionDefinition","snippet":"### Name: createRegionDefinition\n### Title: Creates a RegionDefinition\n### Aliases: createRegionDefinition\n\n### ** Examples\n\n# Creates an empty RegionDefinition object\ncreateRegionDefinition()\n\n\n\n"} {"package":"shazam","topic":"createSubstitutionMatrix","snippet":"### Name: createSubstitutionMatrix\n### Title: Builds a substitution model\n### Aliases: createSubstitutionMatrix\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")[1:25,]\n\n# Count the number of mutations per 5-mer\nsubCount <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n model=\"s\", multipleMutation=\"independent\",\n returnModel=\"5mer\", numMutationsOnly=TRUE)\n\n# Create model using only silent mutations\nsub <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n model=\"s\", multipleMutation=\"independent\",\n returnModel=\"5mer\", numMutationsOnly=FALSE,\n minNumMutations=20)\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"createTargetingMatrix","snippet":"### Name: createTargetingMatrix\n### Title: Calculates a targeting rate matrix\n### Aliases: createTargetingMatrix\n\n### ** Examples\n\n## No test: \n# Subset example data to 50 sequences, of one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")[1:50,]\n\n# Create 4x1024 models using only silent mutations\nsub_model <- createSubstitutionMatrix(db, model=\"s\", sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\")\nmut_model <- createMutabilityMatrix(db, sub_model, model=\"s\",\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\")\n\n# Extend substitution and mutability to including Ns (5x3125 model)\nsub_model <- extendSubstitutionMatrix(sub_model)\nmut_model <- extendMutabilityMatrix(mut_model)\n\n# Create targeting model from substitution and mutability\ntar_model <- createTargetingMatrix(sub_model, mut_model)\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"createTargetingModel","snippet":"### Name: createTargetingModel\n### Title: Creates a TargetingModel\n### Aliases: createTargetingModel\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & 
sample_id == \"-1h\")[1:80,]\n\n# Create model using only silent mutations and ignore multiple mutations\nmodel <- createTargetingModel(db, model=\"s\", sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\", multipleMutation=\"ignore\")\n\n# View top 5 mutability estimates\nhead(sort(model@mutability, decreasing=TRUE), 5)\n\n# View number of silent mutations used for estimating mutability\nmodel@numMutS\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"distToNearest","snippet":"### Name: distToNearest\n### Title: Distance to nearest neighbor\n### Aliases: distToNearest\n\n### ** Examples\n\n# Subset example data to one sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, sample_id == \"-1h\")\n\n# Use genotyped V assignments, Hamming distance, and normalize by junction length\n# First partition based on V and J assignments, then by junction length\n# Take into consideration ambiguous V and J annotations\ndist <- distToNearest(db, sequenceColumn=\"junction\", \n vCallColumn=\"v_call_genotyped\", jCallColumn=\"j_call\",\n model=\"ham\", first=FALSE, VJthenLen=TRUE, normalize=\"len\")\n \n# Plot histogram of non-NA distances\np1 <- ggplot(data=subset(dist, !is.na(dist_nearest))) + \n theme_bw() + \n ggtitle(\"Distance to nearest: Hamming\") + \n xlab(\"distance\") +\n geom_histogram(aes(x=dist_nearest), binwidth=0.025, \n fill=\"steelblue\", color=\"white\")\nplot(p1)\n\n\n\n"} {"package":"shazam","topic":"editBaseline","snippet":"### Name: editBaseline\n### Title: Edit the Baseline object\n### Aliases: editBaseline\n\n### ** Examples\n\n## No test: \n# Subset example data as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHG\" & sample_id == \"+7d\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=100)\n\n# Make Baseline object\nbaseline <- calcBaseline(db, \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\", \n testStatistic=\"focused\",\n regionDefinition=IMGT_V,\n targetingModel=HH_S5F,\n nproc=1)\n \n# Edit the field \"description\"\nbaseline <- editBaseline(baseline, field=\"description\", \n value=\"+7d IGHG\")\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"expectedMutations","snippet":"### Name: expectedMutations\n### Title: Calculate expected mutation frequencies\n### Aliases: expectedMutations\n\n### ** Examples\n\n# Subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call %in% c(\"IGHA\", \"IGHG\") & sample_id == \"+7d\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=100)\n# Calculate expected mutations over V region\ndb_exp <- expectedMutations(db,\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n regionDefinition=IMGT_V,\n nproc=1)\n\n# Calculate hydropathy expected mutations over V region\ndb_exp <- expectedMutations(db,\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n regionDefinition=IMGT_V,\n mutationDefinition=HYDROPATHY_MUTATIONS,\n nproc=1)\n\n\n\n"} {"package":"shazam","topic":"extendMutabilityMatrix","snippet":"### Name: extendMutabilityMatrix\n### Title: Extends a mutability model to include Ns.\n### Aliases: extendMutabilityMatrix\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, 
n=75)\n\n# Create model using only silent mutations and ignore multiple mutations\nsub_model <- createSubstitutionMatrix(db, model=\"s\", sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\")\nmut_model <- createMutabilityMatrix(db, sub_model, model=\"s\", \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\")\next_model <- extendMutabilityMatrix(mut_model)\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"extendSubstitutionMatrix","snippet":"### Name: extendSubstitutionMatrix\n### Title: Extends a substitution model to include Ns.\n### Aliases: extendSubstitutionMatrix\n\n### ** Examples\n\n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")\n\n# Create model using only silent mutations\nsub_model <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",model=\"s\")\next_model <- extendSubstitutionMatrix(sub_model)\n\n\n\n"} {"package":"shazam","topic":"findThreshold","snippet":"### Name: findThreshold\n### Title: Find distance threshold\n### Aliases: findThreshold\n\n### ** Examples\n\n## No test: \n# Subset example data to 50 sequences, one sample and isotype as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, sample_id == \"-1h\" & c_call==\"IGHG\")[1:50,]\n\n# Use nucleotide Hamming distance and normalize by junction length\ndb <- distToNearest(db, sequenceColumn=\"junction\", vCallColumn=\"v_call\",\n jCallColumn=\"j_call\", model=\"ham\", normalize=\"len\", nproc=1)\n \n# Find threshold using the \"gmm\" method with user defined specificity\noutput <- findThreshold(db$dist_nearest, method=\"gmm\", model=\"gamma-gamma\", \n cutoff=\"user\", spc=0.99)\nplot(output, binwidth=0.02, title=paste0(output@model, \" loglk=\", output@loglk))\nprint(output)\n## End(No test)\n\n\n"} {"package":"shazam","topic":"groupBaseline","snippet":"### Name: groupBaseline\n### Title: Group BASELINe PDFs\n### Aliases: groupBaseline\n\n### ** Examples\n\n \n## Not run: \n##D # Subset example data from alakazam as a demo\n##D data(ExampleDb, package=\"alakazam\")\n##D db <- subset(ExampleDb, c_call %in% c(\"IGHM\", \"IGHG\"))\n##D set.seed(112)\n##D db <- dplyr::slice_sample(db, n=200)\n##D \n##D # Collapse clones\n##D db <- collapseClones(db, cloneColumn=\"clone_id\",\n##D sequenceColumn=\"sequence_alignment\",\n##D germlineColumn=\"germline_alignment_d_mask\",\n##D method=\"thresholdedFreq\", minimumFrequency=0.6,\n##D includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n##D \n##D # Calculate BASELINe\n##D baseline <- calcBaseline(db, \n##D sequenceColumn=\"clonal_sequence\",\n##D germlineColumn=\"clonal_germline\", \n##D testStatistic=\"focused\",\n##D regionDefinition=IMGT_V,\n##D targetingModel=HH_S5F,\n##D nproc=1)\n##D \n##D # Group PDFs by sample\n##D grouped1 <- groupBaseline(baseline, groupBy=\"sample_id\")\n##D sample_colors <- c(\"-1h\"=\"steelblue\", \"+7d\"=\"firebrick\")\n##D plotBaselineDensity(grouped1, idColumn=\"sample_id\", colorValues=sample_colors, \n##D sigmaLimits=c(-1, 1))\n##D \n##D # Group PDFs by both sample (between variable) and isotype (within variable)\n##D grouped2 <- groupBaseline(baseline, groupBy=c(\"sample_id\", \"c_call\"))\n##D isotype_colors <- c(\"IGHM\"=\"darkorchid\", \"IGHD\"=\"firebrick\", \n##D \"IGHG\"=\"seagreen\", 
\"IGHA\"=\"steelblue\")\n##D plotBaselineDensity(grouped2, idColumn=\"sample_id\", groupColumn=\"c_call\",\n##D colorElement=\"group\", colorValues=isotype_colors,\n##D sigmaLimits=c(-1, 1))\n##D # Collapse previous isotype (within variable) grouped PDFs into sample PDFs\n##D grouped3 <- groupBaseline(grouped2, groupBy=\"sample_id\")\n##D sample_colors <- c(\"-1h\"=\"steelblue\", \"+7d\"=\"firebrick\")\n##D plotBaselineDensity(grouped3, idColumn=\"sample_id\", colorValues=sample_colors,\n##D sigmaLimits=c(-1, 1))\n## End(Not run)\n\n\n"} {"package":"shazam","topic":"makeAverage1merMut","snippet":"### Name: makeAverage1merMut\n### Title: Make a 1-mer mutability model by averaging over a 5-mer\n### mutability model\n### Aliases: makeAverage1merMut\n\n### ** Examples\n\n# Make a degenerate 5-mer model (length of 1024) based on a 1-mer model\nexample1merMut <- c(A=0.2, T=0.1, C=0.4, G=0.3)\ndegenerate5merMut <- makeDegenerate5merMut(mut1mer = example1merMut)\n \n# Now make a 1-mer model by averaging over the degenerate 5-mer model\n# Expected to get back example1merMut\nmakeAverage1merMut(mut5mer = degenerate5merMut)\n\n\n\n"} {"package":"shazam","topic":"makeAverage1merSub","snippet":"### Name: makeAverage1merSub\n### Title: Make a 1-mer substitution model by averaging over a 5-mer\n### substitution model\n### Aliases: makeAverage1merSub\n\n### ** Examples\n\n# Make a degenerate 5-mer model (4x1024) based on HKL_S1F (4x4)\ndegenerate5merSub <- makeDegenerate5merSub(sub1mer = HKL_S1F)\n\n# Now make a 1-mer model by averaging over the degenerate 5-mer model\n# Expected to get back HKL_S1F\nmakeAverage1merSub(sub5mer = degenerate5merSub)\n\n\n\n"} {"package":"shazam","topic":"makeDegenerate5merMut","snippet":"### Name: makeDegenerate5merMut\n### Title: Make a degenerate 5-mer mutability model based on a 1-mer\n### mutability model\n### Aliases: makeDegenerate5merMut\n\n### ** Examples\n\n# Make a degenerate 5-mer model (length of 1024) based on a 1-mer model\nexample1merMut <- c(A=0.2, T=0.1, C=0.4, G=0.3)\ndegenerate5merMut <- makeDegenerate5merMut(mut1mer = example1merMut)\n\n# Look at a few 5-mers\ndegenerate5merMut[c(\"AAAAT\", \"AACAT\", \"AAGAT\", \"AATAT\")]\n\n# Normalized\nsum(degenerate5merMut)\n\n\n\n"} {"package":"shazam","topic":"makeDegenerate5merSub","snippet":"### Name: makeDegenerate5merSub\n### Title: Make a degenerate 5-mer substitution model based on a 1-mer\n### substitution model\n### Aliases: makeDegenerate5merSub\n\n### ** Examples\n\n# Make a degenerate 5-mer model (4x1024) based on HKL_S1F (4x4)\n# Note: not to be confused with HKL_S5F@substitution, which is non-degenerate\ndegenerate5merSub <- makeDegenerate5merSub(sub1mer = HKL_S1F)\n\n# Look at a few 5-mers\ndegenerate5merSub[, c(\"AAAAT\", \"AACAT\", \"AAGAT\", \"AATAT\")]\n\n\n\n"} {"package":"shazam","topic":"makeGraphDf","snippet":"### Name: makeGraphDf\n### Title: Build a data.frame from a ChangeoClone and an igraph object\n### containing a clonal lineage\n### Aliases: makeGraphDf\n\n### ** Examples\n\n# Load and subset example data\ndata(ExampleDb, package = \"alakazam\")\ndata(ExampleTrees, package = \"alakazam\")\ngraph <- ExampleTrees[[17]]\ndb <- subset(ExampleDb, clone_id == graph$clone)\nclone <- alakazam::makeChangeoClone(db)\n\n# Extend data with lineage information\ndf <- makeGraphDf(graph, clone)\n\n\n\n"} {"package":"shazam","topic":"minNumMutationsTune","snippet":"### Name: minNumMutationsTune\n### Title: Parameter tuning for minNumMutations\n### Aliases: minNumMutationsTune\n\n### ** Examples\n\n# 
Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")\n\n# Count the number of mutations per 5-mer\nsubCount <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n model=\"s\", multipleMutation=\"independent\",\n returnModel=\"5mer\", numMutationsOnly=TRUE)\n\n# Tune minNumMutations\nminNumMutationsTune(subCount, seq(from=10, to=80, by=10))\n \n\n\n"} {"package":"shazam","topic":"minNumSeqMutationsTune","snippet":"### Name: minNumSeqMutationsTune\n### Title: Parameter tuning for minNumSeqMutations\n### Aliases: minNumSeqMutationsTune\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\" & sample_id == \"-1h\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=75)\n# Create model using only silent mutations\nsub <- createSubstitutionMatrix(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\", \n model=\"s\", multipleMutation=\"independent\",\n returnModel=\"5mer\", numMutationsOnly=FALSE,\n minNumMutations=20)\n\n# Count the number of mutations in sequences containing each 5-mer\nmutCount <- createMutabilityMatrix(db, substitutionModel = sub,\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n model=\"s\", multipleMutation=\"independent\",\n numSeqMutationsOnly=TRUE)\n\n# Tune minNumSeqMutations\nminNumSeqMutationsTune(mutCount, seq(from=100, to=300, by=50))\n## End(No test) \n\n\n"} {"package":"shazam","topic":"observedMutations","snippet":"### Name: observedMutations\n### Title: Calculate observed numbers of mutations\n### Aliases: observedMutations\n\n### ** Examples\n\n# Subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- ExampleDb[1:10, ]\n\n# Calculate mutation frequency over the entire sequence\ndb_obs <- observedMutations(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n frequency=TRUE,\n nproc=1)\n\n# Count of V-region mutations split by FWR and CDR\n# With mutations only considered replacement if charge changes\ndb_obs <- observedMutations(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n regionDefinition=IMGT_V,\n mutationDefinition=CHARGE_MUTATIONS,\n nproc=1)\n \n# Count of VDJ-region mutations, split by FWR and CDR\ndb_obs <- observedMutations(db, sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n regionDefinition=IMGT_VDJ,\n nproc=1)\n \n# Extend data with lineage information\ndata(ExampleTrees, package=\"alakazam\")\ngraph <- ExampleTrees[[17]]\nclone <- alakazam::makeChangeoClone(subset(ExampleDb, clone_id == graph$clone))\ngdf <- makeGraphDf(graph, clone)\n\n# Count of mutations between observed sequence and immediate ancestor\ndb_obs <- observedMutations(gdf, sequenceColumn=\"sequence\",\n germlineColumn=\"parent_sequence\",\n regionDefinition=IMGT_VDJ,\n nproc=1) \n \n\n\n"} {"package":"shazam","topic":"plotBaselineDensity","snippet":"### Name: plotBaselineDensity\n### Title: Plots BASELINe probability density functions\n### Aliases: plotBaselineDensity\n\n### ** Examples\n\n## Not run: \n##D # Subset example data as a demo\n##D data(ExampleDb, package=\"alakazam\")\n##D db <- 
subset(ExampleDb, c_call %in% c(\"IGHM\", \"IGHG\"))\n##D set.seed(112)\n##D db <- dplyr::slice_sample(db, n=100)\n##D \n##D # Collapse clones\n##D db <- collapseClones(db, cloneColumn=\"clone_id\",\n##D sequenceColumn=\"sequence_alignment\",\n##D germlineColumn=\"germline_alignment_d_mask\",\n##D method=\"thresholdedFreq\", minimumFrequency=0.6,\n##D includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n##D \n##D # Calculate BASELINe\n##D baseline <- calcBaseline(db, \n##D sequenceColumn=\"clonal_sequence\",\n##D germlineColumn=\"clonal_germline\", \n##D testStatistic=\"focused\",\n##D regionDefinition=IMGT_V,\n##D targetingModel=HH_S5F,\n##D nproc=1)\n##D \n##D # Grouping the PDFs by the sample and isotype annotations\n##D grouped <- groupBaseline(baseline, groupBy=c(\"sample_id\", \"c_call\"))\n##D \n##D # Plot density faceted by region with custom isotype colors\n##D isotype_colors <- c(\"IGHM\"=\"darkorchid\", \"IGHD\"=\"firebrick\", \n##D \"IGHG\"=\"seagreen\", \"IGHA\"=\"steelblue\")\n##D plotBaselineDensity(grouped, \"sample_id\", \"c_call\", colorValues=isotype_colors, \n##D colorElement=\"group\", sigmaLimits=c(-1, 1))\n##D \n##D # Facet by isotype instead of region\n##D sample_colors <- c(\"-1h\"=\"steelblue\", \"+7d\"=\"firebrick\")\n##D plotBaselineDensity(grouped, \"sample_id\", \"c_call\", facetBy=\"group\",\n##D colorValues=sample_colors, sigmaLimits=c(-1, 1))\n## End(Not run)\n\n\n\n"} {"package":"shazam","topic":"plotBaselineSummary","snippet":"### Name: plotBaselineSummary\n### Title: Plots BASELINe summary statistics\n### Aliases: plotBaselineSummary\n\n### ** Examples\n\n## No test: \n# Subset example data as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call %in% c(\"IGHM\", \"IGHG\"))\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=25)\n\n# Collapse clones\ndb <- collapseClones(db, cloneColumn=\"clone_id\",\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n method=\"thresholdedFreq\", minimumFrequency=0.6,\n includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n \n# Calculate BASELINe\nbaseline <- calcBaseline(db, \n sequenceColumn=\"clonal_sequence\",\n germlineColumn=\"clonal_germline\", \n testStatistic=\"focused\",\n regionDefinition=IMGT_V,\n targetingModel=HH_S5F,\n nproc=1)\n \n# Grouping the PDFs by sample and isotype annotations\ngrouped <- groupBaseline(baseline, groupBy=c(\"sample_id\", \"c_call\"))\n\n# Plot mean and confidence interval by region with custom group colors\nisotype_colors <- c(\"IGHM\"=\"darkorchid\", \"IGHD\"=\"firebrick\", \n \"IGHG\"=\"seagreen\", \"IGHA\"=\"steelblue\")\nplotBaselineSummary(grouped, \"sample_id\", \"c_call\", \n groupColors=isotype_colors, facetBy=\"region\")\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"plotDensityThreshold","snippet":"### Name: plotDensityThreshold\n### Title: Plot findThreshold results for the density method\n### Aliases: plotDensityThreshold\n\n### ** Examples\n\n## No test: \n# Subset example data to one sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, sample_id == \"-1h\")\n\n# Use nucleotide Hamming distance and normalize by junction length\ndb <- distToNearest(db, sequenceColumn=\"junction\", vCallColumn=\"v_call_genotyped\",\n jCallColumn=\"j_call\", model=\"ham\", normalize=\"len\", nproc=1)\n\n# To find the threshold cut, call findThreshold function for \"density\" method.\noutput <- findThreshold(db$dist_nearest, method=\"density\")\nprint(output)\n\n# Plot\nplotDensityThreshold(output)\n## 
End(No test)\n\n\n"} {"package":"shazam","topic":"plotGmmThreshold","snippet":"### Name: plotGmmThreshold\n### Title: Plot findThreshold results for the gmm method\n### Aliases: plotGmmThreshold\n\n### ** Examples\n\n## No test: \n# Subset example data to one sample as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, sample_id == \"-1h\")\n\n# Use nucleotide Hamming distance and normalize by junction length\ndb <- distToNearest(db, sequenceColumn=\"junction\", vCallColumn=\"v_call_genotyped\",\n jCallColumn=\"j_call\", model=\"ham\", normalize=\"len\", nproc=1)\n\n# To find the threshold cut, call findThreshold function for \"gmm\" method.\noutput <- findThreshold(db$dist_nearest, method=\"gmm\", model=\"norm-norm\", cutoff=\"opt\")\nprint(output)\n\n# Plot results\nplotGmmThreshold(output, binwidth=0.02)\n## End(No test)\n\n\n"} {"package":"shazam","topic":"plotMutability","snippet":"### Name: plotMutability\n### Title: Plot mutability probabilities\n### Aliases: plotMutability\n\n### ** Examples\n\n# Plot one nucleotide in circular style\nplotMutability(HH_S5F, \"C\")\n\n# Plot two nucleotides in barchart style\nplotMutability(HH_S5F, c(\"G\", \"T\"), style=\"bar\")\n\n\n\n"} {"package":"shazam","topic":"plotSlideWindowTune","snippet":"### Name: plotSlideWindowTune\n### Title: Visualize parameter tuning for sliding window approach\n### Aliases: plotSlideWindowTune\n\n### ** Examples\n\n# Use an entry in the example data for input and germline sequence\ndata(ExampleDb, package=\"alakazam\")\n\n# Try out thresholds of 2-4 mutations in window sizes of 3-5 nucleotides \n# on a subset of ExampleDb\ntuneList <- slideWindowTune(db = ExampleDb[1:10, ], \n mutThreshRange = 2:4, windowSizeRange = 3:5,\n verbose = FALSE)\n\n# Visualize\n# Plot numbers of sequences filtered without jittering y-axis values\nplotSlideWindowTune(tuneList, pchs=1:3, ltys=1:3, cols=1:3, \n plotFiltered='filtered', jitter.y=FALSE)\n \n# Notice that some of the lines overlap\n# Jittering could help\nplotSlideWindowTune(tuneList, pchs=1:3, ltys=1:3, cols=1:3,\n plotFiltered='filtered', jitter.y=TRUE)\n \n# Plot numbers of sequences remaining instead of filtered\nplotSlideWindowTune(tuneList, pchs=1:3, ltys=1:3, cols=1:3, \n plotFiltered='remaining', jitter.y=TRUE, \n legendPos=\"bottomright\")\n \n# Plot percentages of sequences filtered with a tiny amount of jittering\nplotSlideWindowTune(tuneList, pchs=1:3, ltys=1:3, cols=1:3,\n plotFiltered='filtered', percentage=TRUE, \n jitter.y=TRUE, jitter.y.amt=0.01)\n\n\n"} {"package":"shazam","topic":"plotTune","snippet":"### Name: plotTune\n### Title: Visualize parameter tuning for minNumMutations and\n### minNumSeqMutations\n### Aliases: plotTune\n\n### ** Examples\n\n## No test: \n# Subset example data to one isotype and 50 sequences\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHA\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=50)\n\ntuneMtx = list()\nfor (i in 1:length(unique(db$sample_id))) {\n # Get data corresponding to current sample\n curDb = db[db[[\"sample_id\"]] == unique(db[[\"sample_id\"]])[i], ]\n \n # Count the number of mutations per 5-mer\n subCount = createSubstitutionMatrix(db=curDb, model=\"s\", \n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n vCallColumn=\"v_call\",\n multipleMutation=\"independent\",\n returnModel=\"5mer\", numMutationsOnly=TRUE)\n \n # Tune over minNumMutations = 5..50\n subTune = minNumMutationsTune(subCount, seq(from=5, to=50, by=5))\n 
\n tuneMtx = c(tuneMtx, list(subTune))\n}\n\n# Name tuneMtx after sample names \nnames(tuneMtx) = unique(db[[\"sample_id\"]])\n\n# plot with legend for both samples for a subset of minNumMutations values\nplotTune(tuneMtx, thresh=c(5, 15, 25, 40), criterion=\"3mer\",\n pchs=16:17, ltys=1:2, cols=2:3, \n plotLegend=TRUE, legendPos=c(25, 30))\n\n# plot for only 1 sample for all the minNumMutations values (no legend)\nplotTune(tuneMtx[[1]], thresh=seq(from=5, to=50, by=5), criterion=\"3mer\")\n## End(No test)\n\n\n\n"} {"package":"shazam","topic":"setRegionBoundaries","snippet":"### Name: setRegionBoundaries\n### Title: Build a RegionDefinition object that includes CDR3 and FWR4.\n### Aliases: setRegionBoundaries\n\n### ** Examples\n\n# Load and subset example data\ndata(ExampleDb, package = \"alakazam\") \nlen <- ExampleDb$junction_length[1]\nsequence <- ExampleDb$sequence_alignment[1]\nregion <- setRegionBoundaries(len, sequence, regionDefinition = IMGT_VDJ)\n\n\n\n"} {"package":"shazam","topic":"shmulateSeq","snippet":"### Name: shmulateSeq\n### Title: Simulate mutations in a single sequence\n### Aliases: shmulateSeq\n\n### ** Examples\n\n# Define example input sequence\nsequence <- \"NGATCTGACGACACGGCCGTGTATTACTGTGCGAGAGATA.TTTA\"\n\n# Simulate using the default human 5-mer targeting model\n# Introduce 6 mutations\nshmulateSeq(sequence, numMutations=6, frequency=FALSE)\n\n# Introduce 5% mutations\nshmulateSeq(sequence, numMutations=0.05, frequency=TRUE)\n\n\n\n"} {"package":"shazam","topic":"shmulateTree","snippet":"### Name: shmulateTree\n### Title: Simulate mutations in a lineage tree\n### Aliases: shmulateTree\n\n### ** Examples\n\n# Load example lineage and define example MRCA\ndata(ExampleTrees, package=\"alakazam\")\ngraph <- ExampleTrees[[17]]\nsequence <- \"NGATCTGACGACACGGCCGTGTATTACTGTGCGAGAGATAGTTTA\"\n\n# Simulate using the default human 5-mer targeting model\nshmulateTree(sequence, graph)\n\n# Simulate using the mouse 5-mer targeting model\n# Exclude nodes without a sample identifier\n# Add 20% mutation rate to the immediate offspring of the MRCA\nshmulateTree(sequence, graph, targetingModel=MK_RS5NF,\n field=\"sample_id\", exclude=NA, junctionWeight=0.2)\n \n\n\n"} {"package":"shazam","topic":"slideWindowDb","snippet":"### Name: slideWindowDb\n### Title: Sliding window approach towards filtering sequences in a\n### 'data.frame'\n### Aliases: slideWindowDb\n\n### ** Examples\n\n# Use an entry in the example data for input and germline sequence\ndata(ExampleDb, package=\"alakazam\")\n\n# Apply the sliding window approach on a subset of ExampleDb\nslideWindowDb(db=ExampleDb[1:10, ], sequenceColumn=\"sequence_alignment\", \n germlineColumn=\"germline_alignment_d_mask\", \n mutThresh=6, windowSize=10, nproc=1)\n\n\n\n"} {"package":"shazam","topic":"slideWindowSeq","snippet":"### Name: slideWindowSeq\n### Title: Sliding window approach towards filtering a single sequence\n### Aliases: slideWindowSeq\n\n### ** Examples\n\n# Use an entry in the example data for input and germline sequence\ndata(ExampleDb, package=\"alakazam\")\nin_seq <- ExampleDb[[\"sequence_alignment\"]][100]\ngerm_seq <- ExampleDb[[\"germline_alignment_d_mask\"]][100]\n\n# Determine if in_seq has 6 or more mutations in 10 consecutive nucleotides\nslideWindowSeq(inputSeq=in_seq, germlineSeq=germ_seq, mutThresh=6, windowSize=10)\nslideWindowSeq(inputSeq=\"TCGTCGAAAA\", germlineSeq=\"AAAAAAAAAA\", mutThresh=6, windowSize=10)\n\n\n"} {"package":"shazam","topic":"slideWindowTune","snippet":"### Name: 
slideWindowTune\n### Title: Parameter tuning for sliding window approach\n### Aliases: slideWindowTune\n\n### ** Examples\n\n# Load and subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- ExampleDb[1:5, ]\n\n# Try out thresholds of 2-4 mutations in window sizes of 7-9 nucleotides. \n# In this case, all combinations are legal.\nslideWindowTune(db, mutThreshRange=2:4, windowSizeRange=7:9)\n\n# Illegal combinations are skipped, returning NAs.\nslideWindowTune(db, mutThreshRange=2:4, windowSizeRange=2:4, \n verbose=FALSE)\n \n# Run calcObservedMutations separately\nexDbMutList <- sapply(1:5, function(i) {\n calcObservedMutations(inputSeq=db[[\"sequence_alignment\"]][i],\n germlineSeq=db[[\"germline_alignment_d_mask\"]][i],\n returnRaw=TRUE)$pos })\nslideWindowTune(db, dbMutList=exDbMutList, \n mutThreshRange=2:4, windowSizeRange=2:4)\n\n\n"} {"package":"shazam","topic":"slideWindowTunePlot","snippet":"### Name: slideWindowTunePlot\n### Title: slideWindowTunePlot - plotSlideWindowTune backward compatibility\n### Aliases: slideWindowTunePlot\n\n### ** Examples\n\n# Use an entry in the example data for input and germline sequence\ndata(ExampleDb, package=\"alakazam\")\n\n# Try out thresholds of 2-4 mutations in window sizes of 3-5 nucleotides \n# on a subset of ExampleDb\ntuneList <- slideWindowTune(db = ExampleDb[1:10, ], \n mutThreshRange = 2:4, windowSizeRange = 3:5,\n verbose = FALSE)\n\n# Visualize\n# Plot numbers of sequences filtered without jittering y-axis values\nslideWindowTunePlot(tuneList, pchs=1:3, ltys=1:3, cols=1:3, \n plotFiltered=TRUE, jitter.y=FALSE)\n \n# Notice that some of the lines overlap\n# Jittering could help\nslideWindowTunePlot(tuneList, pchs=1:3, ltys=1:3, cols=1:3,\n plotFiltered=TRUE, jitter.y=TRUE)\n \n# Plot numbers of sequences remaining instead of filtered\nslideWindowTunePlot(tuneList, pchs=1:3, ltys=1:3, cols=1:3, \n plotFiltered=FALSE, jitter.y=TRUE, \n legendPos=\"bottomright\")\n \n# Plot percentages of sequences filtered with a tiny amount of jittering\nslideWindowTunePlot(tuneList, pchs=1:3, ltys=1:3, cols=1:3,\n plotFiltered=TRUE, percentage=TRUE, \n jitter.y=TRUE, jitter.y.amt=0.01)\n\n\n"} {"package":"shazam","topic":"summarizeBaseline","snippet":"### Name: summarizeBaseline\n### Title: Calculate BASELINe summary statistics\n### Aliases: summarizeBaseline\n\n### ** Examples\n\n## No test: \n# Subset example data\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, c_call == \"IGHG\")\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=100)\n\n# Collapse clones\ndb <- collapseClones(db, cloneColumn=\"clone_id\",\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n method=\"thresholdedFreq\", minimumFrequency=0.6,\n includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n \n# Calculate BASELINe\nbaseline <- calcBaseline(db, \n sequenceColumn=\"clonal_sequence\",\n germlineColumn=\"clonal_germline\", \n testStatistic=\"focused\",\n regionDefinition=IMGT_V,\n targetingModel=HH_S5F,\n nproc = 1)\n\n# Grouping the PDFs by the sample annotation\ngrouped <- groupBaseline(baseline, groupBy=\"sample_id\")\n\n# Get a data.frame of the summary statistics\nstats <- summarizeBaseline(grouped, returnType=\"df\")\n## End(No test) \n\n\n"} {"package":"shazam","topic":"testBaseline","snippet":"### Name: testBaseline\n### Title: Two-sided test of BASELINe PDFs\n### Aliases: testBaseline\n\n### ** Examples\n\n## No test: \n# Subset example data as a demo\ndata(ExampleDb, package=\"alakazam\")\ndb <- subset(ExampleDb, 
c_call %in% c(\"IGHM\", \"IGHG\"))\nset.seed(112)\ndb <- dplyr::slice_sample(db, n=200)\n\n# Collapse clones\ndb <- collapseClones(db, cloneColumn=\"clone_id\",\n sequenceColumn=\"sequence_alignment\",\n germlineColumn=\"germline_alignment_d_mask\",\n method=\"thresholdedFreq\", minimumFrequency=0.6,\n includeAmbiguous=FALSE, breakTiesStochastic=FALSE)\n \n# Calculate BASELINe\nbaseline <- calcBaseline(db, \n sequenceColumn=\"clonal_sequence\",\n germlineColumn=\"clonal_germline\", \n testStatistic=\"focused\",\n regionDefinition=IMGT_V,\n targetingModel=HH_S5F,\n nproc=1)\n\n# Group PDFs by the isotype\ngrouped <- groupBaseline(baseline, groupBy=\"c_call\")\n\n# Visualize isotype PDFs\nplot(grouped, \"c_call\")\n\n# Perform test on isotype PDFs\ntestBaseline(grouped, groupBy=\"c_call\")\n## End(No test)\n\n\n"} {"package":"shazam","topic":"writeTargetingDistance","snippet":"### Name: writeTargetingDistance\n### Title: Write targeting model distances to a file\n### Aliases: writeTargetingDistance\n\n### ** Examples\n\n## Not run: \n##D # Write HS5F targeting model to working directory as hs5f.tab\n##D writeTargetingDistance(HH_S5F, \"hh_s5f.tsv\") \n## End(Not run)\n\n\n\n"} {"package":"noisyr","topic":"calculate_expression_profile","snippet":"### Name: calculate_expression_profile\n### Title: Calculate the expression profile of a gene\n### Aliases: calculate_expression_profile\n\n### ** Examples\n\nbams <- rep(system.file(\"extdata\", \"ex1.bam\", package=\"Rsamtools\", mustWork=TRUE), 2)\ngenes <- data.frame(\"id\" = 1:2,\n \"gene_id\" = c(\"gene1\", \"gene2\"),\n \"seqid\" = c(\"seq1\", \"seq2\"),\n \"start\" = 1,\n \"end\" = 100)\nprofile <- calculate_expression_profile(\n gene = genes[1,],\n bams = bams,\n mapq.unique = 99\n)\n\nggplot2::ggplot(tibble::tibble(y = profile$profile[,1],\n x = seq_along(y))) +\nggplot2::geom_bar(ggplot2::aes(x, y), stat = \"identity\") +\nggplot2::theme_minimal()\n\n\n"} {"package":"noisyr","topic":"calculate_expression_similarity_counts","snippet":"### Name: calculate_expression_similarity_counts\n### Title: Calcualate the expression levels and expression levels\n### similarity matrices using the count matrix\n### Aliases: calculate_expression_similarity_counts\n\n### ** Examples\n\ncalculate_expression_similarity_counts(\n expression.matrix = matrix(1:100, ncol = 5),\n similarity.measure = \"correlation_pearson\",\n n.elements.per.window = 3)\n\n\n"} {"package":"noisyr","topic":"calculate_expression_similarity_transcript","snippet":"### Name: calculate_expression_similarity_transcript\n### Title: Calcualte the distance matrices using the BAM files\n### Aliases: calculate_expression_similarity_transcript\n\n### ** Examples\n\nbams <- rep(system.file(\"extdata\", \"ex1.bam\", package=\"Rsamtools\", mustWork=TRUE), 2)\ngenes <- data.frame(\"id\" = 1:2,\n \"gene_id\" = c(\"gene1\", \"gene2\"),\n \"seqid\" = c(\"seq1\", \"seq2\"),\n \"start\" = 1,\n \"end\" = 1600)\nexpression.summary <- calculate_expression_similarity_transcript(\n bams = bams,\n genes = genes,\n mapq.unique = 99\n)\n\n\n"} {"package":"noisyr","topic":"calculate_first_minimum_density","snippet":"### Name: calculate_first_minimum_density\n### Title: Function to find the first local minimum of the density of a\n### vector\n### Aliases: calculate_first_minimum_density\n\n### ** Examples\n\ncalculate_first_minimum_density(\n matrix(c(rep(0,100),rep(3,30),rep(10,50),12,13,15,20),ncol=1),\n log.transform=FALSE, makeplots=TRUE\n)\n\n\n\n"} 
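A minimal end-to-end sketch tying together the noisyr count-based snippets above and below (function and argument names are taken from the surrounding snippets; the toy Poisson matrix, lambda, and window size are illustrative assumptions, as is passing the calculate_noise_threshold output straight into remove_noise_from_matrix):

library(noisyr)
# Toy count matrix: 100 genes x 5 samples (illustrative data, not from the package)
expression.matrix <- matrix(rpois(500, lambda = 20), ncol = 5)
# Summarize expression levels vs. similarity in sliding windows
expression.summary <- calculate_expression_similarity_counts(
  expression.matrix = expression.matrix,
  similarity.measure = "correlation_pearson",
  n.elements.per.window = 10)
# Derive the noise threshold(s) and strip sub-threshold counts
noise.thresholds <- calculate_noise_threshold(expression.summary)
denoised.matrix <- remove_noise_from_matrix(
  expression.matrix = expression.matrix,
  noise.thresholds = noise.thresholds)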
{"package":"noisyr","topic":"calculate_noise_threshold","snippet":"### Name: calculate_noise_threshold\n### Title: Function to calculate the noise threshold for a given expression\n### matrix and parameters\n### Aliases: calculate_noise_threshold\n\n### ** Examples\n\nexpression.summary <- calculate_expression_similarity_counts(\n expression.matrix = matrix(1:100, ncol=5),\n method = \"correlation_pearson\",\n n.elements.per.window = 3)\ncalculate_noise_threshold(expression.summary)\n\n\n"} {"package":"noisyr","topic":"calculate_noise_threshold_method_statistics","snippet":"### Name: calculate_noise_threshold_method_statistics\n### Title: Function to tabulate statistics for different methods of\n### calculating the noise threshold\n### Aliases: calculate_noise_threshold_method_statistics\n\n### ** Examples\n\nexpression.summary <- calculate_expression_similarity_counts(\n expression.matrix = matrix(1:100, ncol=5),\n method = \"correlation_pearson\",\n n.elements.per.window = 3)\ncalculate_noise_threshold_method_statistics(expression.summary)\n\n\n"} {"package":"noisyr","topic":"cast_gtf_to_genes","snippet":"### Name: cast_gtf_to_genes\n### Title: Function to extract exon names and positions from a gtf file\n### Aliases: cast_gtf_to_genes\n\n### ** Examples\n\nfl <- system.file(\"extdata\", \"example.gtf.gz\", package=\"Rsamtools\", mustWork=TRUE)\ncast_gtf_to_genes(fl)\n\n\n"} {"package":"noisyr","topic":"cast_matrix_to_numeric","snippet":"### Name: cast_matrix_to_numeric\n### Title: Cast a matrix of any type to numeric\n### Aliases: cast_matrix_to_numeric\n\n### ** Examples\n\ncast_matrix_to_numeric(matrix(\n c(1, \"2\", 3.0, 4),\n ncol=2,\n dimnames=list(paste0(\"X\", 1:2),\n paste0(\"Y\", 1:2))))\n\n\n"} {"package":"noisyr","topic":"filter_genes_transcript","snippet":"### Name: filter_genes_transcript\n### Title: Function to filter the gene table for the transcript approach\n### Aliases: filter_genes_transcript\n\n### ** Examples\n\nbams <- rep(system.file(\"extdata\", \"ex1.bam\", package=\"Rsamtools\", mustWork=TRUE), 2)\ngenes <- data.frame(\"id\" = 1:2,\n \"gene_id\" = c(\"gene1\", \"gene2\"),\n \"seqid\" = c(\"seq1\", \"seq2\"),\n \"start\" = 1,\n \"end\" = 1600)\nnoise.thresholds <- c(0, 1)\nexpression.summary = calculate_expression_similarity_transcript(\n bams = bams,\n genes = genes,\n mapq.unique = 99\n)\nfilter_genes_transcript(\n genes = genes,\n expression.matrix = expression.summary$expression.matrix,\n noise.thresholds = noise.thresholds,\n)\n\n\n\n"} {"package":"noisyr","topic":"get_methods_calculate_noise_threshold","snippet":"### Name: get_methods_calculate_noise_threshold\n### Title: Show the methods for calculating a noise threshold\n### Aliases: get_methods_calculate_noise_threshold\n\n### ** Examples\n\nget_methods_calculate_noise_threshold()\n\n\n"} {"package":"noisyr","topic":"get_methods_correlation_distance","snippet":"### Name: get_methods_correlation_distance\n### Title: Show the methods for calculating correlation or distance\n### Aliases: get_methods_correlation_distance\n\n### ** Examples\n\nget_methods_correlation_distance()\n\n\n"} {"package":"noisyr","topic":"noisyr","snippet":"### Name: noisyr\n### Title: Run the noisyR pipeline\n### Aliases: noisyr\n\n### ** Examples\n\nnoisyr(approach.for.similarity.calculation = \"counts\",\n expression.matrix = matrix(1:100, ncol = 5))\n\n\n"} {"package":"noisyr","topic":"noisyr_counts","snippet":"### Name: noisyr_counts\n### Title: Run the noisyR pipeline for the count matrix approach\n### Aliases: 
noisyr_counts\n\n### ** Examples\n\nnoisyr_counts(\n expression.matrix = matrix(1:100, ncol = 5),\n similarity.measure = \"correlation_pearson\",\n n.elements.per.window = 3)\n\n\n"} {"package":"noisyr","topic":"noisyr_transcript","snippet":"### Name: noisyr_transcript\n### Title: Run the noisyR pipeline for the transcript approach\n### Aliases: noisyr_transcript\n\n### ** Examples\n\nbams <- rep(system.file(\"extdata\", \"ex1.bam\", package=\"Rsamtools\", mustWork=TRUE), 2)\ngenes <- data.frame(\"id\" = 1:2,\n \"gene_id\" = c(\"gene1\", \"gene2\"),\n \"seqid\" = c(\"seq1\", \"seq2\"),\n \"start\" = 1,\n \"end\" = 1600)\nnoisyr_transcript(\n bams = bams,\n genes = genes,\n destination.files = paste0(tempdir(), \"/\", basename(bams), \".noisefiltered.bam\")\n)\n\n\n"} {"package":"noisyr","topic":"optimise_window_length","snippet":"### Name: optimise_window_length\n### Title: Optimise the elements per window for the count matrix approach\n### Aliases: optimise_window_length\n\n### ** Examples\n\noptimise_window_length(\n matrix(1:100+runif(100), ncol=5, byrow=TRUE),\n window.length.min=3, window.length.max=5, iteration.number=5\n)\n\n\n"} {"package":"noisyr","topic":"plot_expression_similarity","snippet":"### Name: plot_expression_similarity\n### Title: Plot the similarity against expression levels\n### Aliases: plot_expression_similarity\n\n### ** Examples\n\nplots <- plot_expression_similarity(\n expression.summary=list(\n \"expression.levels\" = matrix(2^(10*seq(0,1,length.out=100))),\n \"expression.levels.similarity\" = matrix(seq(0,1,length.out=100)+(runif(100)/5))))\nplots[[1]]\nplots[[2]]\n\n\n"} {"package":"noisyr","topic":"remove_noise_from_bams","snippet":"### Name: remove_noise_from_bams\n### Title: Function to remove the noisy reads from the BAM files\n### Aliases: remove_noise_from_bams\n\n### ** Examples\n\nbams <- rep(system.file(\"extdata\", \"ex1.bam\", package=\"Rsamtools\", mustWork=TRUE), 2)\ngenes <- data.frame(\"id\" = 1:2,\n \"gene_id\" = c(\"gene1\", \"gene2\"),\n \"seqid\" = c(\"seq1\", \"seq2\"),\n \"start\" = 1,\n \"end\" = 1600)\nnoise.thresholds <- c(0, 1)\nexpression.summary = calculate_expression_similarity_transcript(\n bams = bams,\n genes = genes,\n mapq.unique = 99\n)\nremove_noise_from_bams(\n bams = bams,\n genes = genes,\n expression = expression.summary,\n noise.thresholds = noise.thresholds,\n destination.files = paste0(tempdir(), \"/\", basename(bams), \".noisefiltered.bam\"),\n mapq.unique = 99\n)\n\n\n\n"} {"package":"noisyr","topic":"remove_noise_from_matrix","snippet":"### Name: remove_noise_from_matrix\n### Title: Function to remove the noisy reads from the expression matrix\n### Aliases: remove_noise_from_matrix\n\n### ** Examples\n\nexpression.matrix <- matrix(1:100, ncol=5)\nnoise.thresholds <- c(5,30,45,62,83)\nremove_noise_from_matrix(\n expression.matrix = expression.matrix,\n noise.thresholds = noise.thresholds\n)\n\n\n\n"} {"package":"Surrogate","topic":"AA.MultS","snippet":"### Name: AA.MultS\n### Title: Compute the multiple-surrogate adjusted association\n### Aliases: AA.MultS\n### Keywords: Adjusted Association Causal-Inference framework\n### Counterfactuals Single-trial setting Sensitivity ICA Multiple\n### surrogates\n\n### ** Examples\n\ndata(ARMD.MultS)\n\n# Regress T on Z, S1 on Z, ..., Sk on Z \n# (to compute the covariance matrix of the residuals)\nRes_T <- residuals(lm(Diff52~Treat, data=ARMD.MultS))\nRes_S1 <- residuals(lm(Diff4~Treat, data=ARMD.MultS))\nRes_S2 <- residuals(lm(Diff12~Treat, data=ARMD.MultS))\nRes_S3 <- 
residuals(lm(Diff24~Treat, data=ARMD.MultS))\nResiduals <- cbind(Res_T, Res_S1, Res_S2, Res_S3)\n\n# Make covariance matrix of residuals, Sigma_gamma\nSigma_gamma <- cov(Residuals)\n\n# Conduct analysis\nResult <- AA.MultS(Sigma_gamma = Sigma_gamma, N = 188, Alpha = .05)\n\n# Explore results\nsummary(Result)\n\n\n"} {"package":"Surrogate","topic":"BifixedContCont","snippet":"### Name: BifixedContCont\n### Title: Fits a bivariate fixed-effects model to assess surrogacy in the\n### meta-analytic multiple-trial setting (Continuous-continuous case)\n### Aliases: BifixedContCont\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy Individual-level surrogacy Fixed-effect models\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D # Example 1, based on the ARMD data\n##D data(ARMD)\n##D \n##D # Fit a full bivariate fixed-effects model with weighting according to the \n##D # number of patients in stage 2 of the two stage approach to assess surrogacy:\n##D Sur <- BifixedContCont(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center, \n##D Pat.ID=Id, Model=\"Full\", Weighted=TRUE)\n##D \n##D # Obtain a summary of the results\n##D summary(Sur)\n##D \n##D # Obtain a graphical representation of the trial- and individual-level surrogacy\n##D plot(Sur)\n##D \n##D \n##D # Example 2\n##D # Conduct a surrogacy analysis based on a simulated dataset with 2000 patients, \n##D # 100 trials, and Rindiv=Rtrial=.8\n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Reduced\")\n##D \n##D # Fit a reduced bivariate fixed-effects model with no weighting according to the \n##D # number of patients in stage 2 of the two stage approach to assess surrogacy:\n##D Sur2 <- BifixedContCont(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat, \n##D Trial.ID=Trial.ID, Pat.ID=Pat.ID, Model=\"Reduced\", Weighted=FALSE)\n##D \n##D # Show summary and plots of results:\n##D summary(Sur2)\n##D plot(Sur2, Weighted=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"BimixedCbCContCont","snippet":"### Name: BimixedCbCContCont\n### Title: Fits a bivariate mixed-effects model using the\n### cluster-by-cluster (CbC) estimator to assess surrogacy in the\n### meta-analytic multiple-trial setting (Continuous-continuous case)\n### Aliases: BimixedCbCContCont\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy Individual-level surrogacy Mixed-effect models\n\n### ** Examples\n\n# Open the Schizo dataset (clinical trial in schizophrenic patients)\ndata(Schizo)\n\n# Fit a full bivariate random-effects model by the cluster-by-cluster (CbC) estimator,\n# requiring a minimum of 10 subjects per treatment group in each trial\n fit <- BimixedCbCContCont(Dataset=Schizo, Surr=BPRS, True=PANSS, Treat=Treat,Trial.ID=InvestId,\n Alpha=0.05, Min.Treat.Size = 10)\n# Note that an adjustment for non-positive definiteness was required and 113 trials were removed.\n\n# Obtain a summary of the results\nsummary(fit)\n \n\n\n"} {"package":"Surrogate","topic":"BimixedContCont","snippet":"### Name: BimixedContCont\n### Title: Fits a bivariate mixed-effects model to assess surrogacy in the\n### meta-analytic multiple-trial setting (Continuous-continuous case)\n### Aliases: BimixedContCont\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy Individual-level surrogacy Mixed-effect models\n\n### 
** Examples\n\n# Open the Schizo dataset (clinical trial in schizophrenic patients)\ndata(Schizo)\n\n## Not run: \n##D #Time consuming (>5 sec) code part\n##D # When a reduced bivariate mixed-effect model is used to assess surrogacy, \n##D # the condition number for the D matrix is very high: \n##D Sur <- BimixedContCont(Dataset=Schizo, Surr=BPRS, True=PANSS, Treat=Treat, Model=\"Reduced\", \n##D Trial.ID=InvestId, Pat.ID=Id)\n##D \n##D # Such problems often occur when the total number of patients, the total number \n##D # of trials and/or the trial-level heterogeneity\n##D # of the treatment effects is relatively small\n##D \n##D # As an alternative approach to assess surrogacy, consider using the functions\n##D # BifixedContCont, UnifixedContCont or UnimixedContCont in the meta-analytic framework,\n##D # or use the information-theoretic approach\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"Bootstrap.MEP.BinBin","snippet":"### Name: Bootstrap.MEP.BinBin\n### Title: Bootstrap 95% CI around the maximum-entropy ICA and SPF\n### (surrogate predictive function)\n### Aliases: Bootstrap.MEP.BinBin\n### Keywords: Causal-Inference framework Counterfactuals BinBin Maximum\n### Entropy Bootstrap\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D MEP_CI <- Bootstrap.MEP.BinBin(Data = Schizo_Bin, Surr = \"BPRS_Bin\", True = \"PANSS_Bin\",\n##D Treat = \"Treat\", M = 500, Seed=123)\n##D summary(MEP_CI)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"CausalDiagramBinBin","snippet":"### Name: CausalDiagramBinBin\n### Title: Draws a causal diagram depicting the median informational\n### coefficients of correlation (or odds ratios) between the\n### counterfactuals for a specified range of values of the ICA in the\n### binary-binary setting.\n### Aliases: CausalDiagramBinBin\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting\n\n### ** Examples\n\n# Compute R2_H given the marginals specified as the pi's\nICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.2619048, pi1_0_=0.2857143, \npi_1_1=0.6372549, pi_1_0=0.07843137, pi0_1_=0.1349206, pi_0_1=0.127451,\nSeed=1, Monotonicity=c(\"General\"), M=1000)\n\n# Obtain a causal diagram that provides the medians of the \n# correlations between the counterfactuals for the range\n# of R2_H values between 0.1 and 1\n # Assume no monotonicity \nCausalDiagramBinBin(x=ICA, Min=0.1, Max=1, Monotonicity=\"No\") \n\n # Assume monotonicity for S \nCausalDiagramBinBin(x=ICA, Min=0.1, Max=1, Monotonicity=\"Surr.Endp\") \n\n# Now only consider the results that were obtained when \n# monotonicity was assumed for the true endpoint\nCausalDiagramBinBin(x=ICA, Values=\"ORs\", Theta_T0S0=2.156, Theta_T1S1=10, \nMin=0, Max=1, Monotonicity=\"True.Endp\") \n\n\n"} {"package":"Surrogate","topic":"CausalDiagramContCont","snippet":"### Name: CausalDiagramContCont\n### Title: Draws a causal diagram depicting the median correlations between\n### the counterfactuals for a specified range of values of ICA or MICA in\n### the continuous-continuous setting\n### Aliases: CausalDiagramContCont\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Multiple-trial setting\n\n### ** Examples\n\n## Not run: \n##D #Time consuming (>5 sec) code parts\n##D # Generate the vector of ICA values when rho_T0S0=.95, rho_T1S1=.91, and when the\n##D # grid of values {0, .1, ..., 1} is considered for the correlations\n##D # between the counterfactuals:\n##D SurICA <- ICA.ContCont(T0S0=.95, T1S1=.91, T0T1=seq(0, 1, by=.1), 
T0S1=seq(0, 1, by=.1), \n##D T1S0=seq(0, 1, by=.1), S0S1=seq(0, 1, by=.1))\n##D \n##D # Obtain a plot of ICA\n##D \n##D # Obtain a causal diagram that provides the medians of the \n##D # correlations between the counterfactuals for the range\n##D # of ICA values between .9 and 1 (i.e., which assumed \n##D # correlations between the counterfactuals lead to a \n##D # high ICA?)\n##D CausalDiagramContCont(SurICA, Min=.9, Max=1)\n##D \n##D # Same, for low values of ICA\n##D CausalDiagramContCont(SurICA, Min=0, Max=.5)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ECT","snippet":"### Name: ECT\n### Title: Apply the Entropy Concentration Theorem\n### Aliases: ECT\n### Keywords: Counterfactuals Single-trial setting Binary endpoint Maximum\n### entropy Entropy Concentration Theorem\n\n### ** Examples\n\nECT_fit <- ECT(Perc = .05, H_Max = 1.981811, N=454)\nsummary(ECT_fit)\n\n\n"} {"package":"Surrogate","topic":"Fano.BinBin","snippet":"### Name: Fano.BinBin\n### Title: Evaluate the possibility of finding a good surrogate in the\n### setting where both S and T are binary endpoints\n### Aliases: Fano.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Fano ICA MarginalProbs\n\n### ** Examples\n\n# Conduct the analysis assuming no monotonicity\n# for the true endpoint, using a range of\n# upper bounds for prediction errors \nFano.BinBin(pi1_ = 0.5951, pi_1 = 0.7745, \nfano_delta=c(0.05, 0.1, 0.2), M=1000)\n\n\n# Conduct the same analysis now sampling from\n# a range of values to allow for uncertainty\n\nFano.BinBin(pi1_ = runif(n=20,min=0.504,max=0.681), \npi_1 = runif(n=20,min=0.679,max=0.849), \nfano_delta=c(0.05, 0.1, 0.2), M=10, Seed=2)\n\n\n"} {"package":"Surrogate","topic":"FixedBinBinIT","snippet":"### Name: FixedBinBinIT\n### Title: Fits (univariate) fixed-effect models to assess surrogacy in the\n### binary-binary case based on the Information-Theoretic framework\n### Aliases: FixedBinBinIT\n### Keywords: plot Information-Theoretic BinBin Multiple-trial setting\n### Information-theoretic framework Trial-level surrogacy\n### Individual-level surrogacy Likelihood Reduction Factor (LRF)\n### Fixed-effect models Binary endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Generate data with continuous Surr and True\n##D Sim.Data.MTS(N.Total=5000, N.Trial=50, R.Trial.Target=.9, R.Indiv.Target=.9,\n##D Fixed.Effects=c(0, 0, 0, 0), D.aa=10, D.bb=10, Seed=1,\n##D Model=c(\"Full\"))\n##D # Dichotomize Surr and True\n##D Surr_Bin <- Data.Observed.MTS$Surr\n##D Surr_Bin[Data.Observed.MTS$Surr>.5] <- 1\n##D Surr_Bin[Data.Observed.MTS$Surr<=.5] <- 0\n##D True_Bin <- Data.Observed.MTS$True\n##D True_Bin[Data.Observed.MTS$True>.15] <- 1\n##D True_Bin[Data.Observed.MTS$True<=.15] <- 0\n##D Data.Observed.MTS$Surr <- Surr_Bin\n##D Data.Observed.MTS$True <- True_Bin\n##D \n##D # Assess surrogacy using info-theoretic framework\n##D Fit <- FixedBinBinIT(Dataset = Data.Observed.MTS, Surr = Surr, \n##D True = True, Treat = Treat, Trial.ID = Trial.ID, \n##D Pat.ID = Pat.ID, Number.Bootstraps=100)\n##D \n##D # Examine results\n##D summary(Fit)\n##D plot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)\n##D plot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)\n## End(Not run)\n\n\n"} {"package":"shazam","topic":"FixedBinContIT","snippet":"### Name: FixedBinContIT\n### Title: Fits (univariate) fixed-effect models to assess surrogacy in the\n### case where the true endpoint is binary and the surrogate endpoint is\n### 
continuous (based on the Information-Theoretic framework)\n### Aliases: FixedBinContIT\n### Keywords: plot Information-Theoretic BinCont Multiple-trial setting\n### Information-theoretic framework Trial-level surrogacy\n### Individual-level surrogacy Likelihood Reduction Factor (LRF)\n### Fixed-effect models Binary endpoint Continuous endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Generate data with continuous Surr and True\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, \n##D R.Indiv.Target=.8, Seed=123, Model=\"Full\")\n##D \n##D # Make T binary\n##D Data.Observed.MTS$True_Bin <- Data.Observed.MTS$True\n##D Data.Observed.MTS$True_Bin[Data.Observed.MTS$True>=0] <- 1\n##D Data.Observed.MTS$True_Bin[Data.Observed.MTS$True<0] <- 0\n##D \n##D # Analyze data\n##D Fit <- FixedBinContIT(Dataset = Data.Observed.MTS, Surr = Surr, \n##D True = True_Bin, Treat = Treat, Trial.ID = Trial.ID, Pat.ID = Pat.ID, \n##D Model = \"Full\", Number.Bootstraps=50)\n##D \n##D # Examine results\n##D summary(Fit)\n##D plot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)\n##D plot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"FixedContBinIT","snippet":"### Name: FixedContBinIT\n### Title: Fits (univariate) fixed-effect models to assess surrogacy in the\n### case where the true endpoint is continuous and the surrogate endpoint\n### is binary (based on the Information-Theoretic framework)\n### Aliases: FixedContBinIT\n### Keywords: plot Information-Theoretic ContBin Multiple-trial setting\n### Information-theoretic framework Trial-level surrogacy\n### Individual-level surrogacy Likelihood Reduction Factor (LRF)\n### Fixed-effect models Binary endpoint Continuous endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Generate data with continuous Surr and True\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, \n##D R.Indiv.Target=.8, Seed=123, Model=\"Full\")\n##D \n##D # Make S binary\n##D Data.Observed.MTS$Surr_Bin <- Data.Observed.MTS$Surr\n##D Data.Observed.MTS$Surr_Bin[Data.Observed.MTS$Surr>=0] <- 1\n##D Data.Observed.MTS$Surr_Bin[Data.Observed.MTS$Surr<0] <- 0\n##D \n##D # Analyze data\n##D Fit <- FixedContBinIT(Dataset = Data.Observed.MTS, Surr = Surr_Bin, \n##D True = True, Treat = Treat, Trial.ID = Trial.ID, Pat.ID = Pat.ID, \n##D Model = \"Full\", Number.Bootstraps=50)\n##D \n##D # Examine results\n##D summary(Fit)\n##D plot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)\n##D plot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"FixedContContIT","snippet":"### Name: FixedContContIT\n### Title: Fits (univariate) fixed-effect models to assess surrogacy in the\n### continuous-continuous case based on the Information-Theoretic\n### framework\n### Aliases: FixedContContIT\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy Individual-level surrogacy Likelihood Reduction\n### Factor (LRF) Fixed-effect models\n\n### ** Examples\n\n# Example 1\n# Based on the ARMD data\n\ndata(ARMD)\n# Assess surrogacy based on a full fixed-effect model\n# in the information-theoretic framework:\nSur <- FixedContContIT(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center,\nPat.ID=Id, Model=\"Full\", Number.Bootstraps=50)\n# Obtain a summary of the results:\nsummary(Sur)\n\n## Not run: \n##D #time consuming code\n##D # Example 
2\n##D # Conduct an analysis based on a simulated dataset with 2000 patients, 100 trials,\n##D # and Rindiv=Rtrial=.8\n##D \n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Full\")\n##D # Assess surrogacy based on a full fixed-effect model\n##D # in the information-theoretic framework:\n##D Sur2 <- FixedContContIT(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat,\n##D Trial.ID=Trial.ID, Pat.ID=Pat.ID, Model=\"Full\", Number.Bootstraps=50)\n##D \n##D # Show a summary of the results:\n##D summary(Sur2)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"FixedDiscrDiscrIT","snippet":"### Name: FixedDiscrDiscrIT\n### Title: Investigates surrogacy for binary or ordinal outcomes using the\n### Information Theoretic framework\n### Aliases: FixedDiscrDiscrIT\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy Individual-level surrogacy Likelihood Reduction\n### Factor (LRF) Fixed-effect models\n\n### ** Examples\n\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Example 1\n##D # Conduct an analysis based on a simulated dataset with 2000 patients, 100 trials,\n##D # and Rindiv=Rtrial=.8\n##D \n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Full\")\n##D \n##D # create a binary true and ordinal surrogate outcome\n##D Data.Observed.MTS$True<-findInterval(Data.Observed.MTS$True, \n##D c(quantile(Data.Observed.MTS$True,0.5)))\n##D Data.Observed.MTS$Surr<-findInterval(Data.Observed.MTS$Surr, \n##D c(quantile(Data.Observed.MTS$Surr,0.333),quantile(Data.Observed.MTS$Surr,0.666)))\n##D \n##D # Assess surrogacy based on a full fixed-effect model\n##D # in the information-theoretic framework for a binary surrogate and ordinal true outcome:\n##D SurEval <- FixedDiscrDiscrIT(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat,\n##D Trial.ID=Trial.ID, Setting=\"ordbin\")\n##D \n##D # Show a summary of the results:\n##D summary(SurEval)\n##D SurEval$Trial.Spec.Results\n##D SurEval$R2h\n##D SurEval$R2ht\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinBin.Grid.Full","snippet":"### Name: ICA.BinBin.Grid.Full\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-binary case when monotonicity for S and T is assumed using\n### the full grid-based approach\n### Aliases: ICA.BinBin.Grid.Full\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA MarginalProbs\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D # Compute R2_H given the marginals, \n##D # assuming monotonicity for S and T and grids\n##D # pi_0111=seq(0, 1, by=.001) and \n##D # pi_1100=seq(0, 1, by=.001)\n##D ICA <- ICA.BinBin.Grid.Full(pi1_1_=0.2619048, pi1_0_=0.2857143, pi_1_1=0.6372549, \n##D pi_1_0=0.07843137, pi0_1_=0.1349206, pi_0_1=0.127451, \n##D pi_0111=seq(0, 1, by=.01), pi_1100=seq(0, 1, by=.01), Seed=1)\n##D \n##D # obtain plot of R2_H\n##D plot(ICA, R2_H=TRUE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinBin.Grid.Sample","snippet":"### Name: ICA.BinBin.Grid.Sample\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-binary case when monotonicity for S and T is assumed using\n### the grid-based sample approach\n### Aliases: ICA.BinBin.Grid.Sample\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### 
setting Sensitivity ICA MarginalProbs\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Compute R2_H given the marginals, \n##D # assuming monotonicity for S and T and grids\n##D # pi_0111=seq(0, 1, by=.001) and \n##D # pi_1100=seq(0, 1, by=.001)\n##D ICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.261, pi1_0_=0.285, \n##D pi_1_1=0.637, pi_1_0=0.078, pi0_1_=0.134, pi_0_1=0.127, \n##D Monotonicity=c(\"Surr.True.Endp\"), M=2500, Seed=1)\n##D \n##D # obtain plot of R2_H\n##D plot(ICA, R2_H=TRUE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinBin.Grid.Sample.Uncert","snippet":"### Name: ICA.BinBin.Grid.Sample.Uncert\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-binary case when monotonicity for S and T is assumed using\n### the grid-based sample approach, accounting for sampling variability\n### in the marginal pi.\n### Aliases: ICA.BinBin.Grid.Sample.Uncert\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA MarginalProbs\n\n### ** Examples\n\n# Compute R2_H given the marginals (sample from uniform), \n# assuming no monotonicity \nICA_No2 <- ICA.BinBin.Grid.Sample.Uncert(pi1_1_=runif(10000, 0.3562, 0.4868),\npi0_1_=runif(10000, 0.0240, 0.0837), pi1_0_=runif(10000, 0.0240, 0.0837),\npi_1_1=runif(10000, 0.4434, 0.5742), pi_1_0=runif(10000, 0.0081, 0.0533),\npi_0_1=runif(10000, 0.0202, 0.0763), Seed=1, Monotonicity=c(\"No\"), M=1000)\n\nsummary(ICA_No2)\n\n# obtain plot of R2_H\nplot(ICA_No2)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinCont.BS","snippet":"### Name: ICA.BinCont.BS\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-continuous case with an additional bootstrap procedure\n### before the assessment\n### Aliases: ICA.BinCont.BS\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Continuous endpoint Binary endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming code part\n##D data(Schizo)\n##D Fit <- ICA.BinCont.BS(Dataset = Schizo, Surr = BPRS, True = PANSS_Bin, nb = 10, \n##D Theta.S_0=c(-10,-5,5,10,10,10,10,10), Theta.S_1=c(-10,-5,5,10,10,10,10,10), \n##D Treat=Treat, M=50, Seed=1)\n##D \n##D summary(Fit)\n##D plot(Fit)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.ContCont.MultS.MPC","snippet":"### Name: ICA.ContCont.MultS.MPC\n### Title: Assess surrogacy in the causal-inference single-trial setting\n### (Individual Causal Association, ICA) using a continuous univariate T\n### and multiple continuous S, by simulating correlation matrices using a\n### modified algorithm based on partial correlations\n### Aliases: ICA.ContCont.MultS.MPC\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Multiple surrogates Multivariate\n\n### ** Examples\n\n## Not run: \n##D \n##D # Specify matrix Sigma (var-cavar matrix T_0, T_1, S1_0, S1_1, ...)\n##D # here we have 1 true endpoint and 10 surrogates (8 of these are non-informative)\n##D \n##D Sigma = ks::invvech(\n##D c(25, NA, 17.8, NA, -10.6, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, \n##D 4, NA, -0.32, NA, -1.32, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, 16, \n##D NA, -4, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 1, NA, 0.48, NA, \n##D 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, 16, NA, 0, NA, 0, NA, 0, NA, 0, \n##D NA, 0, NA, 0, NA, 0, NA, 0, NA, 1, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, NA, 0, 
\n##D NA, 0, 16, NA, 8, NA, 8, NA, 8, NA, 8, NA, 8, NA, 8, NA, 8, NA, 1, NA, 0.5, NA, 0.5, \n##D NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, 16, NA, 8, NA, 8, NA, 8, NA, 8, NA, 8, \n##D NA, 8, NA, 1, NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, 16, NA, 8, NA, \n##D 8, NA, 8, NA, 8, NA, 8, NA, 1,NA,0.5,NA,0.5,NA,0.5,NA,0.5,NA,0.5, 16, NA, 8, NA, 8, \n##D NA, 8, NA, 8, NA, 1, NA, 0.5, NA, 0.5, NA, 0.5, NA, 0.5, 16, NA, 8, NA, 8, NA, 8, NA,\n##D 1, NA, 0.5, NA, 0.5, NA, 0.5, 16, NA, 8, NA, 8, NA, 1, NA, 0.5, NA, 0.5, 16, NA, 8, NA,\n##D 1, NA, 0.5, 16, NA, 1)) \n##D \n##D # Conduct analysis using the PC and MPC algorithm \n##D ## first evaluating two surrogates\n##D ICA.PC.2 = ICA.ContCont.MultS.PC(M = 30000, N=200, Sigma[1:6,1:6], Seed = 123) \n##D ICA.MPC.2 = ICA.ContCont.MultS.MPC(M = 30000, N=200, Sigma[1:6,1:6],prob=NULL, \n##D Seed = 123, Save.Corr=T, Show.Progress = TRUE) \n##D \n##D ## later evaluating two surrogates\n##D ICA.PC.10 = ICA.ContCont.MultS.PC(M = 150000, N=200, Sigma, Seed = 123) \n##D ICA.MPC.10 = ICA.ContCont.MultS.MPC(M = 150000, N=200, Sigma,prob=NULL, \n##D Seed = 123, Save.Corr=T, Show.Progress = TRUE) \n##D \n##D # Explore results\n##D range(ICA.PC.2$R2_H)\n##D range(ICA.PC.10$R2_H)\n##D \n##D range(ICA.MPC.2$R2_H)\n##D range(ICA.MPC.10$R2_H)\n##D ## as we observe, the MPC algorithm displays a wider interval of possible values for the ICA\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.ContCont.MultS.PC","snippet":"### Name: ICA.ContCont.MultS.PC\n### Title: Assess surrogacy in the causal-inference single-trial setting\n### (Individual Causal Association, ICA) using a continuous univariate T\n### and multiple continuous S, by simulating correlation matrices using\n### an algorithm based on partial correlations\n### Aliases: ICA.ContCont.MultS.PC\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Multiple surrogates Multivariate\n\n### ** Examples\n\n## Not run: \n##D \n##D # Specify matrix Sigma (var-cavar matrix T_0, T_1, S1_0, S1_1, ...)\n##D # here for 1 true endpoint and 3 surrogates\n##D \n##D s<-matrix(rep(NA, times=64),8)\n##D s[1,1] <- 450; s[2,2] <- 413.5; s[3,3] <- 174.2; s[4,4] <- 157.5; \n##D s[5,5] <- 244.0; s[6,6] <- 229.99; s[7,7] <- 294.2; s[8,8] <- 302.5\n##D s[3,1] <- 160.8; s[5,1] <- 208.5; s[7,1] <- 268.4 \n##D s[4,2] <- 124.6; s[6,2] <- 212.3; s[8,2] <- 287.1\n##D s[5,3] <- 160.3; s[7,3] <- 142.8 \n##D s[6,4] <- 134.3; s[8,4] <- 130.4 \n##D s[7,5] <- 209.3; \n##D s[8,6] <- 214.7 \n##D s[upper.tri(s)] = t(s)[upper.tri(s)]\n##D \n##D # Marix looks like (NA indicates unidentified covariances):\n##D # T_0 T_1 S1_0 S1_1 S2_0 S2_1 S2_0 S2_1\n##D # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n##D # T_0 [1,] 450.0 NA 160.8 NA 208.5 NA 268.4 NA\n##D # T_1 [2,] NA 413.5 NA 124.6 NA 212.30 NA 287.1\n##D # S1_0 [3,] 160.8 NA 174.2 NA 160.3 NA 142.8 NA\n##D # S1_1 [4,] NA 124.6 NA 157.5 NA 134.30 NA 130.4\n##D # S2_0 [5,] 208.5 NA 160.3 NA 244.0 NA 209.3 NA\n##D # S2_1 [6,] NA 212.3 NA 134.3 NA 229.99 NA 214.7\n##D # S3_0 [7,] 268.4 NA 142.8 NA 209.3 NA 294.2 NA\n##D # S3_1 [8,] NA 287.1 NA 130.4 NA 214.70 NA 302.5\n##D \n##D # Conduct analysis\n##D ICA <- ICA.ContCont.MultS.PC(M=1000, N=200, Show.Progress = TRUE,\n##D Sigma=s, Seed=c(123))\n##D \n##D # Explore results\n##D summary(ICA)\n##D plot(ICA)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.ContCont.MultS","snippet":"### Name: ICA.ContCont.MultS\n### Title: Assess surrogacy in the causal-inference single-trial 
setting\n### (Individual Causal Association, ICA) using a continuous univariate T\n### and multiple continuous S\n### Aliases: ICA.ContCont.MultS\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Multiple surrogates Multivariate\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Specify matrix Sigma (var-cavar matrix T_0, T_1, S1_0, S1_1, ...)\n##D # here for 1 true endpoint and 3 surrogates\n##D \n##D s<-matrix(rep(NA, times=64),8)\n##D s[1,1] <- 450; s[2,2] <- 413.5; s[3,3] <- 174.2; s[4,4] <- 157.5; \n##D s[5,5] <- 244.0; s[6,6] <- 229.99; s[7,7] <- 294.2; s[8,8] <- 302.5\n##D s[3,1] <- 160.8; s[5,1] <- 208.5; s[7,1] <- 268.4 \n##D s[4,2] <- 124.6; s[6,2] <- 212.3; s[8,2] <- 287.1\n##D s[5,3] <- 160.3; s[7,3] <- 142.8 \n##D s[6,4] <- 134.3; s[8,4] <- 130.4 \n##D s[7,5] <- 209.3; \n##D s[8,6] <- 214.7 \n##D s[upper.tri(s)] = t(s)[upper.tri(s)]\n##D \n##D # Marix looks like (NA indicates unidentified covariances):\n##D # T_0 T_1 S1_0 S1_1 S2_0 S2_1 S2_0 S2_1\n##D # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n##D # T_0 [1,] 450.0 NA 160.8 NA 208.5 NA 268.4 NA\n##D # T_1 [2,] NA 413.5 NA 124.6 NA 212.30 NA 287.1\n##D # S1_0 [3,] 160.8 NA 174.2 NA 160.3 NA 142.8 NA\n##D # S1_1 [4,] NA 124.6 NA 157.5 NA 134.30 NA 130.4\n##D # S2_0 [5,] 208.5 NA 160.3 NA 244.0 NA 209.3 NA\n##D # S2_1 [6,] NA 212.3 NA 134.3 NA 229.99 NA 214.7\n##D # S3_0 [7,] 268.4 NA 142.8 NA 209.3 NA 294.2 NA\n##D # S3_1 [8,] NA 287.1 NA 130.4 NA 214.70 NA 302.5\n##D \n##D # Conduct analysis\n##D ICA <- ICA.ContCont.MultS(M=100, N=200, Show.Progress = TRUE,\n##D Sigma=s, G = seq(from=-1, to=1, by = .00001), Seed=c(123))\n##D \n##D # Explore results\n##D summary(ICA)\n##D plot(ICA)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.ContCont.MultS_alt","snippet":"### Name: ICA.ContCont.MultS_alt\n### Title: Assess surrogacy in the causal-inference single-trial setting\n### (Individual Causal Association, ICA) using a continuous univariate T\n### and multiple continuous S, alternative approach\n### Aliases: ICA.ContCont.MultS_alt\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Multiple surrogates Multivariate\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Specify matrix Sigma (var-cavar matrix T_0, T_1, S1_0, S1_1, ...)\n##D # here for 1 true endpoint and 3 surrogates\n##D \n##D s<-matrix(rep(NA, times=64),8)\n##D s[1,1] <- 450; s[2,2] <- 413.5; s[3,3] <- 174.2; s[4,4] <- 157.5; \n##D s[5,5] <- 244.0; s[6,6] <- 229.99; s[7,7] <- 294.2; s[8,8] <- 302.5\n##D s[3,1] <- 160.8; s[5,1] <- 208.5; s[7,1] <- 268.4 \n##D s[4,2] <- 124.6; s[6,2] <- 212.3; s[8,2] <- 287.1\n##D s[5,3] <- 160.3; s[7,3] <- 142.8 \n##D s[6,4] <- 134.3; s[8,4] <- 130.4 \n##D s[7,5] <- 209.3; \n##D s[8,6] <- 214.7 \n##D s[upper.tri(s)] = t(s)[upper.tri(s)]\n##D \n##D # Marix looks like (NA indicates unidentified covariances):\n##D # T_0 T_1 S1_0 S1_1 S2_0 S2_1 S2_0 S2_1\n##D # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n##D # T_0 [1,] 450.0 NA 160.8 NA 208.5 NA 268.4 NA\n##D # T_1 [2,] NA 413.5 NA 124.6 NA 212.30 NA 287.1\n##D # S1_0 [3,] 160.8 NA 174.2 NA 160.3 NA 142.8 NA\n##D # S1_1 [4,] NA 124.6 NA 157.5 NA 134.30 NA 130.4\n##D # S2_0 [5,] 208.5 NA 160.3 NA 244.0 NA 209.3 NA\n##D # S2_1 [6,] NA 212.3 NA 134.3 NA 229.99 NA 214.7\n##D # S3_0 [7,] 268.4 NA 142.8 NA 209.3 NA 294.2 NA\n##D # S3_1 [8,] NA 287.1 NA 130.4 NA 214.70 NA 302.5\n##D \n##D # Conduct analysis\n##D ICA <- ICA.ContCont.MultS_alt(M=100, 
N=200, Show.Progress = TRUE,\n##D Sigma=s, G = seq(from=-1, to=1, by = .00001), Seed=c(123), \n##D Model = \"Delta_T ~ Delta_S1 + Delta_S2 + Delta_S3\")\n##D \n##D # Explore results\n##D summary(ICA)\n##D plot(ICA)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.Sample.ContCont","snippet":"### Name: ICA.Sample.ContCont\n### Title: Assess surrogacy in the causal-inference single-trial setting\n### (Individual Causal Association, ICA) in the Continuous-continuous\n### case using the grid-based sample approach\n### Aliases: ICA.Sample.ContCont\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA\n\n### ** Examples\n\n# Generate the vector of ICA values when rho_T0S0=rho_T1S1=.95, \n# sigma_T0T0=90, sigma_T1T1=100, sigma_S0S0=10, sigma_S1S1=15, and \n# the range min=-1, max=1 is considered for the correlations\n# between the counterfactuals:\nSurICA2 <- ICA.Sample.ContCont(T0S0=.95, T1S1=.95, T0T0=90, T1T1=100, S0S0=10, \nS1S1=15, M=5000)\n\n# Examine and plot the vector of generated ICA values:\nsummary(SurICA2)\nplot(SurICA2)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinBin","snippet":"### Name: ICA.BinBin\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-binary case\n### Aliases: ICA.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA MarginalProbs\n\n### ** Examples\n\n## Not run: \n##D # Time consuming code part\n##D # Compute R2_H given the marginals specified as the pi's, making no \n##D # assumptions regarding monotonicity (general case)\n##D ICA <- ICA.BinBin(pi1_1_=0.2619048, pi1_0_=0.2857143, pi_1_1=0.6372549, \n##D pi_1_0=0.07843137, pi0_1_=0.1349206, pi_0_1=0.127451, Seed=1, \n##D Monotonicity=c(\"General\"), Sum_Pi_f = seq(from=0.01, to=.99, by=.01), M=10000)\n##D \n##D # obtain plot of the results\n##D plot(ICA, R2_H=TRUE)\n##D \n##D # Example 2 where the uncertainty in the estimation \n##D # of the marginals is taken into account\n##D ICA_BINBIN2 <- ICA.BinBin(pi1_1_=runif(10000, 0.2573, 0.4252), \n##D pi1_0_=runif(10000, 0.1769, 0.3310), \n##D pi_1_1=runif(10000, 0.5947, 0.7779), \n##D pi_1_0=runif(10000, 0.0322, 0.1442), \n##D pi0_1_=runif(10000, 0.0617, 0.1764), \n##D pi_0_1=runif(10000, 0.0254, 0.1315),\n##D Monotonicity=c(\"General\"), \n##D Sum_Pi_f = seq(from=0.01, to=0.99, by=.01), \n##D M=50000, Seed=1)\n##D \n##D # Plot results\n##D plot(ICA_BINBIN2)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.BinBin.CounterAssum","snippet":"### Name: ICA.BinBin.CounterAssum\n### Title: ICA (binary-binary setting) that is obtained when the\n### counterfactual correlations are assumed to fall within some\n### prespecified ranges.\n### Aliases: ICA.BinBin.CounterAssum\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting\n\n### ** Examples\n\n## Not run: \n##D #Time consuming (>5 sec) code part\n##D # Compute R2_H given the marginals specified as the pi's, making no \n##D # assumptions regarding monotonicity (general case)\n##D ICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.261, pi1_0_=0.285, \n##D pi_1_1=0.637, pi_1_0=0.078, pi0_1_=0.134, pi_0_1=0.127, \n##D Monotonicity=c(\"General\"), M=5000, Seed=1)\n##D \n##D # Obtain a density plot of R2_H, assuming that \n##D # r2_h_S0S1>=.2, r2_h_S0T1>=0, r2_h_T0T1>=.2, and r2_h_T0S1>=0\n##D ICA.BinBin.CounterAssum(ICA, r2_h_S0S1_min=.2, r2_h_S0S1_max=1, \n##D r2_h_S0T1_min=0, r2_h_S0T1_max=1, r2_h_T0T1_min=0.2, r2_h_T0T1_max=1, \n##D r2_h_T0S1_min=0, 
r2_h_T0S1_max=1, Monotonicity=\"General\",\n##D Type=\"Density\") \n##D \n##D # Now show the densities of R2_H under the different \n##D # monotonicity assumptions \n##D ICA.BinBin.CounterAssum(ICA, r2_h_S0S1_min=.2, r2_h_S0S1_max=1, \n##D r2_h_S0T1_min=0, r2_h_S0T1_max=1, r2_h_T0T1_min=0.2, r2_h_T0T1_max=1, \n##D r2_h_T0S1_min=0, r2_h_T0S1_max=1, Monotonicity=\"General\",\n##D Type=\"All.Densities\", MainPlot=\" \", Cex.Legend=1, \n##D Cex.Position=\"topright\", ylim=c(0, 20)) \n## End(Not run)\n\n"} {"package":"Surrogate","topic":"ICA.BinCont","snippet":"### Name: ICA.BinCont\n### Title: Assess surrogacy in the causal-inference single-trial setting in\n### the binary-continuous case\n### Aliases: ICA.BinCont\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA Continuous endpoint Binary endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming code part\n##D data(Schizo)\n##D Fit <- ICA.BinCont(Dataset = Schizo, Surr = BPRS, True = PANSS_Bin, \n##D Theta.S_0=c(-10,-5,5,10,10,10,10,10), Theta.S_1=c(-10,-5,5,10,10,10,10,10), \n##D Treat=Treat, M=50, Seed=1)\n##D \n##D summary(Fit)\n##D plot(Fit)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ICA.ContCont","snippet":"### Name: ICA.ContCont\n### Title: Assess surrogacy in the causal-inference single-trial setting\n### (Individual Causal Association, ICA) in the Continuous-continuous\n### case\n### Aliases: ICA.ContCont\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity ICA\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Generate the vector of ICA.ContCont values when rho_T0S0=rho_T1S1=.95, \n##D # sigma_T0T0=90, sigma_T1T1=100,sigma_ S0S0=10, sigma_S1S1=15, and \n##D # the grid of values {0, .2, ..., 1} is considered for the correlations\n##D # between the counterfactuals:\n##D SurICA <- ICA.ContCont(T0S0=.95, T1S1=.95, T0T0=90, T1T1=100, S0S0=10, S1S1=15,\n##D T0T1=seq(0, 1, by=.2), T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), \n##D S0S1=seq(0, 1, by=.2))\n##D \n##D # Examine and plot the vector of generated ICA values:\n##D summary(SurICA)\n##D plot(SurICA)\n##D \n##D # Obtain the positive definite matrices than can be formed as based on the \n##D # specified (vectors) of the correlations (these matrices are used to \n##D # compute the ICA values)\n##D SurICA$Pos.Def\n##D \n##D # Same, but specify vectors for rho_T0S0 and rho_T1S1: Sample from\n##D # normal with mean .95 and SD=.05 (to account for uncertainty \n##D # in estimation) \n##D SurICA2 <- ICA.ContCont(T0S0=rnorm(n=10000000, mean=.95, sd=.05), \n##D T1S1=rnorm(n=10000000, mean=.95, sd=.05), \n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15,\n##D T0T1=seq(0, 1, by=.2), T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), \n##D S0S1=seq(0, 1, by=.2))\n##D \n##D # Examine results\n##D summary(SurICA2)\n##D plot(SurICA2)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"ISTE.ContCont","snippet":"### Name: ISTE.ContCont\n### Title: Individual-level surrogate threshold effect for continuous\n### normally distributed surrogate and true endpoints.\n### Aliases: ISTE.ContCont\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Sensitivity Individual-level surrogate threshold effect ISTE\n\n### ** Examples\n\n# Define input for analysis using the Schizo dataset, \n# with S=BPRS and T = PANSS. 
\n# For each of the identifiable quantities,\n# uncertainty is accounted for by specifying a uniform\n# distribution with min, max values corresponding to\n# the 95% confidence interval of the quantity.\nT0S0 <- runif(min = 0.9524, max = 0.9659, n = 1000)\nT1S1 <- runif(min = 0.9608, max = 0.9677, n = 1000)\n\nS0S0 <- runif(min=160.811, max=204.5009, n=1000)\nS1S1 <- runif(min=168.989, max = 194.219, n=1000)\nT0T0 <- runif(min=484.462, max = 616.082, n=1000)\nT1T1 <- runif(min=514.279, max = 591.062, n=1000)\n\nMean_T0 <- runif(min=-13.455, max=-9.489, n=1000)\nMean_T1 <- runif(min=-17.17, max=-14.86, n=1000)\nMean_S0 <- runif(min=-7.789, max=-5.503, n=1000)\nMean_S1 <- runif(min=-9.600, max=-8.276, n=1000)\n\n# Do the ISTE analysis\n## Not run: \n##D ISTE <- ISTE.ContCont(Mean_T1=Mean_T1, Mean_T0=Mean_T0, \n##D Mean_S1=Mean_S1, Mean_S0=Mean_S0, N=2128, Delta_S=c(-50:50), \n##D zeta.PI=0.05, PI.Bound=0, Show.Prediction.Plots=TRUE,\n##D Save.Plots=\"No\", T0S0=T0S0, T1S1=T1S1, T0T0=T0T0, T1T1=T1T1, \n##D S0S0=S0S0, S1S1=S1S1)\n##D \n##D # Examine results:\n##D summary(ISTE)\n##D \n##D # Plots of results. \n##D # Plot ISTE\n##D plot(ISTE)\n##D # Other plots, see plot.ISTE.ContCont for details\n##D plot(ISTE, Outcome=\"MSE\")\n##D plot(ISTE, Outcome=\"gamma0\")\n##D plot(ISTE, Outcome=\"gamma1\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT.Low.PI\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT.Up.PI\")\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"LongToWide","snippet":"### Name: LongToWide\n### Title: Reshapes a dataset from the 'long' format (i.e., multiple lines\n### per patient) into the 'wide' format (i.e., one line per patient)\n### Aliases: LongToWide\n### Keywords: Transpose dataset\n\n### ** Examples\n\n# Generate a dataset in the 'long' format that contains \n# S and T values for 100 patients\nOutcome <- rep(x=c(0, 1), times=100)\nID <- rep(seq(1:100), each=2)\nTreat <- rep(seq(c(0,1)), each=100)\nOutcomes <- as.numeric(matrix(rnorm(1*200, mean=100, sd=10), \n ncol=200))\nData <- data.frame(cbind(Outcome, ID, Treat, Outcomes))\n\n# Reshapes the Data object \nLongToWide(Dataset=Data, OutcomeIndicator=Outcome, IdIndicator=ID, \n TreatIndicator=Treat, OutcomeValue=Outcomes)\n\n\n"} {"package":"Surrogate","topic":"MICA.Sample.ContCont","snippet":"### Name: MICA.Sample.ContCont\n### Title: Assess surrogacy in the causal-inference multiple-trial setting\n### (Meta-analytic Individual Causal Association; MICA) in the\n### continuous-continuous case using the grid-based sample approach\n### Aliases: MICA.Sample.ContCont\n### Keywords: Causal-Inference framework Counterfactuals Multiple-trial\n### setting Sensitivity ICA MICA\n\n### ** Examples\n\n## Not run: \n##D #Time consuming (>5 sec) code part\n##D # Generate the vector of MICA values when R_trial=.8, rho_T0S0=rho_T1S1=.8,\n##D # sigma_T0T0=90, sigma_T1T1=100,sigma_ S0S0=10, sigma_S1S1=15, D.aa=5, D.bb=10, \n##D # and when the grid of values {-1, -0.999, ..., 1} is considered for the \n##D # correlations between the counterfactuals:\n##D SurMICA <- MICA.Sample.ContCont(Trial.R=.80, D.aa=5, D.bb=10, T0S0=.8, T1S1=.8,\n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15, T0T1=seq(-1, 1, by=.001), \n##D T0S1=seq(-1, 1, by=.001), T1S0=seq(-1, 1, by=.001),\n##D S0S1=seq(-1, 1, by=.001), M=10000)\n##D \n##D # Examine and plot the vector of the generated MICA values:\n##D summary(SurMICA)\n##D plot(SurMICA, ICA=FALSE, MICA=TRUE)\n##D \n##D \n##D # Same analysis, but now assume that D.aa=.5 and D.bb=.1:\n##D SurMICA <- 
MICA.Sample.ContCont(Trial.R=.80, D.aa=.5, D.bb=.1, T0S0=.8, T1S1=.8,\n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15, T0T1=seq(-1, 1, by=.001), \n##D T0S1=seq(-1, 1, by=.001), T1S0=seq(-1, 1, by=.001),\n##D S0S1=seq(-1, 1, by=.001), M=10000)\n##D \n##D # Examine and plot the vector of the generated MICA values:\n##D summary(SurMICA)\n##D plot(SurMICA)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"MICA.ContCont","snippet":"### Name: MICA.ContCont\n### Title: Assess surrogacy in the causal-inference multiple-trial setting\n### (Meta-analytic Individual Causal Association; MICA) in the\n### continuous-continuous case\n### Aliases: MICA.ContCont\n### Keywords: Causal-Inference framework Counterfactuals Multiple-trial\n### setting Sensitivity ICA MICA\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Generate the vector of MICA values when R_trial=.8, rho_T0S0=rho_T1S1=.8,\n##D # sigma_T0T0=90, sigma_T1T1=100,sigma_ S0S0=10, sigma_S1S1=15, D.aa=5, D.bb=10, \n##D # and when the grid of values {0, .2, ..., 1} is considered for the \n##D # correlations between the counterfactuals:\n##D SurMICA <- MICA.ContCont(Trial.R=.80, D.aa=5, D.bb=10, T0S0=.8, T1S1=.8,\n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15, T0T1=seq(0, 1, by=.2), \n##D T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), S0S1=seq(0, 1, by=.2))\n##D \n##D # Examine and plot the vector of the generated MICA values:\n##D summary(SurMICA)\n##D plot(SurMICA)\n##D \n##D \n##D # Same analysis, but now assume that D.aa=.5 and D.bb=.1:\n##D SurMICA <- MICA.ContCont(Trial.R=.80, D.aa=.5, D.bb=.1, T0S0=.8, T1S1=.8,\n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15, T0T1=seq(0, 1, by=.2), \n##D T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), S0S1=seq(0, 1, by=.2))\n##D \n##D # Examine and plot the vector of the generated MICA values:\n##D summary(SurMICA)\n##D plot(SurMICA)\n##D \n##D \n##D # Same as first analysis, but specify vectors for rho_T0S0 and rho_T1S1: \n##D # Sample from normal with mean .8 and SD=.1 (to account for uncertainty \n##D # in estimation) \n##D SurMICA <- MICA.ContCont(Trial.R=.80, D.aa=5, D.bb=10, \n##D T0S0=rnorm(n=10000000, mean=.8, sd=.1), \n##D T1S1=rnorm(n=10000000, mean=.8, sd=.1),\n##D T0T0=90, T1T1=100, S0S0=10, S1S1=15, T0T1=seq(0, 1, by=.2), \n##D T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), S0S1=seq(0, 1, by=.2))\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"MarginalProbs","snippet":"### Name: MarginalProbs\n### Title: Computes marginal probabilities for a dataset where the\n### surrogate and true endpoints are binary\n### Aliases: MarginalProbs\n### Keywords: Marginal probabilities\n\n### ** Examples\n\n# Open the ARMD dataset and recode Diff24 and Diff52 as 1\n# when the original value is above 0, and 0 otherwise\ndata(ARMD)\nARMD$Diff24_Dich <- ifelse(ARMD$Diff24>0, 1, 0)\nARMD$Diff52_Dich <- ifelse(ARMD$Diff52>0, 1, 0)\n\n# Obtain marginal probabilities and ORs\nMarginalProbs(Dataset=ARMD, Surr=Diff24_Dich, True=Diff52_Dich, \nTreat=Treat)\n\n\n\n\n\n"} {"package":"Surrogate","topic":"MaxEntICABinBin","snippet":"### Name: MaxEntICABinBin\n### Title: Use the maximum-entropy approach to compute ICA in the\n### binary-binary setting\n### Aliases: MaxEntICABinBin\n### Keywords: Causal-Inference framework Counterfactuals BinBin Maximum\n### Entropy\n\n### ** Examples\n\n# Sensitivity-based ICA results using ICA.BinBin.Grid.Sample\nICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1, \nMonotonicity=c(\"No\"), M=5000)\n\n# 
Maximum-entropy based ICA\nMaxEnt <- MaxEntICABinBin(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078)\n\n# Explore maximum-entropy results\nsummary(MaxEnt)\n\n# Plot results\nplot(x=MaxEnt, ICA.Fit=ICA)\n\n\n"} {"package":"Surrogate","topic":"MaxEntContCont","snippet":"### Name: MaxEntContCont\n### Title: Use the maximum-entropy approach to compute ICA in the\n### continuous-continuous single-trial setting\n### Aliases: MaxEntContCont\n### Keywords: Causal-Inference framework Counterfactuals ContCont Maximum\n### Entropy\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Compute ICA for ARMD dataset, using the grid \n##D # G={-1, -.80, ..., 1} for the unidentifiable correlations\n##D \n##D ICA <- ICA.ContCont(T0S0 = 0.769, T1S1 = 0.712, S0S0 = 188.926, \n##D S1S1 = 132.638, T0T0 = 264.797, T1T1 = 231.771, \n##D T0T1 = seq(-1, 1, by = 0.2), T0S1 = seq(-1, 1, by = 0.2), \n##D T1S0 = seq(-1, 1, by = 0.2), S0S1 = seq(-1, 1, by = 0.2))\n##D \n##D # Identify the maximum entropy ICA\n##D MaxEnt_ARMD <- MaxEntContCont(x = ICA, S0S0 = 188.926, \n##D S1S1 = 132.638, T0T0 = 264.797, T1T1 = 231.771)\n##D \n##D # Explore results using summary() and plot() functions\n##D summary(MaxEnt_ARMD)\n##D plot(MaxEnt_ARMD)\n##D plot(MaxEnt_ARMD, Entropy.By.ICA = TRUE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"MaxEntSPFBinBin","snippet":"### Name: MaxEntSPFBinBin\n### Title: Use the maximum-entropy approach to compute SPF (surrogate\n### predictive function) in the binary-binary setting\n### Aliases: MaxEntSPFBinBin\n### Keywords: Causal-Inference framework Counterfactuals BinBin Maximum\n### Entropy\n\n### ** Examples\n\n# Sensitivity-based ICA results using ICA.BinBin.Grid.Sample\nICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1, \nMonotonicity=c(\"No\"), M=5000)\n\n# Sensitivity-based SPF\nSPFSens <- SPF.BinBin(ICA)\n\n# Maximum-entropy based SPF\nSPFMaxEnt <- MaxEntSPFBinBin(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078)\n\n# Explore maximum-entropy results\nsummary(SPFMaxEnt)\n\n# Plot results\nplot(x=SPFMaxEnt, SPF.Fit=SPFSens)\n\n\n"} {"package":"Surrogate","topic":"MinSurrContCont","snippet":"### Name: MinSurrContCont\n### Title: Examine the plausibility of finding a good surrogate endpoint in\n### the Continuous-continuous case\n### Aliases: MinSurrContCont\n### Keywords: Plausibility of a good surrogate\n\n### ** Examples\n\n# Assess the plausibility of finding a good surrogate when\n# sigma_T0T0 = sigma_T1T1 = 8 and Delta = 1\n## Not run: \n##D MinSurr <- MinSurrContCont(T0T0 = 8, T1T1 = 8, Delta = 1)\n##D summary(MinSurr)\n##D plot(MinSurr)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"MixedContContIT","snippet":"### Name: MixedContContIT\n### Title: Fits (univariate) mixed-effect models to assess surrogacy in the\n### continuous-continuous case based on the Information-Theoretic\n### framework\n### Aliases: MixedContContIT\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy Individual-level surrogacy Likelihood Reduction\n### Factor (LRF) Mixed-effect models Continuous endpoint\n\n### ** Examples\n\n# Example 1\n# Based on the ARMD data:\ndata(ARMD)\n# Assess surrogacy based on a full mixed-effect model\n# in the information-theoretic framework:\nSur <- MixedContContIT(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center,\nPat.ID=Id, 
Model=\"Full\")\n# Obtain a summary of the results:\nsummary(Sur)\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Example 2\n##D # Conduct an analysis based on a simulated dataset with 2000 patients, 200 trials,\n##D # and Rindiv=Rtrial=.8\n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=200, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Full\")\n##D # Assess surrogacy based on a full mixed-effect model\n##D # in the information-theoretic framework:\n##D Sur2 <- MixedContContIT(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat,\n##D Trial.ID=Trial.ID, Pat.ID=Pat.ID, Model=\"Full\")\n##D \n##D # Show a summary of the results:\n##D summary(Sur2)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"Ovarian","snippet":"### Name: Ovarian\n### Title: The Ovarian dataset\n### Aliases: Ovarian\n### Keywords: datasets\n\n### ** Examples\n\ndata(Ovarian)\nstr(Ovarian)\nhead(Ovarian)\n\n\n"} {"package":"Surrogate","topic":"PPE.BinBin","snippet":"### Name: PPE.BinBin\n### Title: Evaluate a surrogate predictive value based on the minimum\n### probability of a prediction error in the setting where both S and T\n### are binary endpoints\n### Aliases: PPE.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting ICA RPE PPE\n\n### ** Examples\n\n# Conduct the analysis \n \n## Not run: \n##D # time consuming code part\n##D PPE.BinBin(pi1_1_=0.4215, pi0_1_=0.0538, pi1_0_=0.0538,\n##D pi_1_1=0.5088, pi_1_0=0.0307,pi_0_1=0.0482, \n##D Seed=1, M=10000) \n## End(Not run)\n\n\n\n"} {"package":"Surrogate","topic":"PROC.BinBin","snippet":"### Name: PROC.BinBin\n### Title: Evaluate the individual causal association (ICA) and reduction\n### in probability of a prediction error (RPE) in the setting where both\n### S and T are binary endpoints\n### Aliases: PROC.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting ICA RPE PPE\n\n### ** Examples\n\n# Conduct the analysis \n \n## Not run: \n##D # time consuming code part\n##D library(Surrogate)\n##D # load the CIGTS data \n##D data(CIGTS)\n##D CIGTS_25000<-PROC.BinBin(Dataset=CIGTS, Surr=IOP_12, True=IOP_96, \n##D Treat=Treat, BS=FALSE,seqs=250, MC_samples=100, Seed=1)\n## End(Not run)\n\n\n\n"} {"package":"Surrogate","topic":"plot FixedDiscrDiscrIT","snippet":"### Name: plot FixedDiscrDiscrIT\n### Title: Provides plots of trial-level surrogacy in the\n### Information-Theoretic framework\n### Aliases: 'plot FixedDiscrDiscrIT' plot.FixedDiscrDiscrIT\n### Keywords: Plot surrogacy Information-Theoretic framework Trial-level\n### surrogacy Individual-level surrogacy Multiple-trial setting\n\n### ** Examples\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Full\")\n##D \n##D # create a binary true and ordinal surrogate outcome\n##D Data.Observed.MTS$True<-findInterval(Data.Observed.MTS$True, \n##D c(quantile(Data.Observed.MTS$True,0.5)))\n##D Data.Observed.MTS$Surr<-findInterval(Data.Observed.MTS$Surr, \n##D c(quantile(Data.Observed.MTS$Surr,0.333),quantile(Data.Observed.MTS$Surr,0.666)))\n##D \n##D # Assess surrogacy based on a full fixed-effect model\n##D # in the information-theoretic framework for a binary surrogate and ordinal true outcome:\n##D SurEval <- FixedDiscrDiscrIT(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat,\n##D Trial.ID=Trial.ID, Setting=\"ordbin\")\n##D \n##D ## Request 
the trial-level surrogacy plot (with Weighted=FALSE the\n##D ## circles have equal sizes rather than sizes proportional to the number\n##D ## of patients in a trial):\n##D plot(SurEval, Weighted=FALSE)\n##D \n## End(Not run)\n\n"} {"package":"Surrogate","topic":"plot PredTrialTContCont","snippet":"### Name: plot PredTrialTContCont\n### Title: Plots the expected treatment effect on the true endpoint in a\n### new trial (when both S and T are normally distributed continuous\n### endpoints)\n### Aliases: 'plot PredTrialTContCont' plot.PredTrialTContCont\n### Keywords: New trial Predict treatment effect T\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D # Generate dataset\n##D Sim.Data.MTS(N.Total=2000, N.Trial=15, R.Trial.Target=.95, \n##D R.Indiv.Target=.8, D.aa=10, D.bb=50, \n##D Fixed.Effects=c(1, 2, 30, 90), Seed=1)\n##D \n##D # Evaluate surrogacy using a reduced bivariate mixed-effects model\n##D BimixedFit <- BimixedContCont(Dataset = Data.Observed.MTS, \n##D Surr = Surr, True = True, Treat = Treat, Trial.ID = Trial.ID, \n##D Pat.ID = Pat.ID, Model=\"Reduced\")\n##D \n##D # Suppose that in a new trial, it was estimated alpha_0 = 30\n##D # predict beta_0 in this trial\n##D Pred_Beta <- Pred.TrialT.ContCont(Object = BimixedFit, \n##D alpha_0 = 30)\n##D \n##D # Examine the results\n##D summary(Pred_Beta)\n##D \n##D # Plot the results\n##D plot(Pred_Beta)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"Pos.Def.Matrices","snippet":"### Name: Pos.Def.Matrices\n### Title: Generate 4 by 4 correlation matrices and flag the positive\n### definite ones\n### Aliases: Pos.Def.Matrices\n\n### ** Examples\n\n## Generate all 4x4 matrices that can be formed using rho(T0,S0)=rho(T1,S1)=.5\n## and the grid of values 0, .2, ..., 1 for the other off-diagonal correlations: \nPos.Def.Matrices(T0T1=seq(0, 1, by=.2), T0S0=.5, T0S1=seq(0, 1, by=.2), \nT1S0=seq(0, 1, by=.2), T1S1=.5, S0S1=seq(0, 1, by=.2))\n\n## Examine the first 10 rows of the object Generated.Matrices:\nGenerated.Matrices[1:10,]\n\n## Check how many of the generated matrices are positive definite\n## (counts and percentages):\ntable(Generated.Matrices$Pos.Def.Status)\ntable(Generated.Matrices$Pos.Def.Status)/nrow(Generated.Matrices)\n\n## Make an object PosDef which contains the positive definite matrices:\nPosDef <- Generated.Matrices[Generated.Matrices$Pos.Def.Status==1,]\n\n## Show the first 10 matrices that are positive definite:\nPosDef[1:10,]\n\n\n"} {"package":"Surrogate","topic":"Pred.TrialT.ContCont","snippet":"### Name: Pred.TrialT.ContCont\n### Title: Compute the expected treatment effect on the true endpoint in a\n### new trial (when both S and T are normally distributed continuous\n### endpoints)\n### Aliases: Pred.TrialT.ContCont\n### Keywords: New trial Predict treatment effect T\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Generate dataset\n##D Sim.Data.MTS(N.Total=2000, N.Trial=15, R.Trial.Target=.8, \n##D R.Indiv.Target=.8, D.aa=10, D.bb=50, Fixed.Effects=c(1, 2, 30, 90), \n##D Seed=1)\n##D \n##D # Evaluate surrogacy using a reduced bivariate mixed-effects model\n##D BimixedFit <- BimixedContCont(Dataset = Data.Observed.MTS, Surr = Surr, \n##D True = True, Treat = Treat, Trial.ID = Trial.ID, Pat.ID = Pat.ID, \n##D Model=\"Reduced\")\n##D \n##D # Suppose that in a new trial, it was estimated alpha_0 = 30\n##D # predict beta_0 in this trial\n##D Pred_Beta <- Pred.TrialT.ContCont(Object = BimixedFit, \n##D alpha_0 = 30)\n##D \n##D # Examine the results\n##D summary(Pred_Beta)\n##D \n##D # 
Plot the results\n##D plot(Pred_Beta)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"Prentice","snippet":"### Name: Prentice\n### Title: Evaluates surrogacy based on the Prentice criteria for\n### continuous endpoints (single-trial setting)\n### Aliases: Prentice\n### Keywords: Single-trial setting Prentice criteria\n\n### ** Examples\n\n\n## Load the ARMD dataset\ndata(ARMD)\n\n## Evaluate the Prentice criteria in the ARMD dataset \nPrent <- Prentice(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Pat.ID=Id)\n\n# Summary of results\nsummary(Prent)\n\n\n"} {"package":"Surrogate","topic":"RandVec","snippet":"### Name: RandVec\n### Title: Generate random vectors with a fixed sum\n### Aliases: RandVec\n### Keywords: RandVec\n\n### ** Examples\n\n# generate two vectors with 10 values ranging between 0 and 1\n# where each vector sums to 1\n# (uniform distribution over the simplex of vectors summing to 1)\nVectors <- RandVec(a=0, b=1, s=1, n=10, m=2)\nsum(Vectors$RandVecOutput[,1])\nsum(Vectors$RandVecOutput[,2])\n\n\n"} {"package":"Surrogate","topic":"Restrictions.BinBin","snippet":"### Name: Restrictions.BinBin\n### Title: Examine restrictions in pi_{f} under different monotonicity\n### assumptions for binary S and T\n### Aliases: Restrictions.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting\n\n### ** Examples\n\nRestrictions.BinBin(pi1_1_=0.262, pi0_1_=0.135, pi1_0_=0.286, \npi_1_1=0.637, pi_1_0=0.078, pi_0_1=0.127)\n\n\n"} {"package":"Surrogate","topic":"SPF.BinBin","snippet":"### Name: SPF.BinBin\n### Title: Evaluate the surrogate predictive function (SPF) in the\n### binary-binary setting (sensitivity-analysis based approach)\n### Aliases: SPF.BinBin\n### Keywords: Causal-Inference framework Counterfactuals BinBin Sensitivity\n### SPF\n\n### ** Examples\n\n# Use ICA.BinBin.Grid.Sample to obtain plausible values for pi\nICA_BINBIN_Grid_Sample <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119,\npi1_0_=0.254, pi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1,\nMonotonicity=c(\"General\"), M=2500)\n\n# Obtain SPF\nSPF <- SPF.BinBin(ICA_BINBIN_Grid_Sample)\n\n# examine results\nsummary(SPF)\nplot(SPF)\n\n\n"} {"package":"Surrogate","topic":"SPF.BinCont","snippet":"### Name: SPF.BinCont\n### Title: Evaluate the surrogate predictive function (SPF) in the\n### binary-continuous setting (sensitivity-analysis based approach)\n### Aliases: SPF.BinCont\n### Keywords: Causal-Inference framework Counterfactuals BinCont\n### Sensitivity SPF\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D # Use ICA.BinCont to examine surrogacy\n##D data(Schizo_BinCont)\n##D Result_BinCont <- ICA.BinCont(M = 1000, Dataset = Schizo_BinCont,\n##D Surr = PANSS, True = CGI_Bin, Treat=Treat, Diff.Sigma=TRUE)\n##D \n##D # Obtain SPF\n##D Fit <- SPF.BinCont(x=Result_BinCont, a = -30, b = -3)\n##D \n##D # examine results\n##D summary(Fit)\n##D plot(Fit)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"Sim.Data.Counterfactuals","snippet":"### Name: Sim.Data.Counterfactuals\n### Title: Simulate a dataset that contains counterfactuals\n### Aliases: Sim.Data.Counterfactuals\n### Keywords: Causal-Inference framework Simulate data Counterfactuals\n\n### ** Examples\n\n## Generate a dataset with 2000 patients, cor(S0,T0)=cor(S1,T1)=.5, \n## cor(T0,T1)=cor(T0,S1)=cor(T1,S0)=cor(S0,S1)=0, with means \n## 5, 9, 12, and 15 for S0, S1, T0, and T1, respectively:\nSim.Data.Counterfactuals(N=2000, T0S0=.5, T1S1=.5, T0T1=0, T0S1=0, T1S0=0, S0S1=0, \nmu_c=c(5, 9, 12, 15), 
Seed=1)\n\n\n"} {"package":"Surrogate","topic":"Sim.Data.CounterfactualsBinBin","snippet":"### Name: Sim.Data.CounterfactualsBinBin\n### Title: Simulate a dataset that contains counterfactuals for binary\n### endpoints\n### Aliases: Sim.Data.CounterfactualsBinBin\n### Keywords: Causal-Inference framework Simulate data Counterfactuals\n### Binary Binary setting\n\n### ** Examples\n\n## Generate a dataset with 2000 patients, and values 1/16\n## for all probabilities between the counterfactuals:\nSim.Data.CounterfactualsBinBin(N.Total=2000)\n\n\n"} {"package":"Surrogate","topic":"Sim.Data.MTS","snippet":"### Name: Sim.Data.MTS\n### Title: Simulates a dataset that can be used to assess surrogacy in the\n### multiple-trial setting\n### Aliases: Sim.Data.MTS\n### Keywords: Simulate data Multiple-trial setting\n\n### ** Examples\n\n# Simulate a dataset with 2000 patients, 50 trials, Rindiv=Rtrial=.8, D.aa=10,\n# D.bb=50, and fixed effect values 1, 2, 30, and 90:\nSim.Data.MTS(N.Total=2000, N.Trial=50, R.Trial.Target=.8, R.Indiv.Target=.8, D.aa=10, \nD.bb=50, Fixed.Effects=c(1, 2, 30, 90), Seed=1) \n\n# Sample output, the first 10 rows of Data.Observed.MTS:\nData.Observed.MTS[1:10,]\n\n# Note: When the following code is used to generate a dataset:\nSim.Data.MTS(N.Total=2000, N.Trial=99, R.Trial.Target=.5, R.Indiv.Target=.8, \nD.aa=10, D.bb=50, Fixed.Effects=c(1, 2, 30, 90), Seed=1) \n\n# R gives the following note: \n\n# > NOTE: The number of patients per trial requested in the function call \n# > equals 20.20202 (=N.Total/N.Trial), which is not a whole number. \n# > To obtain a dataset where the number of patients per trial is balanced for \n# > all trials, the number of patients per trial was rounded to 21 to generate \n# > the dataset. Data.Observed.MTS thus contains a total of 2079 patients rather \n# > than the requested 2000 in the function call.\n\n\n"} {"package":"Surrogate","topic":"Sim.Data.STS","snippet":"### Name: Sim.Data.STS\n### Title: Simulates a dataset that can be used to assess surrogacy in the\n### single-trial setting\n### Aliases: Sim.Data.STS\n### Keywords: Simulate data Single-trial setting\n\n### ** Examples\n\n# Simulate a dataset: \nSim.Data.STS(N.Total=2000, R.Indiv.Target=.8, Means=c(1, 5, 20, 37), Seed=1) \n\n\n"} {"package":"Surrogate","topic":"Sim.Data.STSBinBin","snippet":"### Name: Sim.Data.STSBinBin\n### Title: Simulates a dataset that can be used to assess surrogacy in the\n### single trial setting when S and T are binary endpoints\n### Aliases: Sim.Data.STSBinBin\n### Keywords: Causal-Inference framework Simulate data Counterfactuals\n### Binary Binary setting\n\n### ** Examples\n\n## Generate a dataset with 200 patients, \n## assuming no monotonicity:\nSim.Data.STSBinBin(Monotonicity=c(\"No\"), N.Total=200)\n\n\n"} {"package":"Surrogate","topic":"Single.Trial.RE.AA","snippet":"### Name: Single.Trial.RE.AA\n### Title: Conducts a surrogacy analysis based on the single-trial\n### meta-analytic framework\n### Aliases: Single.Trial.RE.AA plot.Single.Trial.RE.AA\n### Keywords: Single-trial setting Meta-analytic framework Individual-level\n### surrogacy Trial-level surrogacy Relative effect Adjusted Association\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D # Example 1, based on the ARMD data:\n##D data(ARMD)\n##D \n##D # Assess surrogacy based on the single-trial meta-analytic approach:\n##D Sur <- Single.Trial.RE.AA(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Pat.ID=Id)\n##D \n##D # Obtain a summary and plot of the results\n##D 
summary(Sur)\n##D plot(Sur)\n##D \n##D \n##D # Example 2\n##D # Conduct an analysis based on a simulated dataset with 2000 patients \n##D # and Rindiv=.8\n##D # Simulate the data:\n##D Sim.Data.STS(N.Total=2000, R.Indiv.Target=.8, Seed=123)\n##D \n##D # Assess surrogacy:\n##D Sur2 <- Single.Trial.RE.AA(Dataset=Data.Observed.STS, Surr=Surr, True=True, Treat=Treat, \n##D Pat.ID=Pat.ID)\n##D \n##D # Show a summary and plots of results\n##D summary(Sur2)\n##D plot(Sur2)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"SurvSurv","snippet":"### Name: SurvSurv\n### Title: Assess surrogacy for two survival endpoints based on information\n### theory and a two-stage approach\n### Aliases: SurvSurv\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy Individual-level surrogacy Cox proportional\n### hazards model Survival endpoints Time-to-event endpoints\n\n### ** Examples\n\n# Open Ovarian dataset\ndata(Ovarian)\n\n# Conduct analysis\nFit <- SurvSurv(Dataset = Ovarian, Surr = Pfs, SurrCens = PfsInd,\nTrue = Surv, TrueCens = SurvInd, Treat = Treat, \nTrial.ID = Center)\n\n# Examine results \nplot(Fit)\nsummary(Fit)\n\n\n"} {"package":"Surrogate","topic":"Test.Mono","snippet":"### Name: Test.Mono\n### Title: Test whether the data are compatible with monotonicity for S\n### and/or T (binary endpoints)\n### Aliases: Test.Mono\n### Keywords: Monotonicity Test Monotonicity\n\n### ** Examples\n\nTest.Mono(pi1_1_=0.2619048, pi1_0_=0.2857143, pi_1_1=0.6372549, \npi_1_0=0.07843137, pi0_1_=0.1349206, pi_0_1=0.127451)\n\n\n"} {"package":"Surrogate","topic":"TrialLevelIT","snippet":"### Name: TrialLevelIT\n### Title: Estimates trial-level surrogacy in the information-theoretic\n### framework\n### Aliases: TrialLevelIT\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy\n\n### ** Examples\n\n# Generate vector treatment effects on S\nset.seed(seed = 1)\nAlpha.Vector <- seq(from = 5, to = 10, by=.1) + runif(min = -.5, max = .5, n = 51)\n\n# Generate vector treatment effects on T\nset.seed(seed=2)\nBeta.Vector <- (Alpha.Vector * 3) + runif(min = -5, max = 5, n = 51)\n\n# Apply the function to estimate R^2_{h.t}\n# (note: the effect vectors contain 51 trials)\nFit <- TrialLevelIT(Alpha.Vector=Alpha.Vector,\nBeta.Vector=Beta.Vector, N.Trial=51, Model=\"Reduced\")\n\nsummary(Fit)\nplot(Fit)\n\n\n"} {"package":"Surrogate","topic":"TrialLevelMA","snippet":"### Name: TrialLevelMA\n### Title: Estimates trial-level surrogacy in the meta-analytic framework\n### Aliases: TrialLevelMA\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy\n\n### ** Examples\n\n# Generate vector treatment effects on S\nset.seed(seed = 1)\nAlpha.Vector <- seq(from = 5, to = 10, by=.1) + runif(min = -.5, max = .5, n = 51)\n# Generate vector treatment effects on T\nset.seed(seed=2)\nBeta.Vector <- (Alpha.Vector * 3) + runif(min = -5, max = 5, n = 51)\n# Vector of sample sizes of the trials (here, all n_i=10)\nN.Vector <- rep(10, times=51)\n\n# Apply the function to estimate R^2_{trial}\nFit <- TrialLevelMA(Alpha.Vector=Alpha.Vector,\nBeta.Vector=Beta.Vector, N.Vector=N.Vector)\n\n# Plot the results and obtain summary\nplot(Fit)\nsummary(Fit)\n\n\n"} {"package":"Surrogate","topic":"TwoStageSurvSurv","snippet":"### Name: TwoStageSurvSurv\n### Title: Assess trial-level surrogacy for two survival endpoints using a\n### two-stage approach\n### Aliases: TwoStageSurvSurv\n### Keywords: Multiple-trial setting Information-theoretic framework\n### Trial-level surrogacy Cox 
proportional hazards model Survival\n### endpoints Time-to-event endpoints\n\n### ** Examples\n\n# Open Ovarian dataset\ndata(Ovarian)\n\n# Conduct analysis\nResults <- TwoStageSurvSurv(Dataset = Ovarian, Surr = Pfs, SurrCens = PfsInd, \nTrue = Surv, TrueCens = SurvInd, Treat = Treat, Trial.ID = Center)\n\n# Examine results of analysis\nsummary(Results)\nplot(Results)\n\n\n"} {"package":"Surrogate","topic":"UnifixedContCont","snippet":"### Name: UnifixedContCont\n### Title: Fits univariate fixed-effect models to assess surrogacy in the\n### meta-analytic multiple-trial setting (continuous-continuous case)\n### Aliases: UnifixedContCont\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy Individual-level surrogacy Fixed-effect models\n\n### ** Examples\n\n## Not run: \n##D #Time consuming (>5 sec) code parts\n##D # Example 1, based on the ARMD data\n##D data(ARMD)\n##D \n##D # Fit a full univariate fixed-effects model with weighting according to the \n##D # number of patients in stage 2 of the two stage approach to assess surrogacy:\n##D Sur <- UnifixedContCont(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center, \n##D Pat.ID=Id, Model=\"Full\", Weighted=TRUE)\n##D \n##D # Obtain a summary and plot of the results\n##D summary(Sur)\n##D plot(Sur)\n##D \n##D # Example 2\n##D # Conduct an analysis based on a simulated dataset with 2000 patients, 100 trials, \n##D # and Rindiv=Rtrial=.8\n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Reduced\")\n##D \n##D # Fit a reduced univariate fixed-effects model without weighting to assess\n##D # surrogacy:\n##D Sur2 <- UnifixedContCont(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat, \n##D Trial.ID=Trial.ID, Pat.ID=Pat.ID, Model=\"Reduced\", Weighted=FALSE)\n##D \n##D # Show a summary and plots of results:\n##D summary(Sur2)\n##D plot(Sur2, Weighted=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"UnimixedContCont","snippet":"### Name: UnimixedContCont\n### Title: Fits univariate mixed-effect models to assess surrogacy in the\n### meta-analytic multiple-trial setting (continuous-continuous case)\n### Aliases: UnimixedContCont\n### Keywords: Multiple-trial setting Meta-analytic framework Trial-level\n### surrogacy Individual-level surrogacy Mixed-effect models\n\n### ** Examples\n\n\n## Not run: \n##D #Time consuming code part\n##D # Conduct an analysis based on a simulated dataset with 2000 patients, 100 trials, \n##D # and Rindiv=Rtrial=.8\n##D # Simulate the data:\n##D Sim.Data.MTS(N.Total=2000, N.Trial=100, R.Trial.Target=.8, R.Indiv.Target=.8,\n##D Seed=123, Model=\"Reduced\")\n##D \n##D # Fit a reduced univariate mixed-effects model without weighting to assess surrogacy:\n##D Sur <- UnimixedContCont(Dataset=Data.Observed.MTS, Surr=Surr, True=True, Treat=Treat, \n##D Trial.ID=Trial.ID, Pat.ID=Pat.ID, Model=\"Reduced\", Weighted=FALSE)\n##D \n##D # Show a summary and plots of the results:\n##D summary(Sur)\n##D plot(Sur, Weighted=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"comb27.BinBin","snippet":"### Name: comb27.BinBin\n### Title: Assesses the surrogate predictive value of each of the 27\n### prediction functions in the setting where both S and T are binary\n### endpoints\n### Aliases: comb27.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting ICA PPE\n\n### ** Examples\n\n# Conduct the analysis assuming no monotonicity\n \n## 
Not run: \n##D # time consuming code part\n##D comb27.BinBin(pi1_1_ = 0.3412, pi1_0_ = 0.2539, pi0_1_ = 0.119, \n##D pi_1_1 = 0.6863, pi_1_0 = 0.0882, pi_0_1 = 0.0784, \n##D Seed=1,Monotonicity=c(\"No\"), M=500000) \n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"fit_model_SurvSurv","snippet":"### Name: fit_model_SurvSurv\n### Title: Fit Survival-Survival model\n### Aliases: fit_model_SurvSurv\n\n### ** Examples\n\nif(require(Surrogate)) {\n data(\"Ovarian\")\n #For simplicity, data is not recoded to semi-competing risks format, but is\n #left in the composite event format.\n data = data.frame(Ovarian$Pfs,\n Ovarian$Surv,\n Ovarian$Treat,\n Ovarian$PfsInd,\n Ovarian$SurvInd)\n Surrogate::fit_model_SurvSurv(data = data,\n copula_family = \"clayton\",\n n_knots = 1)\n}\n\n\n\n"} {"package":"Surrogate","topic":"marginal_gof_scr","snippet":"### Name: marginal_gof_scr\n### Title: Marginal survival function goodness of fit\n### Aliases: marginal_gof_scr\n\n### ** Examples\n\nlibrary(Surrogate)\ndata(\"Ovarian\")\n#For simplicity, data is not recoded to semi-competing risks format, but is\n#left in the composite event format.\ndata = data.frame(\n Ovarian$Pfs,\n Ovarian$Surv,\n Ovarian$Treat,\n Ovarian$PfsInd,\n Ovarian$SurvInd\n)\novarian_fitted =\n fit_model_SurvSurv(data = data,\n copula_family = \"clayton\",\n n_knots = 1)\ngrid = seq(from = 0, to = 2, length.out = 200)\nmarginal_gof_scr(ovarian_fitted, data, grid)\n\n\n\n"} {"package":"Surrogate","topic":"model_fit_measures","snippet":"### Name: model_fit_measures\n### Title: Goodness of fit information for survival-survival model\n### Aliases: model_fit_measures\n\n### ** Examples\n\nlibrary(Surrogate)\ndata(\"Ovarian\")\n#For simplicity, data is not recoded to semi-competing risks format, but is\n#left in the composite event format.\ndata = data.frame(\n Ovarian$Pfs,\n Ovarian$Surv,\n Ovarian$Treat,\n Ovarian$PfsInd,\n Ovarian$SurvInd\n)\novarian_fitted =\n fit_model_SurvSurv(data = data,\n copula_family = \"clayton\",\n n_knots = 1)\nmodel_fit_measures(ovarian_fitted)\n\n\n"} {"package":"Surrogate","topic":"new_vine_copula_ss_fit","snippet":"### Name: new_vine_copula_ss_fit\n### Title: Constructor for vine copula model\n### Aliases: new_vine_copula_ss_fit\n\n### ** Examples\n\n# should not be used by the user\n\n\n"} {"package":"Surrogate","topic":"plot Causal-Inference ContCont","snippet":"### Name: plot Causal-Inference ContCont\n### Title: Plots the (Meta-Analytic) Individual Causal Association when S\n### and T are continuous outcomes\n### Aliases: 'plot Causal-Inference ContCont' plot.ICA.ContCont\n### plot.MICA.ContCont\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Multiple-trial setting Sensitivity Plausibility of a\n### surrogate\n\n### ** Examples\n\n# Plot of ICA\n\n# Generate the vector of ICA values when rho_T0S0=rho_T1S1=.95, and when the\n# grid of values {0, .2, ..., 1} is considered for the correlations\n# between the counterfactuals:\nSurICA <- ICA.ContCont(T0S0=.95, T1S1=.95, T0T1=seq(0, 1, by=.2), T0S1=seq(0, 1, by=.2), \nT1S0=seq(0, 1, by=.2), S0S1=seq(0, 1, by=.2))\n\n# Plot the results:\nplot(SurICA)\n\n# Same plot but add the percentages of ICA values that are equal to or larger \n# than the midpoint values of the bins\nplot(SurICA, Labels=TRUE)\n\n# Plot of both ICA and MICA\n\n# Generate the vector of ICA and MICA values when R_trial=.8, rho_T0S0=rho_T1S1=.8, \n# D.aa=5, D.bb=10, and when the grid of values {0, .2, ..., 1} is considered \n# for the correlations between 
the counterfactuals:\nSurMICA <- MICA.ContCont(Trial.R=.80, D.aa=5, D.bb=10, T0S0=.8, T1S1=.8, \nT0T1=seq(0, 1, by=.2), T0S1=seq(0, 1, by=.2), T1S0=seq(0, 1, by=.2), \nS0S1=seq(0, 1, by=.2))\n\n# Plot the vector of generated ICA and MICA values\nplot(SurMICA, ICA=TRUE, MICA=TRUE)\n\n\n"} {"package":"Surrogate","topic":"plot.Fano.BinBin","snippet":"### Name: plot.Fano.BinBin\n### Title: Plots the distribution of R^2_{HL} either as a density or as\n### function of pi_{10} in the setting where both S and T are binary\n### endpoints\n### Aliases: plot.Fano.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting Fano ICA MarginalProbs\n\n### ** Examples\n\n# Conduct the analysis assuming no monotonicity\n# for the true endpoint, using a range of\n# upper bounds for prediction errors \nFANO<-Fano.BinBin(pi1_ = 0.5951 , pi_1 = 0.7745, \nfano_delta=c(0.05, 0.1, 0.2), M=1000)\n\nplot(FANO, Type=\"Scatter\",color=c(3,4,5),Cex.Position=\"bottom\")\n\n\n"} {"package":"Surrogate","topic":"plot.ICA.ContCont.MultS","snippet":"### Name: plot ICA.ContCont.MultS\n### Title: Plots the Individual Causal Association in the setting where\n### there are multiple continuous S and a continuous T\n### Aliases: plot.ICA.ContCont.MultS plot.ICA.ContCont.MultS_alt\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Multiple-trial setting Sensitivity Multiple surrogates\n### Multivariate setting\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Specify matrix Sigma (var-covar matrix T_0, T_1, S1_0, S1_1, ...)\n##D # here for 1 true endpoint and 3 surrogates\n##D \n##D s<-matrix(rep(NA, times=64),8)\n##D s[1,1] <- 450; s[2,2] <- 413.5; s[3,3] <- 174.2; s[4,4] <- 157.5; \n##D s[5,5] <- 244.0; s[6,6] <- 229.99; s[7,7] <- 294.2; s[8,8] <- 302.5\n##D s[3,1] <- 160.8; s[5,1] <- 208.5; s[7,1] <- 268.4 \n##D s[4,2] <- 124.6; s[6,2] <- 212.3; s[8,2] <- 287.1\n##D s[5,3] <- 160.3; s[7,3] <- 142.8 \n##D s[6,4] <- 134.3; s[8,4] <- 130.4 \n##D s[7,5] <- 209.3; \n##D s[8,6] <- 214.7 \n##D s[upper.tri(s)] = t(s)[upper.tri(s)]\n##D \n##D # Matrix looks like:\n##D # T_0 T_1 S1_0 S1_1 S2_0 S2_1 S3_0 S3_1\n##D # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]\n##D # T_0 [1,] 450.0 NA 160.8 NA 208.5 NA 268.4 NA\n##D # T_1 [2,] NA 413.5 NA 124.6 NA 212.30 NA 287.1\n##D # S1_0 [3,] 160.8 NA 174.2 NA 160.3 NA 142.8 NA\n##D # S1_1 [4,] NA 124.6 NA 157.5 NA 134.30 NA 130.4\n##D # S2_0 [5,] 208.5 NA 160.3 NA 244.0 NA 209.3 NA\n##D # S2_1 [6,] NA 212.3 NA 134.3 NA 229.99 NA 214.7\n##D # S3_0 [7,] 268.4 NA 142.8 NA 209.3 NA 294.2 NA\n##D # S3_1 [8,] NA 287.1 NA 130.4 NA 214.70 NA 302.5\n##D \n##D # Conduct analysis\n##D ICA <- ICA.ContCont.MultS(M=100, N=200, Show.Progress = TRUE,\n##D Sigma=s, G = seq(from=-1, to=1, by = .00001), Seed=c(123), \n##D Model = \"Delta_T ~ Delta_S1 + Delta_S2 + Delta_S3\")\n##D \n##D # Explore results\n##D summary(ICA)\n##D plot(ICA)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot Causal-Inference BinBin","snippet":"### Name: plot Causal-Inference BinBin\n### Title: Plots the (Meta-Analytic) Individual Causal Association and\n### related metrics when S and T are binary outcomes\n### Aliases: 'plot Causal-Inference BinBin' plot.ICA.BinBin\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Sensitivity\n\n### ** Examples\n\n# Compute R2_H given the marginals,\n# using the grid-sampling approach under general assumptions\n# regarding monotonicity:\nICA <- 
ICA.BinBin.Grid.Sample(pi1_1_=0.261, pi1_0_=0.285, \npi_1_1=0.637, pi_1_0=0.078, pi0_1_=0.134, pi_0_1=0.127, \nMonotonicity=c(\"General\"), M=2500, Seed=1)\n \n# Plot the results (density of R2_H):\nplot(ICA, Type=\"Density\", R2_H=TRUE, R_H=FALSE, \nTheta_T=FALSE, Theta_S=FALSE)\n\n\n"} {"package":"Surrogate","topic":"plot Causal-Inference BinCont","snippet":"### Name: plot Causal-Inference BinCont\n### Title: Plots the (Meta-Analytic) Individual Causal Association and\n### related metrics when S is continuous and T is binary\n### Aliases: 'plot Causal-Inference BinCont' plot.ICA.BinCont\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Sensitivity\n\n### ** Examples\n\n## Not run: \n##D # Time consuming code part\n##D Fit <- ICA.BinCont(Dataset = Schizo, Surr = BPRS, True = PANSS_Bin, \n##D Treat=Treat, M=50, Seed=1)\n##D \n##D summary(Fit)\n##D plot(Fit)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot ISTE.ContCont","snippet":"### Name: plot ISTE.ContCont\n### Title: Plots the individual-level surrogate threshold effect (STE)\n### values and related metrics\n### Aliases: 'plot ISTE.ContCont' plot.ISTE.ContCont\n### Keywords: Plot ISTE Plot individual-level surrogate threshold effect\n### Causal-Inference framework Single-trial setting Sensitivity\n\n### ** Examples\n\n# Define input for analysis using the Schizo dataset, \n# with S=BPRS and T = PANSS. \n# For each of the identifiable quantities,\n# uncertainty is accounted for by specifying a uniform\n# distribution with min, max values corresponding to\n# the 95% confidence interval of the quantity.\nT0S0 <- runif(min = 0.9524, max = 0.9659, n = 1000)\nT1S1 <- runif(min = 0.9608, max = 0.9677, n = 1000)\n\nS0S0 <- runif(min=160.811, max=204.5009, n=1000)\nS1S1 <- runif(min=168.989, max = 194.219, n=1000)\nT0T0 <- runif(min=484.462, max = 616.082, n=1000)\nT1T1 <- runif(min=514.279, max = 591.062, n=1000)\n\nMean_T0 <- runif(min=-13.455, max=-9.489, n=1000)\nMean_T1 <- runif(min=-17.17, max=-14.86, n=1000)\nMean_S0 <- runif(min=-7.789, max=-5.503, n=1000)\nMean_S1 <- runif(min=-9.600, max=-8.276, n=1000)\n\n# Do the ISTE analysis\n## Not run: \n##D ISTE <- ISTE.ContCont(Mean_T1=Mean_T1, Mean_T0=Mean_T0, \n##D Mean_S1=Mean_S1, Mean_S0=Mean_S0, N=2128, Delta_S=c(-50:50), \n##D alpha.PI=0.05, PI.Bound=0, Show.Prediction.Plots=TRUE,\n##D Save.Plots=\"No\", T0S0=T0S0, T1S1=T1S1, T0T0=T0T0, T1T1=T1T1, \n##D S0S0=S0S0, S1S1=S1S1)\n##D \n##D # Examine results:\n##D summary(ISTE)\n##D \n##D # Plots of results. 
\n##D # Plot main ISTE results\n##D plot(ISTE)\n##D # Other plots\n##D plot(ISTE, Outcome=\"MSE\")\n##D plot(ISTE, Outcome=\"gamma0\")\n##D plot(ISTE, Outcome=\"gamma1\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT.Low.PI\")\n##D plot(ISTE, Outcome=\"Exp.DeltaT.Up.PI\")\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot Information-Theoretic","snippet":"### Name: plot Information-Theoretic\n### Title: Provides plots of trial- and individual-level surrogacy in the\n### Information-Theoretic framework\n### Aliases: 'plot Information-Theoretic' plot.FixedContContIT\n### plot.MixedContContIT\n### Keywords: Plot surrogacy Information-Theoretic framework Trial-level\n### surrogacy Individual-level surrogacy Multiple-trial setting\n\n### ** Examples\n\n## Load ARMD dataset\ndata(ARMD)\n\n## Conduct a surrogacy analysis using a full mixed-effect model in the\n## information-theoretic framework:\nSur <- MixedContContIT(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center, \nPat.ID=Id, Model=c(\"Full\"))\n\n## Request both trial- and individual-level surrogacy plots. In the trial-level plot,\n## make the size of the circles proportional to the number of patients in a trial:\nplot(Sur, Trial.Level=TRUE, Weighted=TRUE, Indiv.Level=TRUE)\n\n## Make a trial-level surrogacy plot using filled blue circles that \n## are transparent (to make sure that the results of overlapping trials remain\n## visible), and modify the title and the axes labels of the plot: \nplot(Sur, pch=16, col=rgb(.3, .2, 1, 0.3), Indiv.Level=FALSE, Trial.Level=TRUE, \nWeighted=TRUE, Main.Trial=c(\"Trial-level surrogacy (ARMD dataset)\"), \nXlab.Trial=c(\"Difference in vision after 6 months (Surrogate)\"),\nYlab.Trial=c(\"Difference in vision after 12 months (True endpoint)\"))\n\n## Add the estimated R2_ht value in the previous plot at position (X=-2.2, Y=0) \n## (the previous plot should not have been closed):\nR2ht <- format(round(as.numeric(Sur$R2ht[1]), 3))\ntext(x=-2.2, y=0, cex=1.4, labels=(bquote(paste(\"R\"[ht]^{2}, \"=\"~.(R2ht)))))\n\n## Make an Individual-level surrogacy plot with red squares to depict individuals\n## (rather than black circles):\nplot(Sur, pch=15, col=\"red\", Indiv.Level=TRUE, Trial.Level=FALSE)\n\n\n"} {"package":"Surrogate","topic":"plot Information-Theoretic BinCombn","snippet":"### Name: plot Information-Theoretic BinCombn\n### Title: Provides plots of trial- and individual-level surrogacy in the\n### Information-Theoretic framework when both S and T are binary, or when\n### S is binary and T is continuous (or vice versa)\n### Aliases: 'plot Information-Theoretic BinCombn' plot.FixedBinBinIT\n### plot.FixedBinContIT plot.FixedContBinIT\n### Keywords: Plot surrogacy Information-Theoretic framework Trial-level\n### surrogacy Individual-level surrogacy Multiple-trial setting\n### Fixed-effect models Binary endpoint\n\n### ** Examples\n\n## Not run: \n##D # Time consuming (>5sec) code part\n##D # Generate data with continuous Surr and True\n##D Sim.Data.MTS(N.Total=5000, N.Trial=50, R.Trial.Target=.9, R.Indiv.Target=.9,\n##D Fixed.Effects=c(0, 0, 0, 0), D.aa=10, D.bb=10, Seed=1,\n##D Model=c(\"Full\"))\n##D # Dichotomize Surr and True\n##D Surr_Bin <- Data.Observed.MTS$Surr\n##D Surr_Bin[Data.Observed.MTS$Surr>.5] <- 1\n##D Surr_Bin[Data.Observed.MTS$Surr<=.5] <- 0\n##D True_Bin <- Data.Observed.MTS$True\n##D True_Bin[Data.Observed.MTS$True>.15] <- 1\n##D True_Bin[Data.Observed.MTS$True<=.15] <- 0\n##D Data.Observed.MTS$Surr <- Surr_Bin\n##D Data.Observed.MTS$True <- 
True_Bin\n##D \n##D # Assess surrogacy using info-theoretic framework\n##D Fit <- FixedBinBinIT(Dataset = Data.Observed.MTS, Surr = Surr, \n##D True = True, Treat = Treat, Trial.ID = Trial.ID, \n##D Pat.ID = Pat.ID, Number.Bootstraps=100)\n##D \n##D # Examine results\n##D summary(Fit)\n##D plot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)\n##D plot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot.SurvSurv","snippet":"### Name: plot.SurvSurv\n### Title: Provides plots of trial- and individual-level surrogacy in the\n### Information-Theoretic framework when both S and T are time-to-event\n### endpoints\n### Aliases: plot.SurvSurv\n### Keywords: Plot surrogacy Information-Theoretic framework Trial-level\n### surrogacy Individual-level surrogacy Multiple-trial setting Survival\n### endpoint\n\n### ** Examples\n\n# Open Ovarian dataset\ndata(Ovarian)\n\n# Conduct analysis\nFit <- SurvSurv(Dataset = Ovarian, Surr = Pfs, SurrCens = PfsInd,\nTrue = Surv, TrueCens = SurvInd, Treat = Treat, \nTrial.ID = Center, Alpha=.05)\n\n# Examine results \nsummary(Fit)\nplot(Fit, Trial.Level = FALSE, Indiv.Level.By.Trial=TRUE)\nplot(Fit, Trial.Level = TRUE, Indiv.Level.By.Trial=FALSE)\n\n\n"} {"package":"Surrogate","topic":"plot MaxEnt ContCont","snippet":"### Name: plot MaxEnt ContCont\n### Title: Plots the sensitivity-based and maximum entropy based Individual\n### Causal Association when S and T are continuous outcomes in the\n### single-trial setting\n### Aliases: 'plot MaxEnt ContCont' plot.MaxEntContCont\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Sensitivity ContCont Maximum Entropy\n\n### ** Examples\n\n## Not run: \n##D #time-consuming code parts\n##D # Compute ICA for ARMD dataset, using the grid \n##D # G={-1, -.80, ..., 1} for the unidentifiable correlations\n##D \n##D ICA <- ICA.ContCont(T0S0 = 0.769, T1S1 = 0.712, S0S0 = 188.926, \n##D S1S1 = 132.638, T0T0 = 264.797, T1T1 = 231.771, \n##D T0T1 = seq(-1, 1, by = 0.2), T0S1 = seq(-1, 1, by = 0.2), \n##D T1S0 = seq(-1, 1, by = 0.2), S0S1 = seq(-1, 1, by = 0.2))\n##D \n##D # Identify the maximum entropy ICA\n##D MaxEnt_ARMD <- MaxEntContCont(x = ICA, S0S0 = 188.926, \n##D S1S1 = 132.638, T0T0 = 264.797, T1T1 = 231.771)\n##D \n##D # Explore results using summary() and plot() functions\n##D summary(MaxEnt_ARMD)\n##D plot(MaxEnt_ARMD)\n##D plot(MaxEnt_ARMD, Entropy.By.ICA = TRUE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot MaxEntICA BinBin","snippet":"### Name: plot MaxEntICA BinBin\n### Title: Plots the sensitivity-based and maximum entropy based Individual\n### Causal Association when S and T are binary outcomes\n### Aliases: 'plot MaxEntICA BinBin' plot.MaxEntICA.BinBin\n### Keywords: Plot surrogacy Causal-Inference framework Single-trial\n### setting Sensitivity BinBin Maximum Entropy\n\n### ** Examples\n\n# Sensitivity-based ICA results using ICA.BinBin.Grid.Sample\nICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1, \nMonotonicity=c(\"No\"), M=5000)\n\n# Maximum-entropy based ICA\nMaxEnt <- MaxEntICABinBin(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078)\n\n# Plot results\nplot(x=MaxEnt, ICA.Fit=ICA)\n\n\n"} {"package":"Surrogate","topic":"plot MaxEntSPF BinBin","snippet":"### Name: plot MaxEntSPF BinBin\n### Title: Plots the sensitivity-based and maximum entropy based surrogate\n### predictive function (SPF) when 
S and T are binary outcomes.\n### Aliases: 'plot MaxEntSPF BinBin' plot.MaxEntSPF.BinBin\n### Keywords: Plot SPF Causal-Inference framework Sensitivity Maximum\n### Entropy\n\n### ** Examples\n\n# Sensitivity-based ICA results using ICA.BinBin.Grid.Sample\nICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1, \nMonotonicity=c(\"No\"), M=5000)\n\n# Sensitivity-based SPF\nSPFSens <- SPF.BinBin(ICA)\n\n# Maximum-entropy based SPF\nSPFMaxEnt <- MaxEntSPFBinBin(pi1_1_=0.341, pi0_1_=0.119, pi1_0_=0.254,\npi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078)\n\n# Plot results\nplot(x=SPFMaxEnt, SPF.Fit=SPFSens)\n\n\n"} {"package":"Surrogate","topic":"plot Meta-Analytic","snippet":"### Name: plot Meta-Analytic\n### Title: Provides plots of trial- and individual-level surrogacy in the\n### meta-analytic framework\n### Aliases: 'plot Meta-Analytic' plot.BifixedContCont plot.BimixedContCont\n### plot.UnifixedContCont plot.UnimixedContCont\n### Keywords: Plot surrogacy Meta-analytic framework Trial-level surrogacy\n### Individual-level surrogacy Multiple-trial setting Single-trial\n### setting Continuous endpoint\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D ##### Multiple-trial setting\n##D \n##D ## Load ARMD dataset\n##D data(ARMD)\n##D \n##D ## Conduct a surrogacy analysis, using a weighted reduced univariate fixed effect model:\n##D Sur <- UnifixedContCont(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Trial.ID=Center, \n##D Pat.ID=Id, Number.Bootstraps=100, Model=c(\"Reduced\"), Weighted=TRUE)\n##D \n##D ## Request both trial- and individual-level surrogacy plots. In the trial-level plot,\n##D ## make the size of the circles proportional to the number of patients in a trial:\n##D plot(Sur, Trial.Level=TRUE, Weighted=TRUE, Indiv.Level=TRUE)\n##D \n##D ## Make a trial-level surrogacy plot using filled blue circles that \n##D ## are transparent (to make sure that the results of overlapping trials remain\n##D ## visible), and modify the title and the axes labels of the plot: \n##D plot(Sur, pch=16, col=rgb(.3, .2, 1, 0.3), Indiv.Level=FALSE, Trial.Level=TRUE, \n##D Weighted=TRUE, Main.Trial=c(\"Trial-level surrogacy (ARMD dataset)\"), \n##D Xlab.Trial=c(\"Difference in vision after 6 months (Surrogate)\"),\n##D Ylab.Trial=c(\"Difference in vision after 12 months (True endpoint)\"))\n##D \n##D ## Add the estimated R2_trial value in the previous plot at position (X=-7, Y=11) \n##D ## (the previous plot should not have been closed):\n##D R2trial <- format(round(as.numeric(Sur$Trial.R2[1]), 3))\n##D text(x=-7, y=11, cex=1.4, labels=(bquote(paste(\"R\"[trial]^{2}, \"=\"~.(R2trial)))))\n##D \n##D ## Make an Individual-level surrogacy plot with red squares to depict individuals\n##D ## (rather than black circles):\n##D plot(Sur, pch=15, col=\"red\", Indiv.Level=TRUE, Trial.Level=FALSE)\n##D \n##D ## Same plot as before, but now with smaller squares, a y-axis with range [-40; 40], \n##D ## and the estimated R2_indiv value in the title of the plot:\n##D R2ind <- format(round(as.numeric(Sur$Indiv.R2[1]), 3))\n##D plot(Sur, pch=15, col=\"red\", Indiv.Level=TRUE, Trial.Level=FALSE, cex=.5, \n##D ylim=c(-40, 40), Main.Indiv=bquote(paste(\"R\"[indiv]^{2}, \"=\"~.(R2ind))))\n##D \n##D \n##D ##### Single-trial setting\n##D \n##D ## Conduct a surrogacy analysis in the single-trial meta-analytic setting:\n##D SurSTS <- Single.Trial.RE.AA(Dataset=ARMD, Surr=Diff24, True=Diff52, Treat=Treat, Pat.ID=Id)\n##D \n##D # Request a plot of 
individual-level surrogacy and a plot that depicts the Relative effect \n##D # and the constant RE assumption:\n##D plot(SurSTS, Trial.Level=TRUE, Indiv.Level=TRUE)\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot MinSurrContCont","snippet":"### Name: plot MinSurrContCont\n### Title: Graphically illustrates the theoretical plausibility of finding\n### a good surrogate endpoint in the continuous-continuous case\n### Aliases: 'plot MinSurrContCont' plot.MinSurrContCont\n### Keywords: Plausibility of a surrogate\n\n### ** Examples\n\n# compute rho^2_min in the setting where the variances of T in the control\n# and experimental treatments equal 100 and 120, delta is fixed at 50,\n# and the grid G={0, .01, ..., 1} is considered for the counterfactual \n# correlation rho_T0T1:\nMinSurr <- MinSurrContCont(T0T0 = 100, T1T1 = 120, Delta = 50,\nT0T1 = seq(0, 1, by = 0.01))\n\n# Plot the results (use percentages on Y-axis)\nplot(MinSurr, Type=\"Percent\")\n\n# Same plot, but add the percentages of rho^2_min values that are equal to or \n# larger than the midpoint values of the bins\nplot(MinSurr, Labels=TRUE)\n\n\n"} {"package":"Surrogate","topic":"plot.PPE.BinBin","snippet":"### Name: plot.PPE.BinBin\n### Title: Plots the distribution of either PPE, RPE or R^2_{H} either as a\n### density or as a histogram in the setting where both S and T are\n### binary endpoints\n### Aliases: plot.PPE.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting ICA PPE\n\n### ** Examples\n\n## Not run: \n##D # Time consuming part\n##D PANSS <- PPE.BinBin(pi1_1_=0.4215, pi0_1_=0.0538, pi1_0_=0.0538,\n##D pi_1_1=0.5088, pi_1_0=0.0307,pi_0_1=0.0482, \n##D Seed=1, M=2500) \n##D \n##D plot(PANSS,Type=\"Freq\",Param=\"RPE\",color=\"grey\",Breaks=0.05,xlimits=c(0,1),main=\"PANSS\")\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot SPF BinBin","snippet":"### Name: plot SPF BinBin\n### Title: Plots the surrogate predictive function (SPF) in the\n### binary-binary setting.\n### Aliases: 'plot SPF BinBin' plot.SPF.BinBin\n### Keywords: Plot SPF Causal-Inference framework Sensitivity\n\n### ** Examples\n\n## Not run: \n##D # Generate plausible values for Pi \n##D ICA <- ICA.BinBin.Grid.Sample(pi1_1_=0.341, pi0_1_=0.119,\n##D pi1_0_=0.254, pi_1_1=0.686, pi_1_0=0.088, pi_0_1=0.078, Seed=1,\n##D Monotonicity=c(\"General\"), M=2500)\n##D \n##D # Compute the surrogate predictive function (SPF)\n##D SPF <- SPF.BinBin(ICA)\n##D \n##D # Explore the results\n##D summary(SPF)\n##D \n##D # Examples of plots \n##D plot(SPF, Type=\"All.Histograms\")\n##D plot(SPF, Type=\"All.Densities\")\n##D plot(SPF, Type=\"Histogram\", Specific.Pi=\"r_0_0\")\n##D plot(SPF, Type=\"Box.Plot\", Legend.Pos=\"topleft\", Legend.Cex=.7)\n##D plot(SPF, Type=\"Lines.Mean\")\n##D plot(SPF, Type=\"Lines.Median\")\n##D plot(SPF, Type=\"3D.Mean\")\n##D plot(SPF, Type=\"3D.Median\")\n##D plot(SPF, Type=\"3D.Spinning.Mean\")\n##D plot(SPF, Type=\"3D.Spinning.Median\")\n## End(Not run)\n\n\n"} {"package":"Surrogate","topic":"plot SPF BinCont","snippet":"### Name: plot SPF BinCont\n### Title: Plots the surrogate predictive function (SPF) in the\n### binary-continuous setting.\n### Aliases: 'plot SPF BinCont' plot.SPF.BinCont\n### Keywords: Plot SPF Causal-Inference framework Sensitivity BinCont\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D data(Schizo_BinCont)\n##D # Use ICA.BinCont to examine surrogacy\n##D Result_BinCont <- ICA.BinCont(M = 1000, Dataset = Schizo_BinCont,\n##D Surr = PANSS, 
True = CGI_Bin, Treat=Treat, Diff.Sigma=TRUE)\n##D \n##D # Obtain SPF\n##D Fit <- SPF.BinCont(x=Result_BinCont, a = -30, b = -3)\n##D \n##D # examine results\n##D summary(Fit)\n##D plot(Fit)\n##D \n##D plot(Fit, Type=\"Most.Likely.DeltaT\")\n## End(Not run)\n\n"} {"package":"Surrogate","topic":"plot.TrialLevelIT","snippet":"### Name: plot TrialLevelIT\n### Title: Provides a plot of trial-level surrogacy in the\n### information-theoretic framework based on the output of the\n### 'TrialLevelIT()' function\n### Aliases: plot.TrialLevelIT\n### Keywords: Plot surrogacy Information-theoretic framework Trial-level\n### surrogacy Multiple-trial setting\n\n### ** Examples\n\n# Generate vector treatment effects on S\nset.seed(seed = 1)\nAlpha.Vector <- seq(from = 5, to = 10, by=.1) + runif(min = -.5, max = .5, n = 51)\n\n# Generate vector treatment effects on T\nset.seed(seed=2)\nBeta.Vector <- (Alpha.Vector * 3) + runif(min = -5, max = 5, n = 51)\n\n# Apply the function to estimate R^2_{h.t}\n# (note: the effect vectors contain 51 trials)\nFit <- TrialLevelIT(Alpha.Vector=Alpha.Vector,\nBeta.Vector=Beta.Vector, N.Trial=51, Model=\"Reduced\")\n\n# Plot the results\nplot(Fit)\n\n\n"} {"package":"Surrogate","topic":"plot.TrialLevelMA","snippet":"### Name: plot TrialLevelMA\n### Title: Provides a plot of trial-level surrogacy in the meta-analytic\n### framework based on the output of the 'TrialLevelMA()' function\n### Aliases: plot.TrialLevelMA\n### Keywords: Plot surrogacy Meta-analytic framework Trial-level surrogacy\n### Multiple-trial setting\n\n### ** Examples\n\n# Generate vector treatment effects on S\nset.seed(seed = 1)\nAlpha.Vector <- seq(from = 5, to = 10, by=.1) + runif(min = -.5, max = .5, n = 51)\n# Generate vector treatment effects on T\nset.seed(seed=2)\nBeta.Vector <- (Alpha.Vector * 3) + runif(min = -5, max = 5, n = 51)\n# Vector of sample sizes of the trials (here, all n_i=10)\nN.Vector <- rep(10, times=51)\n\n# Apply the function to estimate R^2_{trial}\nFit <- TrialLevelMA(Alpha.Vector=Alpha.Vector,\nBeta.Vector=Beta.Vector, N.Vector=N.Vector)\n\n# Plot the results and obtain summary\nplot(Fit)\nsummary(Fit)\n\n\n"} {"package":"Surrogate","topic":"plot.TwoStageSurvSurv","snippet":"### Name: plot TwoStageSurvSurv\n### Title: Plots trial-level surrogacy in the meta-analytic framework when\n### two survival endpoints are considered.\n### Aliases: plot.TwoStageSurvSurv\n### Keywords: Plot surrogacy Information-theoretic framework Trial-level\n### surrogacy Multiple-trial setting Single-trial setting Survival\n### endpoints\n\n### ** Examples\n\n# Open Ovarian dataset\ndata(Ovarian)\n# Conduct analysis\nResults <- TwoStageSurvSurv(Dataset = Ovarian, Surr = Pfs, SurrCens = PfsInd,\nTrue = Surv, TrueCens = SurvInd, Treat = Treat, Trial.ID = Center)\n# Examine results of analysis\nsummary(Results)\nplot(Results)\n\n\n"} {"package":"Surrogate","topic":"plot.comb27.BinBin","snippet":"### Name: plot.comb27.BinBin\n### Title: Plots the distribution of prediction error functions in\n### decreasing order of appearance.\n### Aliases: plot.comb27.BinBin\n### Keywords: Causal-Inference framework Counterfactuals Single-trial\n### setting ICA PPE\n\n### ** Examples\n\n## Not run: \n##D # time consuming code part\n##D CIGTS_27 <- comb27.BinBin(pi1_1_ = 0.3412, pi1_0_ = 0.2539, pi0_1_ = 0.119, \n##D pi_1_1 = 0.6863, pi_1_0 = 0.0882, pi_0_1 = 0.0784, \n##D Seed=1,Monotonicity=c(\"No\"), M=500000) \n##D plot.comb27.BinBin(CIGTS_27,lab=\"CIGTS\")\n## End(Not run)\n\n\n"} 
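{"package":"Surrogate","topic":"RandVec (editor's sketch)","snippet":"### Note: editor's illustrative sketch; this entry is not part of the\n### Surrogate package documentation. RandVec(a=0, b=1, s=1, n=10, m=2)\n### draws m vectors of length n with the fixed sum s. For these argument\n### values the target distribution is uniform on the simplex, which can be\n### approximated with base R only, by normalising i.i.d. exponential draws\n### (normalised Exp(1) draws follow a Dirichlet(1,...,1) distribution,\n### i.e. uniform on the simplex).\n\n### ** Examples\n\nset.seed(1)\nn <- 10; m <- 2\n# n x m matrix of i.i.d. Exp(1) draws\nraw <- matrix(rexp(n * m), nrow = n)\n# divide each column by its column sum so every column sums to 1\nVectors_sketch <- sweep(raw, 2, colSums(raw), \"/\")\ncolSums(Vectors_sketch) # both columns sum to 1, mirroring RandVec output\n\n\n"}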
{"package":"Surrogate","topic":"sensitivity_analysis_SurvSurv_copula","snippet":"### Name: sensitivity_analysis_SurvSurv_copula\n### Title: Sensitivity analysis for individual causal association\n### Aliases: sensitivity_analysis_SurvSurv_copula\n\n### ** Examples\n\nlibrary(Surrogate)\ndata(\"Ovarian\")\n# For simplicity, data is not recoded to semi-competing risks format, but the\n# data are left in the composite event format.\ndata = data.frame(\n Ovarian$Pfs,\n Ovarian$Surv,\n Ovarian$Treat,\n Ovarian$PfsInd,\n Ovarian$SurvInd\n)\novarian_fitted =\n fit_model_SurvSurv(data = data,\n copula_family = \"clayton\",\n n_knots = 1)\n# Illustration with small number of replications and low precision\nsensitivity_analysis_SurvSurv_copula(ovarian_fitted,\n n_sim = 5,\n n_prec = 2000,\n copula_family2 = \"clayton\",\n cond_ind = TRUE)\n\n\n\n\n"} {"package":"SPEDInstabR","topic":"ContrBB","snippet":"### Name: ContrBB\n### Title: BEANPLOTS AND BOXPLOTS OF CONTRIBUTION OF FACTORS\n### Aliases: ContrBB\n### Keywords: ContrBB\n\n### ** Examples\n\n\ndata(FishFC)\n\nContrBB(data=FishFC, vars=c(\"Altitude\",\"Aspect\",\"BIO1\",\"BIO12\", \"BIO14\",\"BIO15\",\"BIO18\",\n\"BIO19\",\"BIO2\",\"BIO3\",\"BIO4\",\"BIO8\",\"Pop\",\"TPP\",\"Slope\",\"TH24\",\"VI\"), graph=\"boxplot\")\n\n\n"} {"package":"SPEDInstabR","topic":"PreExt","snippet":"### Name: PreExt\n### Title: COMPARISON OF THE FREQUENCIES OF A FACTOR BETWEEN PRESENCES AND\n### THE EXTENT\n### Aliases: PreExt\n### Keywords: PreExt\n\n### ** Examples\n\n\ndata(Instability)\n\nPreExt(data=Instability, var=\"Variable\", envar=\"Altitude\", Interval=\"Interval\",\nInterval.Value=\"Interval.Value\", Pre=\"Presence.Prop\", Extent=\"Extent.Prop\",\nXLAB=\"Altitude (m)\")\n\n\n"} {"package":"SPEDInstabR","topic":"Rmap","snippet":"### Name: Rmap\n### Title: RASTER MAP\n### Aliases: Rmap\n### Keywords: Rmap\n\n### ** Examples\n\n## Not run: \n##D \n##D #Example 1\n##D \n##D #If using RWizard, for better-quality geographic\n##D #coordinates, replace data(adworld) by @_Build_AdWorld_\n##D data(adworld)\n##D data(VI)\n##D Rmap(data=VI, colscale=rev(heat.colors(100)),\n##D main= \"Percentage of contribution of vegetation index\")\n##D \n##D #Example 2. Only to be used with RWizard; the map is exported to a jpg\n##D \n##D data(VI)\n##D @_Build_AdWorld_\n##D Rmap(data = VI , Area = c(\"Argentina\", \"Bolivia\", \"Brazil\", \"Chile\", \"Colombia\",\n##D \"Ecuador\", \"French Guiana\", \"Guyana\", \"Paraguay\", \"Peru\", \"Suriname\",\n##D \"Uruguay\", \"Venezuela\",\"Panama\",\"Nicaragua\",\"Costa Rica\"),\n##D main = \"Percentage of contribution of vegetation index\", jpg=TRUE)\n## End(Not run)\n\n\n"} {"package":"future.batchtools","topic":"batchtools_custom","snippet":"### Name: batchtools_custom\n### Title: Batchtools futures for custom batchtools configuration\n### Aliases: batchtools_custom\n\n### ** Examples\n\noptions(error = function(...) 
{\n print(traceback())\n})\n\ncf <- batchtools::makeClusterFunctionsInteractive(external = TRUE)\nprint(cf)\nstr(cf)\nplan(batchtools_custom, cluster.functions = cf)\nprint(plan())\nprint(nbrOfWorkers())\n\n## Create explicit future\nf <- future({\n cat(\"PID:\", Sys.getpid(), \"\\n\")\n 42L\n})\nprint(f)\nv <- value(f)\nprint(v)\n\noptions(error = NULL)\n\n\n"} {"package":"future.batchtools","topic":"batchtools_local","snippet":"### Name: batchtools_local\n### Title: batchtools local and interactive futures\n### Aliases: batchtools_local batchtools_interactive batchtools_bash\n\n### ** Examples\n\n## Use local batchtools futures\nplan(batchtools_local)\n\n## A global variable\na <- 1\n\n## Create explicit future\nf <- future({\n b <- 3\n c <- 2\n a * b * c\n})\nv <- value(f)\nprint(v)\n\n\n## Create implicit future\nv %<-% {\n b <- 3\n c <- 2\n a * b * c\n}\nprint(v)\n\n\n"} {"package":"future.batchtools","topic":"future.batchtools","snippet":"### Name: future.batchtools\n### Title: future.batchtools: A Future for batchtools\n### Aliases: future.batchtools future.batchtools-package\n\n### ** Examples\n\nlibrary(future.batchtools)\n\n## Use local batchtools futures\nplan(batchtools_local)\n\n## A global variable\na <- 1\n\nv %<-% {\n b <- 3\n c <- 2\n a * b * c\n}\n\nprint(v)\n## No test: \nplan(batchtools_local)\ndemo(\"mandelbrot\", package = \"future\", ask = FALSE)\n## End(No test)\n\n\n\n"} {"package":"future.batchtools","topic":"future.batchtools.options","snippet":"### Name: future.batchtools.options\n### Title: Options used for batchtools futures\n### Aliases: future.batchtools.options future.cache.path future.delete\n### R_FUTURE_CACHE_PATH R_FUTURE_DELETE future.batchtools.expiration.tail\n### future.batchtools.output future.batchtools.workers\n### R_FUTURE_BATCHTOOLS_EXPIRATION_TAIL R_FUTURE_BATCHTOOLS_OUTPUT\n### R_FUTURE_BATCHTOOLS_WORKERS\n\n### ** Examples\n\n# Set an R option:\noptions(future.cache.path = \"/cluster-wide/folder/.future\")\n\n\n\n"} {"package":"permPATH","topic":"perm.path","snippet":"### Name: perm.path\n### Title: Perform Permutation Based Pathway Analysis\n### Aliases: perm.path\n\n### ** Examples\n\nset.seed(1234)\n\n## Generate toy phenotype and gene expression data sets\n## This example consists of 40 genes grouped into 5 pathways and 100 patients\n## grp is a binary trait (e.g., case vs control)\n## bp is a continuous trait (e.g., blood pressure)\n## g is a group indicator\n\nn = 100\nK = 40\ngrp = rep(1:0,each=n/2)\nbp = rnorm(n)\ng = rep(1:(n/20), rep(20,n/20))\n\npdat = data.frame(grp, bp, g)\nrm(grp, bp)\nexpdat = matrix(rnorm(K*n),K,n)\n\n## Assign marker names g1,...,gK to the expression data set and\n## patient ids id1,...,idn to the expression and phenotype data\ngnames = paste(\"g\",1:K,sep=\"\")\nrownames(expdat) = gnames\npatid = paste(\"id\",1:n,sep=\"\")\nrownames(pdat) = patid\ncolnames(expdat) = patid\n\n#Group the K genes into M pathways of sizes n1,...,nM\nM = 5\np = runif(M)\np = p/sum(p)\nnM = rmultinom(1, size=K, prob=p)\ngset = lapply(nM, function(x){gnames[sample(x)]})\nnames(gset) = paste(\"pathway\",1:M,sep=\"\")\n\n## Carry out permutation analysis with grp as the outcome\n## using the two-sample Wilcoxon with B=100 random permutations\nperm.path(expdat, y=pdat[[\"grp\"]], 
local.test=\"wilcoxon\", global.test=\"maxmean\", B=100, \ngset=gset, min.num=2, max.num=50, sort=\"score\")\n\n## Carry out permutation analysis with g as the outcome\n## using the JT test with B=100 random permutations\nperm.path(expdat, y=pdat[[\"g\"]], local.test=\"jt\", global.test=\"maxmean\", B=100, \ngset=gset, min.num=2, max.num=50, sort=\"score\")\n\n\n"} {"package":"permPATH","topic":"permPATH2HTML","snippet":"### Name: permPATH2HTML\n### Title: This is a function for creating an HTML file\n### Aliases: permPATH2HTML\n\n### ** Examples\n\n## Generate toy phenotype and gene expression data sets\n## This example consists of 40 genes grouped into 5 pathways and 100 patients\n## grp is a binary trait (e.g., case vs control)\n## bp is a continuous trait (e.g., blood pressure)\nset.seed(1234)\nn = 100\nK = 40\ngrp = rep(1:0,each=n/2)\nbp = rnorm(n)\n\npdat = data.frame(grp, bp)\nrm(grp, bp)\nexpdat = matrix(rnorm(K*n),K,n)\n\n## Assign marker names g1,...,gK to the expression data set and\n## patient ids id1,...,idn to the expression and phenotype data\ngnames = paste(\"g\",1:K,sep=\"\")\nrownames(expdat) = gnames\npatid = paste(\"id\",1:n,sep=\"\")\nrownames(pdat) = patid\ncolnames(expdat) = patid\n\n#Group the K genes into M pathways of sizes n1,...,nM\nM = 5\np = runif(M)\np = p/sum(p)\nnM = rmultinom(1, size=K, prob=p)\ngset = lapply(nM, function(x){gnames[sample(x)]})\nnames(gset) = paste(\"pathway\",1:M,sep=\"\")\n\n## Carry out permutation analysis with grp as the outcome\n## using the two-sample Wilcoxon with B=100 random permutations\nres = perm.path(expdat, y=pdat[[\"grp\"]], local.test=\"wilcoxon\", global.test=\"maxmean\", \nB=100, gset=gset, min.num=2, max.num=50, sort=\"score\")\n\n# create an html file\n#permPATH2HTML(res, dir=\"/dir/\", fname=\"tophits\")\n\n\n"} {"package":"sregsurvey","topic":"sreg_ber","snippet":"### Name: sreg_ber\n### Title: Semiparametric Model-Assisted Estimation under a Bernoulli\n### Sampling Design\n### Aliases: sreg_ber\n\n### ** Examples\n\n#This example uses the data set 'apipop' of the survey package.\nlibrary(sregsurvey)\nlibrary(survey)\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(gamlss)\ndata(api)\nattach(apipop)\nApipop <- filter(apipop,full!= 'NA')\nApipop <- filter(Apipop, stype == 'H')\nApipop <- Apipop %>% dplyr::select(api00,grad.sch,full)\nfit <- sreg_ber(api00 ~ pb(grad.sch), scale_formula = ~ full - 1, data= Apipop, pi=0.2)\nfit\n# The total population value is\ntrue_total <- sum(Apipop$api00)\n# The estimated relative bias in percentage is\nround(abs((fit$estimated_total_y_sreg - true_total)/true_total),3)*100\n\n\n"} {"package":"sregsurvey","topic":"sreg_pips","snippet":"### Name: sreg_pips\n### Title: Semiparametric Model-Assisted Estimation under a Proportional to\n### Size Sampling Design\n### Aliases: sreg_pips\n\n### ** Examples\n\nlibrary(sregsurvey)\nlibrary(survey)\nlibrary(dplyr)\nlibrary(gamlss)\ndata(api)\nattach(apipop)\nApipop <- filter(apipop,full!= 'NA')\nApipop <- filter(Apipop, stype == 'H')\nApipop <- Apipop %>% dplyr::select(api00,grad.sch,full,api99)\nn=ceiling(0.2*dim(Apipop)[1])\naux_var <- Apipop %>% dplyr::select(api99)\nfit <- sreg_pips(api00 ~ pb(grad.sch), scale_formula = ~ full - 1, data= Apipop, x= aux_var, n=n)\nfit\n# The total population value is\ntrue_total <- sum(Apipop$api00)\n# The estimated relative bias in percentage is\nround(abs((fit$estimated_total_y_sreg - true_total)/true_total),3)*100\n\n\n"} {"package":"sregsurvey","topic":"sreg_poisson","snippet":"### Name: sreg_poisson\n### 
Title: Semiparametric Model-Assisted Estimation under a Poisson\n### Sampling Design\n### Aliases: sreg_poisson\n\n### ** Examples\n\nlibrary(sregsurvey)\nlibrary(survey)\nlibrary(dplyr)\nlibrary(gamlss)\ndata(api)\nattach(apipop)\nApipop <- filter(apipop,full!= 'NA')\nApipop <- filter(Apipop, stype == 'H')\nApipop <- Apipop %>% dplyr::select(api00,grad.sch,full)\nfit <- sreg_poisson(api00 ~ pb(grad.sch), scale_formula = ~ full - 1, data= Apipop)\nfit\n# The total population value is\ntrue_total <- sum(Apipop$api00)\n# The estimated relative bias in percentage is\nround(abs((fit$estimated_total_y_sreg - true_total)/true_total),3)*100\n\n\n"} {"package":"sregsurvey","topic":"sreg_srswr","snippet":"### Name: sreg_srswr\n### Title: Semiparametric Model-Assisted Estimation under a Simple Random\n### Sampling Without Replacement Sampling Design\n### Aliases: sreg_srswr\n\n### ** Examples\n\nlibrary(sregsurvey)\nlibrary(survey)\nlibrary(dplyr)\nlibrary(gamlss)\ndata(api)\nattach(apipop)\nApipop <- filter(apipop,full!= 'NA')\nApipop <- filter(Apipop, stype == 'H')\nApipop <- Apipop %>% dplyr::select(api00,grad.sch,full)\nfit <- sreg_srswr(api00 ~ pb(grad.sch), scale_formula = ~ full - 1, data= Apipop, fraction=0.25)\n# The total population value is\ntrue_total <- sum(Apipop$api00)\n# The estimated relative bias in percentage is\nround(abs((fit$estimated_total_y_sreg - true_total)/true_total),3)*100\n\n\n"} {"package":"sregsurvey","topic":"sreg_stsi","snippet":"### Name: sreg_stsi\n### Title: Semiparametric Model-Assisted Estimation under a Stratified\n### Sampling with Simple Random Sampling Without Replacement in each stratum.\n### Aliases: sreg_stsi\n\n### ** Examples\n\nlibrary(sregsurvey)\nlibrary(survey)\nlibrary(dplyr)\nlibrary(magrittr)\nlibrary(gamlss)\ndata(api)\nattach(apipop)\nApipop <- filter(apipop,full!= 'NA')\nApipop <- Apipop %>% dplyr::select(api00,grad.sch,full,stype)\ndim(Apipop)\nfit <- sreg_stsi(api00~ pb(grad.sch), scale_formula =~ full-1, n=400, stratum='stype', data=Apipop)\nfit\n# The total population value is\ntrue_total <- sum(Apipop$api00)\n# The estimated relative bias in percentage is\nround(abs((fit$estimated_total_y_sreg - true_total)/true_total),3)*100\n\n\n\n"} {"package":"gen3sis","topic":"create_input_config","snippet":"### Name: create_input_config\n### Title: Creates either an empty configuration or a pre-filled\n### configuration object from a config file\n### Aliases: create_input_config\n\n### ** Examples\n\n# create empty config object\nconfig_empty <- create_input_config(config_file = NA)\n\n# create a config object from config_file\n# get path to example config\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\npath_config <- file.path(datapath, \"config/config_worldcenter.R\")\nconfig_object <- create_input_config(config_file = path_config)\n\n# change seed of config_worldcenter config object\nconfig_object$gen3sis$general$random_seed <- 2020\n\n# run the model for config_object\n## No test: \n sim <- run_simulation(config = config_object, \n landscape = file.path(datapath, \"landscape\"), \n output_directory = tempdir())\n## End(No test)\n\n\n"} {"package":"gen3sis","topic":"create_input_landscape","snippet":"### Name: create_input_landscape\n### Title: create a landscape input from a named list of rasters or raster\n### files\n### Aliases: create_input_landscape\n\n### ** Examples\n\n\n# load needed library\nlibrary(raster)\n\n# get path containing example rasters\ndatapath <- system.file(file.path(\"extdata\", 
\"WorldCenter\"), package=\"gen3sis\")\n\n# create raster bricks\ntemperature_brick <- brick(file.path(datapath, \"input_rasters/temp_rasters.grd\"))\naridity_brick <- brick(file.path(datapath, \"input_rasters/arid_rasters.grd\"))\narea_brick <- brick(file.path(datapath, \"input_rasters/area_rasters.grd\"))\n\n# create sub-list of environmental variables for fast example \n# (i.e. 4 time-steps)\nlandscapes_sub_list <- list(temp=NULL, arid=NULL, area=NULL)\nfor(i in 1:4){\n landscapes_sub_list$temp <- c(landscapes_sub_list$temp, temperature_brick[[i]])\n landscapes_sub_list$arid <- c(landscapes_sub_list$arid, aridity_brick[[i]])\n landscapes_sub_list$area <- c(landscapes_sub_list$area, area_brick[[i]])\n}\n\n# define cost function, crossing water as double as land sites\ncost_function_water <- function(source, habitable_src, dest, habitable_dest) {\n if(!all(habitable_src, habitable_dest)) {\n return(2/1000)\n } else {\n return(1/1000)\n }\n}\n\n## Not run: \n##D # create input landscape ready for gen3sis from sub-list \n##D # (i.e. 10 time-steps) and only local-distances.\n##D create_input_landscape(\n##D landscapes = landscapes_sub_list,\n##D cost_function = cost_function_water,\n##D output_directory = file.path(tempdir(), \"landscape_sub\"),\n##D directions = 8, # surrounding sites for each site\n##D timesteps = paste0(round(150:147,2), \"Ma\"),\n##D calculate_full_distance_matrices = FALSE) # full distance matrix\n##D \n##D \n##D # create list of all environmental variables available\n##D landscapes_list <- list(temp=NULL, arid=NULL, area=NULL)\n##D for(i in 1:nlayers(temperature_brick)){\n##D landscapes_list$temp <- c(landscapes_list$temp, temperature_brick[[i]])\n##D landscapes_list$arid <- c(landscapes_list$arid, aridity_brick[[i]])\n##D landscapes_list$area <- c(landscapes_list$area, area_brick[[i]])\n##D }\n##D \n##D # create input landscape ready for gen3sis (~ 3min run-time)\n##D # and full distance matrix\n##D create_input_landscape(\n##D landscapes = landscapes_list,\n##D cost_function = cost_function_water,\n##D output_directory = file.path(tempdir(), \"landscape_WorldCenter_5\"),\n##D directions = 8, # surrounding sites for each site\n##D timesteps = paste0(round(150:100,2), \"Ma\"),\n##D crs=\"+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0\",\n##D calculate_full_distance_matrices = FALSE) # full distance matrix\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"create_species","snippet":"### Name: create_species\n### Title: Creates a new species\n### Aliases: create_species\n\n### ** Examples\n\n## Not run: \n##D # inside a create_ancestor_species function of a config taking a landscape and a config\n##D # create_species creates a new species\n##D \n##D # define range of species for the entire world in this case lat long system\n##D range <- c(-180, 180, -90, 90)\n##D \n##D ## select coordinates within the range stipulated above\n##D # takes landscape coordinates\n##D co <- landscape$coordinates\n##D # select coordinates within the range\n##D selection <- co[, \"x\"] >= range[1] &\n##D co[, \"x\"] <= range[2] &\n##D co[, \"y\"] >= range[3] &\n##D co[, \"y\"] <= range[4]\n##D # get the initial cells\n##D initial_cells <- rownames(co)[selection]\n##D \n##D # call create_species\n##D new_species <- create_species(initial_cells, config)\n##D \n##D # extra: set local adaptation to max optimal temp equals local temp\n##D new_species$traits[ , \"temp\"] <- landscape$environment[,\"temp\"]\n##D \n##D # extra: set a certaintrait (e.g. 
traitX) to one on all populations of this species\n##D new_species$traits[ , \"traitX\"] <- 1\n##D \n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"gen3sis","snippet":"### Name: gen3sis\n### Title: gen3sis: General Engine for Eco-Evolutionary Simulations\n### Aliases: gen3sis\n### Keywords: IO iteration methods programming utilities\n\n### ** Examples\n\n## Not run: \n##D \n##D # 1. Load gen3sis and make sure all necessary input data is available (landscape and config).\n##D \n##D library(gen3sis)\n##D \n##D # get path to example input inside package\n##D datapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\n##D path_config <- file.path(datapath, \"config/config_worldcenter.R\")\n##D path_landscape <- file.path(datapath, \"landscape\")\n##D \n##D # 2. Run simulation\n##D \n##D sim <- run_simulation(config = path_config, landscape = path_landscape)\n##D \n##D # 3. Visualize the outputs\n##D \n##D # plot summary of entire simulation\n##D plot_summary(sim)\n##D \n##D # plot richness at a given time-step \n##D # this only works if species is saved for this time-step\n##D landscape_t_150 <- readRDS(file.path(datapath, \n##D \"output\", \"config_worldcenter\", \"landscapes\", \"landscape_t_150.rds\")) \n##D species_t_150 <- readRDS(file.path(datapath, \n##D \"output\", \"config_worldcenter\", \"species\", \"species_t_150.rds\")) \n##D plot_richness(species_t_150, landscape_t_150) \n##D \n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"get_divergence_matrix","snippet":"### Name: get_divergence_matrix\n### Title: Returns the full divergence matrix for a given species (site x\n### site).\n### Aliases: get_divergence_matrix\n\n### ** Examples\n\n# get path containing example rasters\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package=\"gen3sis\")\n# get species at t0\nspecies_t_0 <- readRDS(file.path(datapath, \"output/config_worldcenter/species/species_t_0.rds\"))\n# get divergence matrix from species 1\ndivergence_sp1_t0 <- get_divergence_matrix(species_t_0[[1]])\n# get divergence matrix from species 12\ndivergence_sp12_t0 <- get_divergence_matrix(species_t_0[[12]])\n# note that species 1 has no divergence between its populations, while species 12 does.\n\n\n"} {"package":"gen3sis","topic":"get_geo_richness","snippet":"### Name: get_geo_richness\n### Title: calculate the richness of a list of species over a given\n### landscape\n### Aliases: get_geo_richness\n\n### ** Examples\n\n# get path containing example rasters\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package=\"gen3sis\")\n# get species at t0\nspecies_t_0 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/species/species_t_0.rds\"))\n# get landscape at t0\nlandscape_t_0 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/landscapes/landscape_t_0.rds\"))\n# get geo richness\nrichness_t_0 <- get_geo_richness(species_t_0, landscape_t_0)\n\n# histogram of richness at t0\nhist(richness_t_0)\n\n## plot richness using raster and gen3sis color_richness (see plot_richness for alternative)\n# combine richness and geographical coordinates\ngeo_richness_t_0 <- cbind(landscape_t_0$coordinates, richness_t_0)\nlibrary(raster)\nplot(rasterFromXYZ(geo_richness_t_0), col=color_richness(20))
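\n\n## Illustrative aside (editor's note, hedged): get_geo_richness() tallies, per\n## site, how many species occupy it. Assuming each species stores a named\n## abundance vector keyed by occupied cell id (check str(species_t_0[[1]])\n## first -- this field layout is an assumption), an equivalent hand\n## computation would be roughly:\n# occ <- unlist(lapply(species_t_0, function(sp) names(sp$abundance)))\n# richness_check <- table(occ) # should agree with richness_t_0 where > 0\n\n\n"} {"package":"gen3sis","topic":"plot_ranges","snippet":"### Name: plot_ranges\n### Title: Plot species ranges of the given list of species on a landscape\n### Aliases: plot_ranges\n\n### ** Examples\n\n## plot from saved outputs\n# get path containing outputs\ndatapath <- 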
system.file(file.path(\"extdata\", \"WorldCenter\"), package=\"gen3sis\")\n# get species at t0\nspecies_t_50 <- readRDS(file.path(datapath,\n \"output/config_worldcenter/species/species_t_50.rds\"))\n# get landscape at t0\nlandscape_t_50 <- readRDS(file.path(datapath,\n \"output/config_worldcenter/landscapes/landscape_t_50.rds\"))\n# plot range\nplot_ranges(species_t_50, landscape_t_50)\n\n# get species at t0\nspecies_t_25 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/species/species_t_25.rds\"))\n# get landscape at t0\nlandscape_t_25 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/landscapes/landscape_t_25.rds\"))\n# plot ranges at intermediate time-step\nplot_ranges(species_t_25, landscape_t_25, disturb = 2, max_sps = 20)\n\n## plot from within observer\n# call plot_richness from inside the end_of_timestep_observer function \n# at the config file:\n## Not run: \n##D plot_ranges(data$all_species, data$landscape)\n## End(Not run) \n\n\n"} {"package":"gen3sis","topic":"plot_raster_single","snippet":"### Name: plot_raster_single\n### Title: Plot a single set of values onto a given landscape\n### Aliases: plot_raster_single\n\n### ** Examples\n\n# get path to output objects\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\n\n# plot environmental variables at a given step\nlandscape_t_25 <- readRDS(\n file.path(datapath, \"output\", \"config_worldcenter\", \"landscapes\", \"landscape_t_25.rds\"))\noldpar <- par(no.readonly = TRUE)\npar(mfrow=c(1,2))\nplot_raster_single(landscape_t_25$environment[,\"temp\"], landscape_t_25, \"Temperature\", NA)\n# use col to change the color\nplot_raster_single(landscape_t_25$environment[,\"arid\"], landscape_t_25, \"Aridity\", NA, \n col=topo.colors(5))\npar(oldpar)\n# note that these values were scaled by the configuration object\n\n\n"} {"package":"gen3sis","topic":"plot_richness","snippet":"### Name: plot_richness\n### Title: Plot the richness of the given list of species on a landscape\n### Aliases: plot_richness\n\n### ** Examples\n\n## plot from saved outputs\n# get path containing example rasters\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package=\"gen3sis\")\n# get species at t0\nspecies_t_0 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/species/species_t_0.rds\"))\n# get landscape at t0\nlandscape_t_0 <- readRDS(file.path(datapath, \n \"output/config_worldcenter/landscapes/landscape_t_0.rds\"))\n# plot richness\nplot_richness(species_t_0, landscape_t_0)\n\n\n## plot from within observer\n# call plot_richness from inside the end_of_timestep_observer function \n# at the config file:\n## Not run: \n##D plot_richness(data$all_species, data$landscape)\n## End(Not run) \n\n\n"} {"package":"gen3sis","topic":"plot_species_abundance","snippet":"### Name: plot_species_abundance\n### Title: Plot a species' abundance on a given landscape\n### Aliases: plot_species_abundance\n\n### ** Examples\n\n# get path to output objects\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\n\n# load landscape and species at time step zero\nlandscape_t_0 <- readRDS(\n file.path(datapath, \"output/config_worldcenter\", \"landscapes\", \"landscape_t_0.rds\"))\nspecies_t_0 <- readRDS(\n file.path(datapath, \"output/config_worldcenter\", \"species\", \"species_t_0.rds\"))\n\n# plot species 13 range and abundances\nplot_species_abundance(species_t_0[[13]], landscape_t_0)\n# oh, a South American one!\n\n# plot ranges and abundances of 3 
species (i.e. 1, 7 and 11)\noldpar <- par(no.readonly = TRUE)\npar(mfrow=c(1,3))\nplot_species_abundance(species_t_0[[1]], landscape_t_0)\nplot_species_abundance(species_t_0[[7]], landscape_t_0)\nplot_species_abundance(species_t_0[[11]], landscape_t_0)\npar(oldpar)\n\n\n"} {"package":"gen3sis","topic":"plot_species_presence","snippet":"### Name: plot_species_presence\n### Title: Plot a species' presence on a given landscape\n### Aliases: plot_species_presence\n\n### ** Examples\n\n# get path to output objects\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\n\n# load landscape and species at time step zero\nlandscape_t_0 <- readRDS(\n file.path(datapath, \"output/config_worldcenter\", \"landscapes\", \"landscape_t_0.rds\"))\nspecies_t_0 <- readRDS(\n file.path(datapath, \"output/config_worldcenter\", \"species\", \"species_t_0.rds\"))\n\n# plot species 13 range\nplot_species_presence(species_t_0[[13]], landscape_t_0)\n# oh, a South American one!\n\n# plot ranges of 3 species (i.e. 1, 7 and 11)\noldpar <- par(no.readonly = TRUE)\npar(mfrow=c(1,3))\nplot_species_presence(species_t_0[[1]], landscape_t_0)\nplot_species_presence(species_t_0[[7]], landscape_t_0)\nplot_species_presence(species_t_0[[11]], landscape_t_0)\npar(oldpar)\n\n\n"} {"package":"gen3sis","topic":"plot_summary","snippet":"### Name: plot_summary\n### Title: Plot simulation default summary object\n### Aliases: plot_summary\n\n### ** Examples\n\n# load existing summary example\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\noutput <- readRDS(file.path(datapath, \"output/config_worldcenter/sgen3sis.rds\"))\n# plot output summary\nplot_summary(output)\n\nplot_summary(output, summary_title=\"Example\")\n\n## No test: \n## run simulation and plot summary\n# get path to the correct input objects\ndatapath <- system.file(file.path(\"extdata\", \"CaseStudy1\"), package=\"gen3sis\")\n# run simulation and store summary object to output\noutput <- run_simulation(config = file.path(datapath,\"config/config_fast.R\"), \n landscape = file.path(datapath,\"landscape\"),\n output_directory = tempdir())\n# plot output summary\nplot_summary(output)\n## End(No test)\n\n\n"} {"package":"gen3sis","topic":"prepare_directories","snippet":"### Name: prepare_directories\n### Title: Checks if the necessary directories exist, and otherwise creates\n### them\n### Aliases: prepare_directories\n\n### ** Examples\n\n## Not run: \n##D # this is an internal function used to derive directories by deduction\n##D # called at the start of a simulation run\n##D datapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package = \"gen3sis\")\n##D # deducing input directory and setting output directory\n##D prepare_directories(config_file = file.path(datapath, \"config/config_worldcenter.R\"))\n##D # setting output directory\n##D prepare_directories(config_file = file.path(datapath, \"config/config_worldcenter.R\"), \n##D input_directory = file.path(datapath, \"landscape\"))\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"run_simulation","snippet":"### Name: run_simulation\n### Title: Run a simulation in gen3sis and return a summary object possibly\n### saving outputs and plots to the output folder\n### Aliases: run_simulation\n\n### ** Examples\n\n## No test: \n# get path to the correct input objects\ndatapath <- system.file(file.path(\"extdata\", \"CaseStudy1\"), package=\"gen3sis\")\n\n# run simulation and store summary object to sim\nsim <- run_simulation(config = 
file.path(datapath,\"config/config_fast.R\"), \n landscape = file.path(datapath,\"landscape\"),\n output_directory = tempdir())\n\n# plot summary object\nplot_summary(sim)\n## End(No test)\n\n\n"} {"package":"gen3sis","topic":"save_abundance","snippet":"### Name: save_abundance\n### Title: This function can be called within the observer function to save\n### the species abundances.\n### Aliases: save_abundance\n\n### ** Examples\n\n## Not run: \n##D ## save abundances from within observer\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_abundance()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_divergence","snippet":"### Name: save_divergence\n### Title: This function can be called within the observer function to save\n### the compressed species divergence.\n### Aliases: save_divergence\n\n### ** Examples\n\n## Not run: \n##D ## save divergences from within observer for each species\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_divergence()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_landscape","snippet":"### Name: save_landscape\n### Title: This function can be called within the observer function to save\n### the current landscape, can be called independently by the user and is\n### called by other observer functions relying on the landscape to be\n### present (e.g. save_species)\n### Aliases: save_landscape\n\n### ** Examples\n\n## Not run: \n##D ## save landscape from within observer for each species\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_landscape()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_occupancy","snippet":"### Name: save_occupancy\n### Title: This function can be called within the observer function to save\n### the current occupancy pattern\n### Aliases: save_occupancy\n\n### ** Examples\n\n## Not run: \n##D ## save occupancies from within observer\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_occupancy()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_phylogeny","snippet":"### Name: save_phylogeny\n### Title: This function can be called within the observer function to save\n### the current phylogeny.\n### Aliases: save_phylogeny\n\n### ** Examples\n\n## Not run: \n##D ## save phylogeny as a nexus tree from within observer for each species\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_phylogeny()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_richness","snippet":"### Name: save_richness\n### Title: This function can be called within the observer function to save\n### the current richness pattern\n### Aliases: save_richness\n\n### ** Examples\n\n## Not run: \n##D ## save the current richness pattern from within observer for each species\n##D # this functions should be called inside the end_of_timestep_observer function at the config file:\n##D save_richness()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_species","snippet":"### Name: save_species\n### Title: This function can be called within the observer function to save\n### the full species list.\n### Aliases: save_species\n\n### ** Examples\n\n## Not run: \n##D #adding the call to the end_of_timestep_observer function at the config file or object \n##D #will automatically save all the species at an rds 
file in the outputfolder/species folder\n##D # and the respective landscape in outputfolder/landscapes for the time steps the observer \n##D # function is called (i.e. the call_observer parameter of the run_simulation function)\n##D save_species()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"save_traits","snippet":"### Name: save_traits\n### Title: This function can be called within the observer function to save\n### the species traits.\n### Aliases: save_traits\n\n### ** Examples\n\n## Not run: \n##D ## save the current traits pattern from within observer for each population of each species\n##D # this function should be called inside the end_of_timestep_observer function in the config file:\n##D save_traits()\n## End(Not run)\n\n\n"} {"package":"gen3sis","topic":"verify_config","snippet":"### Name: verify_config\n### Title: Verifies if all required config fields are provided\n### Aliases: verify_config\n\n### ** Examples\n\n# get path to input config\ndatapath <- system.file(file.path(\"extdata\", \"WorldCenter\"), package=\"gen3sis\")\npath_config <- file.path(datapath, \"config/config_worldcenter.R\")\n# create config object\nconfig_object <- create_input_config(path_config)\n# check class\nclass(config_object)\n# verify config\nverify_config(config_object) # TRUE! this is a valid config\n\n# break config_object, change name random_seed to r4nd0m_s33d\nnames(config_object$gen3sis$general)[1] <- \"r4nd0m_s33d\"\nverify_config(config_object) # FALSE! this is an invalid config\n\n\n"} {"package":"gen3sis","topic":"write_config_skeleton","snippet":"### Name: write_config_skeleton\n### Title: Writes out a config skeleton\n### Aliases: write_config_skeleton\n\n### ** Examples\n\n# set config_empty.R file path\nconfig_file_path <- file.path(tempdir(), \"config_empty.R\")\n# writes out a config skeleton\nwrite_config_skeleton(config_file_path)\n\n\n"} {"package":"choroplethrMaps","topic":"country.map","snippet":"### Name: country.map\n### Title: A world map\n### Aliases: country.map\n\n### ** Examples\n\n## Not run: \n##D # render the map with ggplot2\n##D library(ggplot2)\n##D \n##D data(country.map)\n##D ggplot(country.map, aes(long, lat, group=group)) + geom_polygon()\n## End(Not run)\n\n\n"} {"package":"choroplethrMaps","topic":"country.regions","snippet":"### Name: country.regions\n### Title: Names of all regions on the country.map data.frame. 
A data.frame\n### that includes both English names and their iso2c equivalents.\n### Aliases: country.regions\n\n### ** Examples\n\ndata(country.regions)\nhead(country.regions)\n\n\n"} {"package":"choroplethrMaps","topic":"county.map","snippet":"### Name: county.map\n### Title: Map of the counties of each of the 50 US states plus the\n### district of columbia.\n### Aliases: county.map\n\n### ** Examples\n\n## Not run: \n##D # render the map with ggplot2\n##D library(ggplot2)\n##D \n##D data(county.map)\n##D ggplot(county.map, aes(long, lat, group=group)) + geom_polygon()\n## End(Not run)\n\n\n"} {"package":"choroplethrMaps","topic":"county.regions","snippet":"### Name: county.regions\n### Title: A data.frame consisting of the name of each region in the map\n### county.map as well as their FIPS codes and state names.\n### Aliases: county.regions\n\n### ** Examples\n\ndata(county.regions)\nhead(county.regions)\n\n\n"} {"package":"choroplethrMaps","topic":"state.map","snippet":"### Name: state.map\n### Title: Map of the 50 US states plus the district of columbia.\n### Aliases: state.map\n\n### ** Examples\n\n## Not run: \n##D # render the map with ggplot2\n##D library(ggplot2)\n##D \n##D data(state.map)\n##D ggplot(state.map, aes(long, lat, group=group)) + geom_polygon()\n## End(Not run)\n\n\n"} {"package":"choroplethrMaps","topic":"state.regions","snippet":"### Name: state.regions\n### Title: A data.frame consisting of each region on the map state.map plus\n### their postal code abbreviations and FIPS codes.\n### Aliases: state.regions\n\n### ** Examples\n\ndata(state.regions)\nhead(state.regions)\n\n\n"} {"package":"eply","topic":"eply-package","snippet":"### Name: eply-package\n### Title: The eply package provides ways to call 'eval(parse(text = ...))'\n### in bulk. The 'evals()' function is a vectorized version of\n### 'eval(parse(text = ...))'. 'eply()' is like 'apply(MARGIN = 1)'\n### except that the elements of each row are 'eval(parse(text = ...))\"ed\n### before being supplied to the function.\n### Aliases: eply-package\n\n### ** Examples\n\n# Get an example data frame of commands that evaluate to function arguments.\n.expr <- example.expr()\n.fun <- example.fun # Get an example collection of functions.\n# Get an example list of supporting data. Could be an environment.\n.with <- example.with()\n# Row-by-row, evaluate the code in .expr and feed the results to the function.\neply(.fun = .fun, .expr = .expr, .with = .with)\nevals(x = c(\"a + 1\", \"b + 2\"), .with = .with)\n\n\n"} {"package":"eply","topic":"eply","snippet":"### Name: eply\n### Title: Function 'eply'\n### Aliases: eply\n\n### ** Examples\n\n# Get an example data frame of commands that evaluate to function arguments.\n.expr <- example.expr()\n.fun <- example.fun # Get an example collection of functions.\n# Get an example list of supporting data. Could be an environment.\n.with <- example.with()\n# Row-by-row, evaluate the code in .expr and feed the results to the function.\neply(.fun = .fun, .expr = .expr, .with = .with)\n\n\n"} {"package":"eply","topic":"evals","snippet":"### Name: evals\n### Title: Function 'evals'\n### Aliases: evals\n\n### ** Examples\n\n# Get an example list of supporting data. 
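Could be an environment.\n.with <- example.with()\n# Row-by-row, evaluate the code in .expr and feed the results to the function.\nevals(x = c(\"a + 1\", \"b + 2\"), .with = .with)\n\n## Illustrative aside (editor's sketch, not eply's actual implementation):\n## evals() behaves like a vectorized eval(parse(text = ...)); a minimal\n## base-R equivalent of the call above is\nsapply(c(\"a + 1\", \"b + 2\"), function(s) eval(parse(text = s), envir = .with))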
\n\n"} {"package":"eply","topic":"example.expr","snippet":"### Name: example.expr\n### Title: 'example.expr'\n### Aliases: example.expr\n\n### ** Examples\n\n# Get an example .expr argument to eply().\n# See the examples of the eply() function for usage.\nexample.expr()\n\n\n"} {"package":"eply","topic":"example.fun","snippet":"### Name: example.fun\n### Title: 'example.fun'\n### Aliases: example.fun\n\n### ** Examples\n\n# Get an example .fun argument to eply().\n# See the examples of the eply() function for usage.\nexample.fun\nexample.fun(x = c(4, 2), y = c(2, 2))\n\n\n"} {"package":"eply","topic":"example.with","snippet":"### Name: example.with\n### Title: 'example.with'\n### Aliases: example.with\n\n### ** Examples\n\n# Get an example .with argument to eply() and evals().\n# See the examples of the eply() and evals() functions for usage.\nexample.with()\n\n\n"} {"package":"eply","topic":"help_eply","snippet":"### Name: help_eply\n### Title: Function 'help_eply'\n### Aliases: help_eply\n\n### ** Examples\n\nhelp_eply()\n\n\n"} {"package":"eply","topic":"quotes","snippet":"### Name: quotes\n### Title: Function 'quotes'\n### Aliases: quotes\n\n### ** Examples\n\nquotes(letters[1:3])\nquotes(letters[1:3], single = TRUE)\nquotes(letters[1:3], single = FALSE)\n\n\n"} {"package":"eply","topic":"strings","snippet":"### Name: strings\n### Title: Function 'strings'\n### Aliases: strings\n\n### ** Examples\n\nstrings(a, b, bee)\n\n\n"} {"package":"eply","topic":"unquote","snippet":"### Name: unquote\n### Title: Function 'unquote'\n### Aliases: unquote\n\n### ** Examples\n\nunquote(c(\"x\", \"'y'\", \"\\\"why\\\"\", \"'''z'''\"))\nunquote(c(\"x\", \"'y'\", \"\\\"why\\\"\", \"'''z'''\"), deep = FALSE)\nunquote(c(\"x\", \"'y'\", \"\\\"why\\\"\", \"'''z'''\"), deep = TRUE)\n\n\n"} {"package":"drpop","topic":"informat","snippet":"### Name: informat\n### Title: A function to check whether a given data table/matrix/data frame\n### is in the appropriate format for drpop.\n### Aliases: informat\n\n### ** Examples\n\ndata = matrix(sample(c(0,1), 2000, replace = TRUE), ncol = 2)\nx = matrix(rnorm(nrow(data)*3, 2,1), nrow = nrow(data))\n\ninformat(data = data)\n# this returns TRUE\n\ndata = cbind(data, x)\ninformat(data = data)\n# this returns TRUE\n\ninformat(data = data, K = 3)\n# this returns FALSE\n\n\n"} {"package":"drpop","topic":"plotci","snippet":"### Name: plotci\n### Title: Plot estimated confidence interval of total population size from\n### object of class 'popsize' or 'popsize_cond'.\n### Aliases: plotci\n\n### ** Examples\n\n## No test: \ndata = simuldata(n = 10000, l = 1)$data_xstar\n\np = popsize(data = data, funcname = c(\"logit\", \"gam\"))\nplotci(p)\n\ndata = simuldata(n = 10000, l = 1, categorical = TRUE)$data_xstar\np = popsize_cond(data = data, condvar = 'catcov')\nplotci(p)\n## End(No test)\n\n\n"} {"package":"drpop","topic":"popsize","snippet":"### Name: popsize\n### Title: Estimate total population size and capture probability using\n### user provided set of models or user provided nuisance estimates.\n### Aliases: popsize\n\n### ** Examples\n\n## No test: \ndata = simuldata(1000, l = 3)$data\nqhat = popsize(data = data, funcname = c(\"logit\", \"gam\"), nfolds = 2, margin = 0.005)\npsin_estimate = popsize(data = data, getnuis = qhat$nuis, idfold = qhat$idfold)\n\ndata = simuldata(n = 6000, l = 
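3)$data\npsin_estimate = popsize(data = data[,1:2])\n# this returns the basic plug-in estimate since covariates are absent.\n\npsin_estimate = popsize(data = data, funcname = c(\"gam\", \"rangerlogit\"))\n## End(No test)\n\n## Illustrative aside (editor's note, not drpop code): with two capture lists\n## and no covariates, the classic Lincoln-Petersen estimate of the total\n## population behind such plug-in estimators is n1*n2/n12, where n12 counts\n## records appearing on both lists:\n# n1 <- sum(data[,1]); n2 <- sum(data[,2]); n12 <- sum(data[,1] & data[,2])\n# N_hat <- n1*n2/n12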
\n\n\n"} {"package":"drpop","topic":"popsize_cond","snippet":"### Name: popsize_cond\n### Title: Estimate total population size and capture probability using\n### user provided set of models conditioned on an attribute.\n### Aliases: popsize_cond\n\n### ** Examples\n\n## No test: \ndata = simuldata(n = 10000, l = 2, categorical = TRUE)$data\n\npsin_estimate = popsize_cond(data = data, funcname = c(\"logit\", \"gam\"),\n condvar = 'catcov', PLUGIN = TRUE, TMLE = TRUE)\n# this returns the plug-in, the bias-corrected and the tmle estimate for the\n# two models conditioned on column catcov\n## End(No test)\n\n\n"} {"package":"drpop","topic":"popsize_simul","snippet":"### Name: popsize_simul\n### Title: Estimate the total population size and capture probabilities\n### using perturbed true nuisance functions.\n### Aliases: popsize_simul\n\n### ** Examples\n\nsimulresult = simuldata(n = 2000, l = 2)\ndata = simulresult$data\n\npsin_estimate = popsize_simul(data = data,\n pi1 = simulresult$pi1, pi2 = simulresult$pi2,\n alpha = 0.25, omega = 1)\n\n\n\n"} {"package":"drpop","topic":"qhat_gam","snippet":"### Name: qhat_gam\n### Title: Estimate marginal and joint distribution of lists j and k using\n### generalized additive models.\n### Aliases: qhat_gam\n\n### ** Examples\n\n## Not run: \n##D qhat = qhat_gam(List.train = List.train, List.test = List.test, margin = 0.005)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"qhat_logit","snippet":"### Name: qhat_logit\n### Title: Estimate marginal and joint distribution of lists j and k using\n### logistic regression.\n### Aliases: qhat_logit\n\n### ** Examples\n\n## Not run: \n##D qhat = qhat_logit(List.train = List.train, List.test = List.test, margin = 0.005)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"qhat_mlogit","snippet":"### Name: qhat_mlogit\n### Title: Estimate marginal and joint distribution of lists j and k using\n### multinomial logistic model.\n### Aliases: qhat_mlogit\n\n### ** Examples\n\n## Not run: \n##D qhat = qhat_mlogit(List.train = List.train, List.test = List.test, margin = 0.005)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"qhat_ranger","snippet":"### Name: qhat_ranger\n### Title: Estimate marginal and joint distribution of lists j and k using\n### ranger.\n### Aliases: qhat_ranger\n\n### ** Examples\n\n## Not run: \n##D qhat = qhat_ranger(List.train = List.train, List.test = List.test, margin = 0.005)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"qhat_rangerlogit","snippet":"### Name: qhat_rangerlogit\n### Title: Estimate marginal and joint distribution of lists j and k using\n### ensemble of ranger and logit.\n### Aliases: qhat_rangerlogit\n\n### ** Examples\n\n## Not run: \n##D qhat = qhat_rangerlogit(List.train = List.train, List.test = List.test, margin = 0.005)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"qhat_sl","snippet":"### Name: qhat_sl\n### Title: Estimate marginal and joint distribution of lists j and k using\n### super learner.\n### Aliases: qhat_sl\n\n### ** Examples\n\n## Not run: \n##D qhat = 
qhat_sl(List.train = List.train, List.test = List.test, margin = 0.005, num_cores = 1)\n##D q1 = qhat$q1\n##D q2 = qhat$q2\n##D q12 = qhat$q12\n## End(Not run)\n\n\n"} {"package":"drpop","topic":"reformat","snippet":"### Name: reformat\n### Title: A function to reorder the columns of a data table/matrix/data\n### frame and to change factor variables to numeric.\n### Aliases: reformat\n\n### ** Examples\n\ndata = matrix(sample(c(0,1), 2000, replace = TRUE), ncol = 2)\nx = matrix(rnorm(nrow(data)*3, 2, 1), nrow = nrow(data))\n\ndata = cbind(x, data)\nresult<- reformat(data = data, capturelists = c(4,5))\n\n\n"} {"package":"drpop","topic":"simuldata","snippet":"### Name: simuldata\n### Title: A function to reorder the columns of a data table/matrix/data\n### frame and to change factor variables to numeric.\n### Aliases: simuldata\n\n### ** Examples\n\ndata = simuldata(n = 1000, l = 2)$data\npsi0 = simuldata(n = 10000, l = 2)$psi0\n\n\n"} {"package":"drpop","topic":"tmle","snippet":"### Name: tmle\n### Title: Returns the targeted maximum likelihood estimates for the\n### nuisance functions\n### Aliases: tmle\n\n### ** Examples\n\ndata = matrix(sample(c(0,1), 2000, replace = TRUE), ncol = 2)\nxmat = matrix(runif(nrow(data)*3, 0, 1), nrow = nrow(data))\ndatmat = cbind(data, data[,1]*data[,2], xmat)\ncolnames(datmat) = c(\"yj\", \"yk\", \"yjk\", \"q10\", \"q02\", \"q12\")\ndatmat = as.data.frame(datmat)\nresult = tmle(datmat, margin = 0.005, stop_margin = 0.00001, twolist = TRUE)\n\n\n"} {"package":"micromapST","topic":"BuildBorderGroup","snippet":"### Name: BuildBorderGroup\n### Title: Building new border groups for Linked Micromap created by the\n### micromapST package\n### Aliases: BuildBorderGroup\n\n### ** Examples\n\n\n# Load libraries needed.\nstt1 <- Sys.time()\nlibrary(stringr)\nlibrary(readxl)\nlibrary(sf)\n\n# Generate a Kentucky County Border Group\n#\n# Read the county boundary files. (Set up system directories. 
\n# Replace with your directories to run.)\nTempD<-\"c:/projects/statnet/\" # my private test PDF directory exist, \n #don't use temp.\n# get a temp directory for the output PDF files for the example.\nif (!dir.exists(TempD)) {\n TempD <- paste0(tempdir(),\"/\") \n DataD <- paste0(system.file(\"extdata\",package=\"micromapST\"),\"/\")\n} else {\n DataD <- \"c:/projects/statnet/r code/micromapST-3.0.0/inst/extdata/\"\n}\n\ncat(\"Temporary Directory:\",TempD,\"\\n\")\n# get working data directory\n#cat(\"Working Data Directory:\",DataD,\"\\n\")\n\nKYCoBG <- \"KYCountyBG\" # Border Group name\nKYCoCen <- \"KY_County\" # shape file name(s)\n\nKYCoShp <- st_read(DataD,KYCoCen)\nst_crs(KYCoShp) <- st_crs(\"+proj=lonlat +datum=NAD83 +ellipse=WGS84 +no_defs\")\n\n# inspect name table\nKYNTname <- paste0(DataD,\"/\",KYCoCen,\"_NameTable.xlsx\")\n#cat(\"KYNTname:\",KYNTname,\"\\n\")\n\nKYCoNT <- as.data.frame(read_xlsx(KYNTname))\n#head(KYCoNT)\nspt1 <- Sys.time()\ncat(\"Time to get data and boundaries for Counties:\",spt1-stt1,\"\\n\")\n## Not run: \n##D #\n##D # building border group for all counties in Kentucky\n##D #\n##D stt2 <- Sys.time()\n##D # Build Border Group\n##D BuildBorderGroup(ShapeFile = KYCoShp,\n##D ShapeLinkName = \"NAME\",\n##D NameTableLink = \"Name\",\n##D NameTableDir = DataD,\n##D NameTableFile = paste0(KYCoCen,\"_NameTable.xlsx\"),\n##D BorderGroupName = KYCoBG,\n##D BorderGroupDir = TempD,\n##D MapHdr = c(\"\",\"KY Counties\"),\n##D IDHdr = c(\"KY Co.\"),\n##D ReducePC = 0.9\n##D )\n##D \n##D # Setup MicromapST graphic\n##D spt2 <- Sys.time()\n##D cat(\"Time to build KY Co BG:\",spt2-stt2,\"\\n\")\n##D stt3 <- spt2\n##D KYCoData <- as.data.frame(read_xlsx(paste0(DataD,\"/\",\n##D \"KY_County_Population_1900-2020.xlsx\")))\n##D #head(KYCoData)\n##D \n##D KY_Co_PD <- data.frame(stringsAsFactors=FALSE,\n##D type=c(\"map\",\"id\",\"dot\",\"dot\"),\n##D lab1=c(NA,NA,\"2010 Pop\",\"2020 Pop\"),\n##D col1=c(NA,NA,\"2010\",\"2020\")\n##D )\n##D \n##D KYCoTitle <- c(\"Ez23ax-Kentucky County\",\"Pop 2010 and 2020\")\n##D OutCoPDF <- paste0(TempD,\"Ez23ax-KY Co 2010-2020 Pop.pdf\")\n##D grDevices::pdf(OutCoPDF,width=10,height=13) # on 11 x 14 paper.\n##D \n##D micromapST(KYCoData,KY_Co_PD,sortVar=c(\"2020\"), ascend=FALSE,\n##D rowNames=\"full\", rowNamesCol = c(\"Name\"),\n##D bordDir = TempD, bordGrp = KYCoBG, \n##D title = KYCoTitle\n##D )\n##D \n##D x <- dev.off()\n##D spt3 <- Sys.time()\n##D cat(\"Time to micromapST KY Co graph:\",spt3-stt3,\"\\n\")\n## End(Not run) # end of dontrun.\n\nstt4 <- Sys.time()\n\n# Aggregate Kentucky Counties into ADD areas\n#\n# The regions in the Kentucky County Name Table (KYCoNT) are the ADD districts\n# the county was assigned to.\n# The KYCoShp has the county boundaries.\n#\nKYCoShp$NAME <- str_to_upper(KYCoShp$NAME)\nKYCoNT$NameCap <- str_to_upper(KYCoNT$Name)\n\naggInx <- match(KYCoShp$NAME,KYCoNT$NameCap)\n#print(aggInx)\n\nxm <- is.na(aggInx) # which polygons did not match the name table?\nif (any(xm)) {\n cat(\"ERROR: One or more polygons/counties in the shape file did not match\\n\",\n \"the entries in the KY County name table. 
They are:\\n\")\n LLMiss <- KYCoNT[xm,\"Name\"]\n print(LLMiss)\n stop()\n}\n# \n\n#####\n# aggFUN - a function to inspect the data.frame columns and determine\n# an appropriate aggregation method - copy or sum.\n#\naggFUN <- function(z) { ifelse (is.character(z[1]), z[1], sum(as.numeric(z))) } \n#\n#####\n\n#\naggList <- KYCoNT$regID[aggInx]\n#print(aggList)\n\nKYADDShp <- aggregate(KYCoShp, by=list(aggList), FUN = aggFUN)\nnames(KYADDShp)[1] <- \"regID\" # change first column name to \"regNames\"\nrow.names(KYADDShp) <- KYADDShp$regID\n\nKeepAttr <- c(\"regID\",\"AREA\",\"PERIMETER\",\"STATE\",\"geometry\")\nKYADDShp <- KYADDShp[,KeepAttr]\nst_geometry(KYADDShp) <- st_cast(st_geometry(KYADDShp),\"MULTIPOLYGON\")\n\n#plot(st_geometry(KYADDShp))\nspt4 <- Sys.time()\ncat(\"Time to aggregate KY ADDs from Cos:\",spt4-stt4,\"\\n\")\nstt5 <- spt4\n# Build Border Group\n\nBuildBorderGroup(ShapeFile = KYADDShp, \n # sf structure of shapefile of combined counties into AD Districts\n ShapeLinkName = \"regID\",\n NameTableFile = \"KY_ADD_NameTable.xlsx\",\n NameTableDir = DataD,\n NameTableLink = \"Index\", \n BorderGroupName = \"KYADDBG\",\n BorderGroupDir = TempD,\n MapHdr = c(\"\",\"KY ADDs\"),\n IDHdr = c(\"KY ADDs\"),\n ReducePC = 0.9\n )\n\nspt5 <- Sys.time()\ncat(\"Time to build ADD BG:\",spt5-stt5,\"\\n\")\nstt6 <- spt5\n# Test micromapST\nKYADDData <- as.data.frame(readxl::read_xlsx(\n paste0(DataD,\"KY_ADD_Population-2020.xlsx\")),\n stringsAsFactors=FALSE)\n#\nKY_ADD_PD <- data.frame(stringsAsFactors=FALSE,\n type=c(\"map\",\"id\",\"dot\",\"dot\"),\n lab1=c(NA,NA,\"Pop\",\"Proj. Pop\"),\n lab2=c(NA,NA,\"2020\",\"2030\"),\n col1=c(NA,NA,\"DecC2020\",\"Proj2030\")\n )\n#\nKyTitle <- c(\"Ez23cx-KY Area Development Dist.\",\n \"Pop 2020 and proj Pop 2023\")\nOutPDF2 <- paste0(TempD,\"Ez23cx-KY ADD Pop.pdf\")\n\ngrDevices::pdf(OutPDF2,width=10,height=7.5)\n\nmicromapST(KYADDData,KY_ADD_PD,sortVar=\"DecC2020\",ascend=FALSE,\n rowNames= \"full\", rowNamesCol = \"ADD_Name\",\n bordDir = TempD,\n bordGrp = \"KYADDBG\",\n title = KyTitle\n )\nx <- grDevices::dev.off()\nspt6 <- Sys.time()\ncat(\"Time to do micromapST of KY ADDs:\",spt6-stt6,\"\\n\")\n\n\n"} {"package":"micromapST","topic":"micromapST","snippet":"### Name: micromapST\n### Title: Linked Micromap Graphics Package\n### Aliases: micromapST micromapST.Version\n\n### ** Examples\n\n\n###\n#\n# micromapST - Example # 01 - map with no cumulative shading,\n# 2 columns of statistics: dot with 95% confidence interval, \n# boxplot sorted in descending order by state rates, using \n# the default border group of \"USStatesBG\", with default symbols.\n###\n\n# load sample data, compute boxplot\nTDir<-\"c:/projects/statnet/\" # my private test PDF directory exist, don't use temp.\nif (!dir.exists(TDir)) {TDir <- paste0(tempdir(),\"/\") } # get a temp directory for the output \n # PDF files for the example.\ncat(\"TempDir:\",TDir,\"\\n\")\n\n # replace this directory name with the location if you want to same \n # the output from the examples.\n\nutils::data(wflung00and95,wflung00and95US,wflung00cnty,envir=environment()) \n\nwfboxlist = graphics::boxplot(split(wflung00cnty$rate,wflung00cnty$stabr),\n plot=FALSE) \n\n# set up 4 column page layout\n\npanelDesc01 <- data.frame(\n type=c(\"map\",\"id\",\"dotconf\",\"boxplot\"), \n lab1=c(\"\",\"\",\"State Rate\",\"County Rates\"), \n lab2=c(\"\",\"\",\"and 95% CI\",\"(suppressed if 1-9 deaths)\"), \n lab3=c(\"\",\"\",\"Deaths per 100,000\",\"Deaths per 100,000\"), \n 
col1=c(NA,NA,1,NA),col2=c(NA,NA,3,NA),col3=c(NA,NA,4,NA), \n refVals=c(NA,NA,NA,wflung00and95US[1,1]), \n refTexts=c(NA,NA,NA,\"US Rate 2000-4\"), \n panelData= c(\"\",\"\",\"\",\"wfboxlist\") \n ) \npanelDesc <- panelDesc01\n# set up PDF output file, call package\n\nExTitle <- c(\"Ex01-US White Female Lung Cancer Mortality, 2000-2004\", \n \"State Rates & County Boxplots\")\n \ngrDevices::pdf(file=paste0(TDir,\"Ex01-US-WFLung-2000-2004-St-DotCf-Co-Box.pdf\"),\n width=7.5,height=10)\n\nmicromapST(wflung00and95, panelDesc01, sortVar=1, ascend=FALSE,\n title=ExTitle\n ) \n\nx <- grDevices::dev.off()\n#\n### End Example 01\n\n###\n#\n# micromapST - Example # 02 - map with cumulative shading \n# from top down (mapcum), arrow and bar charts, \n# sorted in descending order by starting\n# value of arrows (1950-69 rates) using default\n# border group of \"USStatesDF\". This \n# example also provides custom colors for the \n# linked micromaps, highlights, etc.\n# \n###\n\n# Load example data from package.\nutils::data(wmlung5070,wmlung5070US,envir=environment()) \n\npanelDesc02 <- data.frame(\n type=c(\"mapcum\",\"id\",\"arrow\",\"bar\"),\t\t\n lab1=c(\"\",\"\",\"Rates in\",\"Percent Change\"), \n lab2=c(\"\",\"\",\"1950-69 and 1970-94\",\"1950-69 To 1970-94\"), \n lab3=c(\"MAPCUM\",\"\",\"Deaths per 100,000\",\"Percent\"),\n col1=c(NA,NA,\"RATEWM_50\",\"PERCENT\"), \t\t\n col2=c(NA,NA,\"RATEWM_70\",NA)\t\t\n )\n \ncolorsRgb = matrix(c( # the basic 7 colors.\n 213, 62, 79, #region 1: red\t #D53E4F - Rust Red\n 252, 141, 89, #region 2: orange\t #FC8D59 - Brn/Org\n 253, 225, 139, #region 3: green\t #FEE08B - Pale Brn\n 153, 213, 148, #region 4: greenish blue #99D594 - med Green\n 50, 136, 189, #region 5: lavendar \t #3288BD - Blue\n 255, 0, 255, #region 6 #FF00FF - Magenta \n .00, .00, .00, #region 7: black for median #000000 - Black\n 230, 245, 152, #non-highlighted foreground #E6F598 - YellowGreen\n 255, 174, 185, # alternate shape upper #FFAEB9 - Mauve\n 191, 239, 255, # alternate shape lower #BFEFFF - Cyan\n 242, 242, 242, # lightest grey for non-referenced sub-areas #F2F2F2\n 234, 234, 234), # lighter grey for bkg - non-active sub-areas. 
#EAEAEA\n \n ncol=3,byrow=TRUE)\n\nxcolors = c( grDevices::rgb(colorsRgb[,1],colorsRgb[,2],colorsRgb[,3],\n maxColorValue=255),\n # set solid colors\n grDevices::rgb(colorsRgb[,1],colorsRgb[,2],colorsRgb[,3],64,\n maxColorValue=255)) \n # set translucent colors for time series.\n\n# set up reference names for color set\nnames(xcolors) =c(\"rustred\",\"orange\",\"lightbrown\",\"mediumgreen\", \n \"blue\",\"magenta\", \"black\",\"yellowgreen\",\n \"mauve\",\"cyan\",\"lightest grey\",\"lighter grey\",\n \"l_rustred\",\"l_orange\",\"vlightbrown\",\"lightgreen\", \n \"lightblue\",\"l_black\",\"l_yelgreen\",\"l_mauve\",\n \"l_cyan\",\"l_lightest grey\",\"l_lighter grey\") \n\nExTitle <- c(\"Ex02-US Change in White Male Lung Cancer Mortality Rates\",\n \"from 1950-69 to 1970-94-Diff colors\")\n\ngrDevices::pdf(file=paste0(TDir,\"Ex02-US WmLung50-70-Arrow-Bar.pdf\"),width=7.5,height=10)\n\nmicromapST(wmlung5070,panelDesc02,sortVar=1,ascend=FALSE,\n title=ExTitle, colors=xcolors\n ) \n\nx <- grDevices::dev.off()\n#\n### End Example 02\n\n## Not run: \n##D ###\n##D #\n##D # micromapST - Example # 03 - Time Series Line Plots with \n##D # Confidence Bands maptail option highlights states from extremes \n##D # to middle state read in time series data set example using the \n##D # default border group of \"USStatesDF\".\n##D #\n##D ###\n##D \n##D # Load example data from package.\n##D utils::data(TSdata,envir=environment()) \n##D temprates <- data.frame(TSdata[,,2]) \n##D \n##D # TSdata structure is array of size c(51,15,4), \n##D # dimensions = 51 states, 15 years, (year label, point value, low limit, \n##D # high limit)\n##D \n##D panelDesc03 <- data.frame( \n##D type=c(\"maptail\",\"id\",\"tsconf\",\"dot\"), \n##D lab1=c(\"\",\"\",\"Time Series\",\"Female\"), \n##D lab2=c(\"\",\"\",\"Annual Rate per 100,000\",\"Most Recent Rate (2010)\"), \n##D lab3=c(\"\",\"\",\"Years\",\"Deaths per 100,000\"), \n##D lab4=c(\"\",\"\",\"Rate\",\"\"),\t\t \n##D col1=c(NA,NA,NA,15), \n##D panelData =c(NA,NA,\"TSdata\",NA)\n##D )\n##D ExTitle <- c(\"Ex03-US Time Series with Confidence bands\",\n##D \"Annual Female Lung Cancer Mortality Rates, 1996-2010\")\n##D \n##D grDevices::pdf(file=paste0(TDir,\"Ex03-US Time-Series-with-Conf.pdf\"),\n##D width=7.5,height=10)\n##D \n##D micromapST(temprates,panelDesc03,sortVar=\"P15\",ascend=FALSE,\n##D title=ExTitle) \n##D \n##D x <- grDevices::dev.off()\n##D #\n##D ### End Example 03\n## End(Not run)\n\n###\n#\n# micromapST - Example # 03a - Time Series Line Plots with \n# Confidence Bands maptail option highlights states from extremes \n# to middle state read in time series data set example using the \n# default border group of \"USStatesDF\".\n#\n# Specify the x-Axis values are dates and to format them as dates.\n###\n\n# Load example data from package.\nutils::data(TSdata,envir=environment()) \ntemprates <- data.frame(TSdata[,,2]) # y rate\n\n# In the original package TS data, the x data was not \n# a date value, it was the year number. To be able to demonstrate\n# the X-Axis Date format labeling, these were changed to Date values\n# by effectively substracting 1970-1-1 from the year value. 
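\n\n## Illustrative aside (editor's note on base R, not micromapST code): a Date\n## is stored as the count of days since 1970-01-01, so pasting \"-01-01\" onto a\n## year number and converting, as done below, yields exactly that day count:\nas.numeric(as.Date(\"1996-01-01\")) # 9496 days since the 1970-01-01 origin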
\n\n####\n#\n# Example 3a - Building TS conf array and converting years\n# into date values for the X-Axis and labels.\n# \n# example of build a TS Conf array.\n#\n# Using the old TSdata array as a starting point and source of data,\n# but build an entirely new TS array structure in a similar manner\n# that might be used to build your own time series array.\n#\n\ndata(TSdata) # get old array\nTSAreas <- row.names(TSdata) # one per area (index 1)\nNewArray <- array(dim=c(51,15,4),dimnames=list(TSAreas)) \n # this is for 51 states, 15 samples/observations, and 4 values per sample.\n\nfor (inx in seq(1,length(TSAreas))) { # loop once per area\n \n Samp <- TSdata[inx,,] # samples for an area\n # each sample has 15 observations of 4 values.\n # value 1 is the X axis data or the DATE of the observation\n Samp[,1] <- as.Date(paste0(as.character(Samp[,1]),\"-01-01\")) \n # convert simple year number to date\n NewArray[inx,,] <- Samp\n \n}\n\n# setting the attribute \"xIsDate\" on array to TRUE, signals micromapST \n# the user wants to see the x-axis values as dates.\n\nattr(NewArray,\"xIsDate\") <- TRUE\n\n# TSdata and NewArray structures are arrays of size c(51,15,4), \n# dimensions = 51 states, 15 years, (year label, point value, low limit, high limit)\n\npanelDesc03a <- data.frame( \n type=c(\"maptail\",\"id\",\"tsconf\",\"dot\"), \n lab1=c(\"\",\"\",\"Time Series (MMM-YY)\",\"Female\"), \n # recommend adding to the column title a note about the date format used.\n lab2=c(\"\",\"\",\"Annual Rate per 100,000\",\"Most Recent Rate (2010)\"), \n lab3=c(\"\",\"\",\"Years\",\"Deaths per 100,000\"), \n lab4=c(\"\",\"\",\"Rate\",\"\"),\t\t \n col1=c(NA,NA,NA,15), \n panelData =c(NA,NA,\"NewArray\",NA)\n )\n \nExTitle <- c(\"Ex03a-US Time Series with Confidence bands with time (mmm-yy)\",\n \"Annual Female Lung Cancer Mortality Rates, 1996-2010\")\n\ngrDevices::pdf(file=paste0(TDir,\"Ex03a-US Time-Series-with-Conf wDates.pdf\"),\n width=7.5,height=10)\n\nmicromapST(temprates,panelDesc03a,sortVar=\"P15\",ascend=FALSE,\n axisScale=\"s\",\n title=ExTitle) \n\nx <- grDevices::dev.off()\n#\n### End Example 03a\n\n###\n#\n# micromapST - Example 04 - dot followed by a scatter dot columns\n# use same data as Example 3 to compare 1996 & 2010 rates\n# mapmedian option shades states above or below the median \n# (light yellow) using the default border group of \"USStatesBG\"\n#\n# USES data loaded for Example 03 (temprates).\n#\n####\n\n# Load example data from package.\nutils::data(TSdata,envir=environment()) \ntemprates <- data.frame(TSdata[,,2]) # y rate\n\npanelDesc04 <- data.frame( \n type=c(\"mapmedian\",\"id\",\"dot\",\"scatdot\"), \n lab1=c(\"\",\"\",\"Female Lung Cancer Mortality\",\"Comparison of Rates\"), \n lab2=c(\"\",\"\",\"Rate in 1996 (Sort Variable)\",\n \"in 1996 (x axis) and 2010 (y axis)\"), \n lab3=c(\"\",\"\",\"Deaths per 100,000\",\"Deaths per 100,000 in 1996\"), \n lab4=c(\"\",\"\",\"\",\"Rate in 2010\"),\t\n col1=c(NA,NA,1,1), \n col2=c(NA,NA,NA,15)\t\t\n )\n \nExTitle <- c(\"Ex04-US Dot Plot for 1996, Scatter Plot Comparing 1996 to 2010\",\n \"Female Lung Cancer Mortality Rates\")\n\nFName <- paste0(TDir,\"Ex04-US FLCMR Scatter-Dots-1996-2010.pdf\")\ngrDevices::pdf(file=FName,width=7.5,height=10)\n\nmicromapST(temprates,panelDesc04,sortVar=1,ascend=FALSE,title=ExTitle) \n\nx <- grDevices::dev.off()\n#\n### End Example 04\n\n###\n#\n# micromapST - Example 05 - horizontal stacked (segmented) bars\n# segbar plots the input data, normbar plots percent of total\n# package computes the percents 
from input data\n# input for the categories for each state must be in consecutive \n# columns of the input data.frame using the default border group \n# of \"USStatesBG\"\n####\n\n# Load example data from package.\nutils::data(statePop2010,envir=environment())\n\npanelDesc05 <- data.frame( \n type=c(\"map\",\"id\",\"segbar\",\"normbar\"), \n lab1=c(\"\",\"\",\"Stacked Bar\",\"Normalized Stacked Bar\"), \n lab2=c(\"\",\"\",\"Counts\",\"Percent\"), \n col1=c(NA,NA,\"Hisp\",\"Hisp\"), \n col2=c(NA,NA,\"OtherWBH\",\"OtherWBH\")\t\t \n )\nExTitle <- c(\"Ex05-Stkd Norm Bars: 2010 Census Pop by Race, Sorted by Cnt Other Race\",\n \"Cat-L to R: Hispanic, non-Hisp White, Black, Other-sn-varbar\")\n\ngrDevices::pdf(file=paste0(TDir,\"Ex05-US Stkd-Norm Bar-var-height.pdf\"),\n width=7.5,height=10)\n\nmicromapST(statePop2010, panelDesc05, sortVar=\"OtherWBH\", ascend=FALSE,\n title= ExTitle,\n details=list(SNBar.varht=TRUE), axisScale=\"sn\" ) \n \nx <- grDevices::dev.off()\n#\n### End Example 05\n\n## Not run: \n##D ###\n##D #\n##D # micromapST - Example 06 - horizontal stacked (segmented) bars\n##D # segbar plots the input data, normbar plots percent of total\n##D # package computes the percents from input data\n##D # input for the categories for each state must be in consecutive \n##D # columns of the input data.frame using the default border group\n##D # of \"USStatesBG\".\n##D #\n##D # Turning off the variable bar height and the midpoint dot features\n##D # in the horizontal stacked bars (segmented)\n##D #\n##D # USES data loaded for Example 05 above - statePop2010.\n##D #\n##D ###\n##D \n##D # Reuse data loaded for Example 5 above.\n##D \n##D panelDesc06= data.frame( \n##D type=c(\"map\",\"id\",\"segbar\",\"normbar\"), \n##D lab1=c(\"\",\"\",\"Stacked Bar\",\"Normalized Stacked Bar\"), \n##D lab2=c(\"\",\"\",\"Counts\",\"Percent\"), \n##D col1=c(NA,NA,\"Hisp\",\"Hisp\"), \n##D col2=c(NA,NA,\"OtherWBH\",\"OtherWBH\")\t\t \n##D )\n##D \n##D ExTitle <- c(\"Ex06-Stacked Norm Bars: 2010 Census Pop by Race, Sorted by Other Race\",\n##D \"Cat-L to R: Hisp, non-Hisp White, Black, Other,ID-diamond\")\n##D \n##D grDevices::pdf(file=paste0(TDir,\"Ex06-Stkd-Norm-Bar-fixedheight-nodot.pdf\"),\n##D width=7.5,height=10)\n##D \n##D micromapST(statePop2010,panelDesc06,sortVar=4,ascend=FALSE,\n##D title= ExTitle,\n##D details=list(SNBar.Middle.Dot=FALSE,SNBar.varht=FALSE,Id.Dot.pch=23)\n##D ) \n##D x <- grDevices::dev.off()\n##D #\n##D ### End Example 06\n## End(Not run)\n\n###\n#\n# micromapST - Example 07 - centered (diverging) stacked bars\n#\n# National 8th grade Math Proficiency NAEP Test Scores Data for 2011\n# source: National Center for Education Statistics, \n# http://nces.ed.gov/nationsreportcard/naepdata/\n# bar segment values - % in each of 4 categories: \n# % < Basic, % at Basic, % Proficient, % Advanced\n# using the default border group of \"USStatesBG\".\n####\n\n# Load example data from package.\nutils::data(Educ8thData,envir=environment()) \n\n# columns = State abbrev, State name, Avg Score, %s \\21, NA, indx2)\nifelse(is.na(indx1), indx2, # none after, take before\n ifelse(is.na(indx2), indx1, #none before\n ifelse(abs(data2$lab.dt[indx2]- data1$entry.dt) <\n abs(data2$lab.dt[indx1]- data1$entry.dt), indx2, indx1)))\n\n\n"} {"package":"survival","topic":"nsk","snippet":"### Name: nsk\n### Title: Natural splines with knot heights as the basis.\n### Aliases: nsk\n### Keywords: smooth\n\n### ** Examples\n\n# make some dummy data\ntdata <- data.frame(x= lung$age, y = 10*log(lung$age-35) + rnorm(228, 
0, 2))\nfit1 <- lm(y ~ -1 + nsk(x, df=4, intercept=TRUE) , data=tdata)\nfit2 <- lm(y ~ nsk(x, df=3), data=tdata)\n\n# the knots (same for both fits)\nknots <- unlist(attributes(fit1$model[[2]])[c('Boundary.knots', 'knots')])\nsort(unname(knots))\nunname(coef(fit1)) # predictions at the knot points\n\nunname(coef(fit1)[-1] - coef(fit1)[1]) # differences: yhat[2:4] - yhat[1]\nunname(coef(fit2))[-1] # ditto\n\n## Not run: \n##D plot(y ~ x, data=tdata)\n##D points(sort(knots), coef(fit1), col=2, pch=19)\n##D coef(fit1)[1] + c(0, coef(fit1)[-1])\n## End(Not run)\n\n\n"} {"package":"survival","topic":"nwtco","snippet":"### Name: nwtco\n### Title: Data from the National Wilm's Tumor Study\n### Aliases: nwtco\n### Keywords: datasets\n\n### ** Examples\n\nwith(nwtco, table(instit,histol))\nanova(coxph(Surv(edrel,rel)~histol+instit,data=nwtco))\nanova(coxph(Surv(edrel,rel)~instit+histol,data=nwtco))\n\n\n"} {"package":"survival","topic":"pbcseq","snippet":"### Name: pbcseq\n### Title: Mayo Clinic Primary Biliary Cirrhosis, sequential data\n### Aliases: pbcseq\n### Keywords: datasets\n\n### ** Examples\n\n# Create the start-stop-event triplet needed for coxph\nfirst <- with(pbcseq, c(TRUE, diff(id) !=0)) #first id for each subject\nlast <- c(first[-1], TRUE) #last id\n\ntime1 <- with(pbcseq, ifelse(first, 0, day))\ntime2 <- with(pbcseq, ifelse(last, futime, c(day[-1], 0)))\nevent <- with(pbcseq, ifelse(last, status, 0))\n\nfit1 <- coxph(Surv(time1, time2, event) ~ age + sex + log(bili), pbcseq)\n\n\n"} {"package":"survival","topic":"plot.cox.zph","snippet":"### Name: plot.cox.zph\n### Title: Graphical Test of Proportional Hazards\n### Aliases: plot.cox.zph\n### Keywords: survival\n\n### ** Examples\n\nvfit <- coxph(Surv(time,status) ~ trt + factor(celltype) + \n karno + age, data=veteran, x=TRUE) \ntemp <- cox.zph(vfit) \nplot(temp, var=3) # Look at Karnofsky score, old way of doing plot \nplot(temp[3]) # New way with subscripting \nabline(0, 0, lty=3) \n# Add the linear fit as well \nabline(lm(temp$y[,3] ~ temp$x)$coefficients, lty=4, col=3) \ntitle(main=\"VA Lung Study\") \n\n\n"} {"package":"survival","topic":"plot.survfit","snippet":"### Name: plot.survfit\n### Title: Plot method for 'survfit' objects\n### Aliases: plot.survfit\n### Keywords: survival hplot\n\n### ** Examples\n\nleukemia.surv <- survfit(Surv(time, status) ~ x, data = aml) \nplot(leukemia.surv, lty = 2:3) \nlegend(100, .9, c(\"Maintenance\", \"No Maintenance\"), lty = 2:3) \ntitle(\"Kaplan-Meier Curves\\nfor AML Maintenance Study\") \nlsurv2 <- survfit(Surv(time, status) ~ x, aml, type='fleming') \nplot(lsurv2, lty=2:3, fun=\"cumhaz\", \n\txlab=\"Months\", ylab=\"Cumulative Hazard\") \n\n\n"} {"package":"survival","topic":"predict.coxph","snippet":"### Name: predict.coxph\n### Title: Predictions for a Cox model\n### Aliases: predict.coxph predict.coxph.penal\n### Keywords: survival\n\n### ** Examples\n\noptions(na.action=na.exclude) # retain NA in predictions\nfit <- coxph(Surv(time, status) ~ age + ph.ecog + strata(inst), lung)\n#lung data set has status coded as 1/2\nmresid <- (lung$status-1) - predict(fit, type='expected') #Martingale resid \npredict(fit,type=\"lp\")\npredict(fit,type=\"expected\")\npredict(fit,type=\"risk\",se.fit=TRUE)\npredict(fit,type=\"terms\",se.fit=TRUE)\n\n# For someone who demands reference='zero'\npzero <- function(fit)\n predict(fit, reference=\"sample\") + sum(coef(fit) * fit$means, na.rm=TRUE)
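\n\n## Illustrative aside (editor's note): the hand-computed martingale residual\n## above should match survival's built-in accessor, whose default residual\n## type for coxph fits is \"martingale\":\n# all.equal(unname(mresid), unname(residuals(fit, type = \"martingale\")))\n\n\n"} {"package":"survival","topic":"predict.survreg","snippet":"### Name: predict.survreg\n### Title: 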
Predicted Values for a 'survreg' Object\n### Aliases: predict.survreg predict.survreg.penal\n### Keywords: survival\n\n### ** Examples\n\n# Draw figure 1 from Escobar and Meeker, 1992.\nfit <- survreg(Surv(time,status) ~ age + I(age^2), data=stanford2, \n\tdist='lognormal') \nwith(stanford2, plot(age, time, xlab='Age', ylab='Days', \n\txlim=c(0,65), ylim=c(.1, 10^5), log='y', type='n'))\nwith(stanford2, points(age, time, pch=c(2,4)[status+1], cex=.7))\npred <- predict(fit, newdata=list(age=1:65), type='quantile', \n\t p=c(.1, .5, .9)) \nmatlines(1:65, pred, lty=c(2,1,2), col=1) \n\n# Predicted Weibull survival curve for a lung cancer subject with\n# ECOG score of 2\nlfit <- survreg(Surv(time, status) ~ ph.ecog, data=lung)\npct <- 1:98/100 # The 100th percentile of predicted survival is at +infinity\nptime <- predict(lfit, newdata=data.frame(ph.ecog=2), type='quantile',\n p=pct, se=TRUE)\nmatplot(cbind(ptime$fit, ptime$fit + 2*ptime$se.fit,\n ptime$fit - 2*ptime$se.fit)/30.5, 1-pct,\n xlab=\"Months\", ylab=\"Survival\", type='l', lty=c(1,2,2), col=1)\n\n\n"} {"package":"survival","topic":"pseudo","snippet":"### Name: pseudo\n### Title: Pseudo values for survival.\n### Aliases: pseudo\n### Keywords: survival\n\n### ** Examples\n\nfit1 <- survfit(Surv(time, status) ~ 1, data=lung)\nyhat <- pseudo(fit1, times=c(365, 730))\ndim(yhat)\nlfit <- lm(yhat[,1] ~ ph.ecog + age + sex, data=lung)\n\n# Restricted Mean Time in State (RMST) \nrms <- pseudo(fit1, times= 730, type='RMST') # 2 years\nrfit <- lm(rms ~ ph.ecog + sex, data=lung)\nrhat <- predict(rfit, newdata=expand.grid(ph.ecog=0:3, sex=1:2), se.fit=TRUE)\n# print it out nicely\ntemp1 <- cbind(matrix(rhat$fit, 4,2))\ntemp2 <- cbind(matrix(rhat$se.fit, 4, 2))\ntemp3 <- cbind(temp1[,1], temp2[,1], temp1[,2], temp2[,2])\ndimnames(temp3) <- list(paste(\"ph.ecog\", 0:3), \n c(\"Male RMST\", \"(se)\", \"Female RMST\", \"(se)\"))\n\nround(temp3, 1)\n# compare this to the fully non-parametric estimate\nfit2 <- survfit(Surv(time, status) ~ ph.ecog, data=lung)\nprint(fit2, rmean=730)\n# the estimate for ph.ecog=3 is very unstable (n=1), pseudovalues smooth it.\n#\n# In all the above we should be using the robust variance, e.g., svyglm, but\n# a recommended package can't depend on external libraries.\n# See the vignette for a more complete exposition.\n\n\n"} {"package":"survival","topic":"pspline","snippet":"### Name: pspline\n### Title: Smoothing splines using a pspline basis\n### Aliases: pspline psplineinverse\n### Keywords: survival\n\n### ** Examples\n\nlfit6 <- survreg(Surv(time, status)~pspline(age, df=2), lung)\nplot(lung$age, predict(lfit6), xlab='Age', ylab=\"Spline prediction\")\ntitle(\"Cancer Data\")\nfit0 <- coxph(Surv(time, status) ~ ph.ecog + age, lung)\nfit1 <- coxph(Surv(time, status) ~ ph.ecog + pspline(age,3), lung)\nfit3 <- coxph(Surv(time, status) ~ ph.ecog + pspline(age,8), lung)\nfit0\nfit1\nfit3\n\n\n"} {"package":"survival","topic":"pyears","snippet":"### Name: pyears\n### Title: Person Years\n### Aliases: pyears\n### Keywords: survival\n\n### ** Examples\n\n# Look at progression rates jointly by calendar date and age\n# \ntemp.yr <- tcut(mgus$dxyr, 55:92, labels=as.character(55:91)) \ntemp.age <- tcut(mgus$age, 34:101, labels=as.character(34:100))\nptime <- ifelse(is.na(mgus$pctime), mgus$futime, mgus$pctime)\npstat <- ifelse(is.na(mgus$pctime), 0, 1)\npfit <- pyears(Surv(ptime/365.25, pstat) ~ temp.yr + temp.age + sex, mgus,\n data.frame=TRUE) \n# Turn the factor back into numerics for regression\ntdata <- pfit$data\ntdata$age 
<- as.numeric(as.character(tdata$temp.age))\ntdata$year<- as.numeric(as.character(tdata$temp.yr))\nfit1 <- glm(event ~ year + age+ sex +offset(log(pyears)),\n data=tdata, family=poisson)\n## Not run: \n##D # fit a gam model \n##D gfit.m <- gam(event ~ s(age) + s(year) + offset(log(pyears)), \n##D family = poisson, data = tdata) \n## End(Not run)\n\n# Example #2 Create the hearta data frame: \nhearta <- by(heart, heart$id, \n function(x)x[x$stop == max(x$stop),]) \nhearta <- do.call(\"rbind\", hearta) \n# Produce pyears table of death rates on the surgical arm\n# The first is by age at randomization, the second by current age\nfit1 <- pyears(Surv(stop/365.25, event) ~ cut(age + 48, c(0,50,60,70,100)) + \n surgery, data = hearta, scale = 1)\nfit2 <- pyears(Surv(stop/365.25, event) ~ tcut(age + 48, c(0,50,60,70,100)) + \n surgery, data = hearta, scale = 1)\nfit1$event/fit1$pyears #death rates on the surgery and non-surg arm\n\nfit2$event/fit2$pyears #death rates on the surgery and non-surg arm\n\n\n"} {"package":"survival","topic":"quantile.survfit","snippet":"### Name: quantile.survfit\n### Title: Quantiles from a survfit object\n### Aliases: quantile.survfit quantile.survfitms median.survfit\n### Keywords: survival\n\n### ** Examples\n\nfit <- survfit(Surv(time, status) ~ ph.ecog, data=lung)\nquantile(fit)\n\ncfit <- coxph(Surv(time, status) ~ age + strata(ph.ecog), data=lung)\ncsurv<- survfit(cfit, newdata=data.frame(age=c(40, 60, 80)),\n conf.type =\"none\")\ntemp <- quantile(csurv, 1:5/10)\ntemp[2,3,] # quantiles for second level of ph.ecog, age=80\nquantile(csurv[2,3], 1:5/10) # quantiles of a single curve, same result\n\n\n"} {"package":"survival","topic":"reliability","snippet":"### Name: reliability\n### Title: Reliability data sets\n### Aliases: reliability capacitor cracks genfan ifluid imotor turbine\n### valveSeat\n### Keywords: datasets\n\n### ** Examples\n\nsurvreg(Surv(time, status) ~ temperature + voltage, capacitor)\n\n\n"} {"package":"survival","topic":"residuals.coxph.penal","snippet":"### Name: residuals.coxph\n### Title: Calculate Residuals for a 'coxph' Fit\n### Aliases: residuals.coxph.penal residuals.coxph.null residuals.coxph\n### residuals.coxphms\n### Keywords: survival\n\n### ** Examples\n\n\n fit <- coxph(Surv(start, stop, event) ~ (age + surgery)* transplant,\n data=heart)\n mresid <- resid(fit, collapse=heart$id)\n\n\n"} {"package":"survival","topic":"residuals.survfit","snippet":"### Name: residuals.survfit\n### Title: IJ residuals from a survfit object.\n### Aliases: residuals.survfit\n\n### ** Examples\n\nfit <- survfit(Surv(time, status) ~ x, aml)\nresid(fit, times=c(24, 48), type=\"RMTS\")\n\n\n"} {"package":"survival","topic":"residuals.survreg","snippet":"### Name: residuals.survreg\n### Title: Compute Residuals for 'survreg' Objects\n### Aliases: residuals.survreg residuals.survreg.penal\n### Keywords: survival\n\n### ** Examples\n\nfit <- survreg(Surv(futime, death) ~ age + sex, mgus2)\nsummary(fit) # age and sex are both important\n\nrr <- residuals(fit, type='matrix')\nsum(rr[,1]) - with(mgus2, sum(log(futime[death==1]))) # loglik\n\nplot(mgus2$age, rr[,2], col= (1+mgus2$death)) # ldresp\n\n\n"} {"package":"survival","topic":"retinopathy","snippet":"### Name: retinopathy\n### Title: Diabetic Retinopathy\n### Aliases: retinopathy\n### Keywords: datasets\n\n### ** Examples\n\ncoxph(Surv(futime, status) ~ type + trt, cluster= id, retinopathy)\n\n\n"} {"package":"survival","topic":"rhDNase","snippet":"### Name: rhDNase\n### Title: rhDNASE data set\n### 
Aliases: rhDNase\n### Keywords: datasets\n\n### ** Examples\n\n# Build the start-stop data set for analysis, and\n# replicate line 2 of table 8.13 in the book\nfirst <- subset(rhDNase, !duplicated(id)) #first row for each subject\ndnase <- tmerge(first, first, id=id, tstop=as.numeric(end.dt -entry.dt))\n\n# Subjects whose fu ended during the 6 day window are the reason for\n# this next line\ntemp.end <- with(rhDNase, pmin(ivstop+6, end.dt-entry.dt))\ndnase <- tmerge(dnase, rhDNase, id=id,\n infect=event(ivstart),\n end= event(temp.end))\n# toss out the non-at-risk intervals, and extra variables\n# 3 subjects had an event on their last day of fu, infect=1 and end=1\ndnase <- subset(dnase, (infect==1 | end==0), c(id:trt, fev:infect))\nagfit <- coxph(Surv(tstart, tstop, infect) ~ trt + fev, cluster=id,\n data=dnase)\n\n\n"} {"package":"survival","topic":"ridge","snippet":"### Name: ridge\n### Title: Ridge regression\n### Aliases: ridge\n### Keywords: survival\n\n### ** Examples\n\n\ncoxph(Surv(futime, fustat) ~ rx + ridge(age, ecog.ps, theta=1),\n\t ovarian)\n\nlfit0 <- survreg(Surv(time, status) ~1, lung)\nlfit1 <- survreg(Surv(time, status) ~ age + ridge(ph.ecog, theta=5), lung)\nlfit2 <- survreg(Surv(time, status) ~ sex + ridge(age, ph.ecog, theta=1), lung)\nlfit3 <- survreg(Surv(time, status) ~ sex + age + ph.ecog, lung)\n\n\n\n"} {"package":"survival","topic":"rotterdam","snippet":"### Name: rotterdam\n### Title: Breast cancer data set used in Royston and Altman (2013)\n### Aliases: rotterdam\n### Keywords: datasets survival\n\n### ** Examples\n\n# liberal definition of rfs (count later deaths)\nrfs <- pmax(rotterdam$recur, rotterdam$death)\nrfstime <- with(rotterdam, ifelse(recur==1, rtime, dtime))\nfit1 <- coxph(Surv(rfstime, rfs) ~ pspline(age) + meno + size + \n pspline(nodes) + er, data = rotterdam)\n\n# conservative (no deaths after last fu for recurrence)\nignore <- with(rotterdam, recur ==0 & death==1 & rtime < dtime)\ntable(ignore)\nrfs2 <- with(rotterdam, ifelse(recur==1 | ignore, recur, death))\nrfstime2 <- with(rotterdam, ifelse(recur==1 | ignore, rtime, dtime))\nfit2 <- coxph(Surv(rfstime2, rfs2) ~ pspline(age) + meno + size + \n pspline(nodes) + er, data = rotterdam)\n\n# Note: Both age and nodes show non-linear effects.\n# Royston and Altman used fractional polynomials for the nonlinear terms\n\n\n"} {"package":"survival","topic":"royston","snippet":"### Name: royston\n### Title: Compute Royston's D for a Cox model\n### Aliases: royston\n### Keywords: survival\n\n### ** Examples\n\n# An example used in Royston and Sauerbrei\npbc2 <- na.omit(pbc) # no missing values\ncfit <- coxph(Surv(time, status==2) ~ age + log(bili) + edema + albumin +\n stage + copper, data=pbc2, ties=\"breslow\")\nroyston(cfit)\n\n\n"} {"package":"survival","topic":"rttright","snippet":"### Name: rttright\n### Title: Compute redistribute-to-the-right weights\n### Aliases: rttright\n### Keywords: survival\n\n### ** Examples\n\nafit <- survfit(Surv(time, status) ~1, data=aml)\nrwt <- rttright(Surv(time, status) ~1, data=aml)\n\n# Reproduce a Kaplan-Meier\nindex <- order(aml$time)\ncdf <- cumsum(rwt[index]) # weighted CDF\ncdf <- cdf[!duplicated(aml$time[index], fromLast=TRUE)] # remove duplicate times\ncbind(time=afit$time, KM= afit$surv, RTTR= 1-cdf)\n\n# Hormonal patients have a different censoring pattern\nwt2 <- rttright(Surv(dtime, death) ~ hormon, rotterdam, times= 365*c(3, 5))\ndim(wt2)\n\n\n"} {"package":"survival","topic":"solder","snippet":"### Name: solder\n### Title: Data from a soldering 
experiment\n### Aliases: solder\n### Keywords: datasets\n\n### ** Examples\n\n# The balanced subset used by Chambers and Hastie\n# contains the first 180 of each mask and deletes mask A6. \nindex <- 1 + (1:nrow(solder)) - match(solder$Mask, solder$Mask)\nsolder.balance <- droplevels(subset(solder, Mask != \"A6\" & index <= 180))\n\n\n"} {"package":"survival","topic":"statefig","snippet":"### Name: statefig\n### Title: Draw a state space figure.\n### Aliases: statefig\n### Keywords: survival hplot\n\n### ** Examples\n\n# Draw a simple competing risks figure\nstates <- c(\"Entry\", \"Complete response\", \"Relapse\", \"Death\")\nconnect <- matrix(0, 4, 4, dimnames=list(states, states))\nconnect[1, -1] <- c(1.1, 1, 0.9)\nstatefig(c(1, 3), connect)\n\n\n"} {"package":"survival","topic":"strata","snippet":"### Name: strata\n### Title: Identify Stratification Variables\n### Aliases: strata\n### Keywords: survival\n\n### ** Examples\n\na <- factor(rep(1:3,4), labels=c(\"low\", \"medium\", \"high\"))\nb <- factor(rep(1:4,3))\nlevels(strata(b))\nlevels(strata(a,b,shortlabel=TRUE))\n\ncoxph(Surv(futime, fustat) ~ age + strata(rx), data=ovarian) \n\n\n"} {"package":"survival","topic":"summary.aareg","snippet":"### Name: summary.aareg\n### Title: Summarize an aareg fit\n### Aliases: summary.aareg\n### Keywords: survival\n\n### ** Examples\n\nafit <- aareg(Surv(time, status) ~ age + sex + ph.ecog, data=lung,\n dfbeta=TRUE)\nsummary(afit)\n## Not run: \n##D slope test se(test) robust se z p \n##D Intercept 5.05e-03 1.9 1.54 1.55 1.23 0.219000\n##D age 4.01e-05 108.0 109.00 106.00 1.02 0.307000\n##D sex -3.16e-03 -19.5 5.90 5.95 -3.28 0.001030\n##D ph.ecog 3.01e-03 33.2 9.18 9.17 3.62 0.000299\n##D \n##D Chisq=22.84 on 3 df, p=4.4e-05; test weights=aalen\n## End(Not run)\n\nsummary(afit, maxtime=600)\n## Not run: \n##D slope test se(test) robust se z p \n##D Intercept 4.16e-03 2.13 1.48 1.47 1.450 0.146000\n##D age 2.82e-05 85.80 106.00 100.00 0.857 0.392000\n##D sex -2.54e-03 -20.60 5.61 5.63 -3.660 0.000256\n##D ph.ecog 2.47e-03 31.60 8.91 8.67 3.640 0.000271\n##D \n##D Chisq=27.08 on 3 df, p=5.7e-06; test weights=aalen\n## End(Not run)\n\n"} {"package":"survival","topic":"summary.coxph","snippet":"### Name: summary.coxph\n### Title: Summary method for Cox models\n### Aliases: summary.coxph\n### Keywords: survival\n\n### ** Examples\n\nfit <- coxph(Surv(time, status) ~ age + sex, lung) \nsummary(fit)\n\n\n"} {"package":"survival","topic":"summary.survfit","snippet":"### Name: summary.survfit\n### Title: Summary of a Survival Curve\n### Aliases: summary.survfit\n### Keywords: survival\n\n### ** Examples\n\nsummary( survfit( Surv(futime, fustat)~1, data=ovarian))\nsummary( survfit( Surv(futime, fustat)~rx, data=ovarian))\n\n\n"} {"package":"survival","topic":"survSplit","snippet":"### Name: survSplit\n### Title: Split a survival data set at specified times\n### Aliases: survSplit\n### Keywords: survival utilities\n\n### ** Examples\n\nfit1 <- coxph(Surv(time, status) ~ karno + age + trt, veteran)\nplot(cox.zph(fit1)[1])\n# a cox.zph plot of the data suggests that the effect of Karnofsky score\n# begins to diminish by 60 days and has faded away by 120 days.\n# Fit a model with separate coefficients for the three intervals.\n#\nvet2 <- survSplit(Surv(time, status) ~., veteran,\n cut=c(60, 120), episode =\"timegroup\")\nfit2 <- coxph(Surv(tstart, time, status) ~ karno* strata(timegroup) +\n age + trt, data= vet2)\nc(overall= coef(fit1)[1],\n t0_60 = coef(fit2)[1],\n t60_120= sum(coef(fit2)[c(1,4)]),\n t120 
= sum(coef(fit2)[c(1,5)]))\n\n\n"} {"package":"survival","topic":"survcondense","snippet":"### Name: survcondense\n### Title: Shorten a (time1, time2) survival dataset\n### Aliases: survcondense\n### Keywords: survival\n\n### ** Examples\n\ndim(aml)\ntest1 <- survSplit(Surv(time, status) ~ ., data=aml, \n cut=c(10, 20, 30), id=\"newid\")\ndim(test1)\n\n# remove the added rows\ntest2 <- survcondense(Surv(tstart, time, status) ~ x, test1, id=newid)\ndim(test2)\n\n\n"} {"package":"survival","topic":"survdiff","snippet":"### Name: survdiff\n### Title: Test Survival Curve Differences\n### Aliases: survdiff print.survdiff\n### Keywords: survival\n\n### ** Examples\n\n## Two-sample test\nsurvdiff(Surv(futime, fustat) ~ rx,data=ovarian)\n\n## Stratified 7-sample test\n\nsurvdiff(Surv(time, status) ~ pat.karno + strata(inst), data=lung)\n\n## Expected survival for heart transplant patients based on\n## US mortality tables\nexpect <- survexp(futime ~ 1, data=jasa, cohort=FALSE,\n rmap= list(age=(accept.dt - birth.dt), sex=1, year=accept.dt),\n ratetable=survexp.us)\n## actual survival is much worse (no surprise)\nsurvdiff(Surv(jasa$futime, jasa$fustat) ~ offset(expect))\n\n# The free light chain data set is close to the population.\ne2 <- survexp(futime ~ 1, data=flchain, cohort=FALSE,\n rmap= list(age= age*365.25, sex=sex, \n year=as.Date(paste0(sample.yr, \"-07-01\"))),\n ratetable= survexp.mn)\nsurvdiff(Surv(futime, death) ~ offset(e2), flchain)\n\n\n"} {"package":"survival","topic":"survexp","snippet":"### Name: survexp\n### Title: Compute Expected Survival\n### Aliases: survexp print.survexp\n### Keywords: survival\n\n### ** Examples\n\n# \n# Stanford heart transplant data\n# We don't have sex in the data set, but know it to be nearly all males.\n# Estimate of conditional survival \nfit1 <- survexp(futime ~ 1, rmap=list(sex=\"male\", year=accept.dt, \n age=(accept.dt-birth.dt)), method='conditional', data=jasa)\nsummary(fit1, times=1:10*182.5, scale=365) #expected survival by 1/2 years\n\n# Estimate of expected survival stratified by prior surgery \nsurvexp(~ surgery, rmap= list(sex=\"male\", year=accept.dt, \n\tage=(accept.dt-birth.dt)), method='ederer', data=jasa,\n times=1:10 * 182.5) \n\n## Compare the survival curves for the Mayo PBC data to Cox model fit\n## \npfit <-coxph(Surv(time,status>0) ~ trt + log(bili) + log(protime) + age +\n platelet, data=pbc)\nplot(survfit(Surv(time, status>0) ~ trt, data=pbc), mark.time=FALSE)\nlines(survexp( ~ trt, ratetable=pfit, data=pbc), col='purple')\n\n\n"} {"package":"survival","topic":"survexp.us","snippet":"### Name: ratetables\n### Title: Census Data Sets for the Expected Survival and Person Years\n### Functions\n### Aliases: survexp.us survexp.usr survexp.mn\n### Keywords: survival datasets\n\n### ** Examples\n\nsurvexp.uswhite <- survexp.usr[,,\"white\",]\n\n\n"} {"package":"survival","topic":"survfit.formula","snippet":"### Name: survfit.formula\n### Title: Compute a Survival Curve for Censored Data\n### Aliases: survfit.formula [.survfit\n### Keywords: survival\n\n### ** Examples\n\n#fit a Kaplan-Meier and plot it \nfit <- survfit(Surv(time, status) ~ x, data = aml) \nplot(fit, lty = 2:3) \nlegend(100, .8, c(\"Maintained\", \"Nonmaintained\"), lty = 2:3) \n\n#fit a Cox proportional hazards model and plot the \n#predicted survival for a 60 year old \nfit <- coxph(Surv(futime, fustat) ~ age, data = ovarian) \nplot(survfit(fit, newdata=data.frame(age=60)),\n xscale=365.25, xlab = \"Years\", ylab=\"Survival\") \n\n# Here is the data set from 
Turnbull\n# There are no interval censored subjects, only left-censored (status=2),\n# right-censored (status 0) and observed events (status 1)\n#\n# Time\n# 1 2 3 4\n# Type of observation\n# death 12 6 2 3\n# losses 3 2 0 3\n# late entry 2 4 2 5\n#\ntdata <- data.frame(time =c(1,1,1,2,2,2,3,3,3,4,4,4),\n status=rep(c(1,0,2),4),\n n =c(12,3,2,6,2,4,2,0,2,3,3,5))\nfit <- survfit(Surv(time, time, status, type='interval') ~1, \n data=tdata, weight=n)\n\n#\n# Three curves for patients with monoclonal gammopathy.\n# 1. KM of time to PCM, ignoring death (statistically incorrect)\n# 2. Competing risk curves (also known as \"cumulative incidence\")\n# 3. Multi-state, showing Pr(in each state, at time t)\n#\nfitKM <- survfit(Surv(stop, event=='pcm') ~1, data=mgus1,\n subset=(start==0))\nfitCR <- survfit(Surv(stop, event) ~1,\n data=mgus1, subset=(start==0))\nfitMS <- survfit(Surv(start, stop, event) ~ 1, id=id, data=mgus1)\n## Not run: \n##D # CR curves show the competing risks\n##D plot(fitCR, xscale=365.25, xmax=7300, mark.time=FALSE,\n##D col=2:3, xlab=\"Years post diagnosis of MGUS\",\n##D ylab=\"P(state)\")\n##D lines(fitKM, fun='event', xmax=7300, mark.time=FALSE,\n##D conf.int=FALSE)\n##D text(3652, .4, \"Competing risk: death\", col=3)\n##D text(5840, .15,\"Competing risk: progression\", col=2)\n##D text(5480, .30,\"KM:prog\")\n## End(Not run)\n\n\n"} {"package":"survival","topic":"survfit.matrix","snippet":"### Name: survfit.matrix\n### Title: Create Aalen-Johansen estimates of multi-state survival from a\n### matrix of hazards.\n### Aliases: survfit.matrix\n### Keywords: survival\n\n### ** Examples\n\netime <- with(mgus2, ifelse(pstat==0, futime, ptime))\nevent <- with(mgus2, ifelse(pstat==0, 2*death, 1))\nevent <- factor(event, 0:2, labels=c(\"censor\", \"pcm\", \"death\"))\n\ncfit1 <- coxph(Surv(etime, event==\"pcm\") ~ age + sex, mgus2)\ncfit2 <- coxph(Surv(etime, event==\"death\") ~ age + sex, mgus2)\n\n# predicted competing risk curves for a 72 year old with mspike of 1.2\n# (median values), male and female.\n# The survfit call is a bit faster without standard errors.\nnewdata <- expand.grid(sex=c(\"F\", \"M\"), age=72, mspike=1.2)\n\nAJmat <- matrix(list(), 3,3)\nAJmat[1,2] <- list(survfit(cfit1, newdata, std.err=FALSE))\nAJmat[1,3] <- list(survfit(cfit2, newdata, std.err=FALSE))\ncsurv <- survfit(AJmat, p0 =c(entry=1, PCM=0, death=0))\n\n\n"} {"package":"survival","topic":"survobrien","snippet":"### Name: survobrien\n### Title: O'Brien's Test for Association of a Single Variable with\n### Survival\n### Aliases: survobrien\n### Keywords: survival\n\n### ** Examples\n\nxx <- survobrien(Surv(futime, fustat) ~ age + factor(rx) + I(ecog.ps), \n\t\t\t data=ovarian) \ncoxph(Surv(time, status) ~ age + strata(.strata.), data=xx) \n\n\n"} {"package":"survival","topic":"survreg","snippet":"### Name: survreg\n### Title: Regression for a Parametric Survival Model\n### Aliases: survreg model.frame.survreg labels.survreg print.survreg.penal\n### print.summary.survreg survReg anova.survreg anova.survreglist\n### Keywords: survival\n\n### ** Examples\n\n# Fit an exponential model: the two fits are the same\nsurvreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian, dist='weibull',\n scale=1)\nsurvreg(Surv(futime, fustat) ~ ecog.ps + rx, ovarian,\n dist=\"exponential\")\n\n#\n# A model with different baseline survival shapes for two groups, i.e.,\n# two different scale parameters\nsurvreg(Surv(time, status) ~ ph.ecog + age + strata(sex), lung)\n\n# There are multiple ways to parameterize a Weibull 
distribution. The survreg \n# function embeds it in a general location-scale family, which is a \n# different parameterization than the rweibull function, and often leads\n# to confusion.\n# survreg's scale = 1/(rweibull shape)\n# survreg's intercept = log(rweibull scale)\n# For the log-likelihood all parameterizations lead to the same value.\ny <- rweibull(1000, shape=2, scale=5)\nsurvreg(Surv(y)~1, dist=\"weibull\")\n\n# Economists fit a model called `tobit regression', which is a standard\n# linear regression with Gaussian errors, and left censored data.\ntobinfit <- survreg(Surv(durable, durable>0, type='left') ~ age + quant,\n\t data=tobin, dist='gaussian')\n\n\n"} {"package":"survival","topic":"survreg.distributions","snippet":"### Name: survreg.distributions\n### Title: Parametric Survival Distributions\n### Aliases: survreg.distributions\n### Keywords: survival\n\n### ** Examples\n\n# time transformation\nsurvreg(Surv(time, status) ~ ph.ecog + sex, dist='weibull', data=lung)\n# change the transformation to work in years\n# intercept changes by log(365), everything else stays the same\nmy.weibull <- survreg.distributions$weibull\nmy.weibull$trans <- function(y) log(y/365)\nmy.weibull$itrans <- function(y) 365*exp(y)\nsurvreg(Surv(time, status) ~ ph.ecog + sex, lung, dist=my.weibull)\n\n# Weibull parametrisation\ny<-rweibull(1000, shape=2, scale=5)\nsurvreg(Surv(y)~1, dist=\"weibull\")\n# survreg scale parameter maps to 1/shape, linear predictor to log(scale)\n\n# Cauchy fit\nmycauchy <- list(name='Cauchy',\n init= function(x, weights, ...) \n c(median(x), mad(x)),\n density= function(x, parms) {\n temp <- 1/(1 + x^2)\n cbind(.5 + atan(x)/pi, .5+ atan(-x)/pi,\n temp/pi, -2 *x*temp, 2*temp*(4*x^2*temp -1))\n },\n quantile= function(p, parms) tan((p-.5)*pi),\n deviance= function(...) stop('deviance residuals not defined')\n )\nsurvreg(Surv(log(time), status) ~ ph.ecog + sex, lung, dist=mycauchy)\n\n\n"} {"package":"survival","topic":"survregDtest","snippet":"### Name: survregDtest\n### Title: Verify a survreg distribution\n### Aliases: survregDtest\n### Keywords: survival\n\n### ** Examples\n\n# An invalid distribution (it should have \"init =\" on line 2)\n# survreg would give an error message\nmycauchy <- list(name='Cauchy',\n init<- function(x, weights, ...) \n c(median(x), mad(x)),\n density= function(x, parms) {\n temp <- 1/(1 + x^2)\n cbind(.5 + atan(temp)/pi, .5+ atan(-temp)/pi,\n temp/pi, -2 *x*temp, 2*temp^2*(4*x^2*temp -1))\n },\n quantile= function(p, parms) tan((p-.5)*pi),\n deviance= function(...) 
stop('deviance residuals not defined')\n )\n\nsurvregDtest(mycauchy, TRUE)\n\n\n"} {"package":"survival","topic":"tcut","snippet":"### Name: tcut\n### Title: Factors for person-year calculations\n### Aliases: tcut [.tcut levels.tcut\n### Keywords: survival\n\n### ** Examples\n\n# For pyears, all time variables need to be on the same scale; but\n# futime is in months and age is in years\ntest <- mgus2\ntest$years <- test$futime/30.5 # follow-up in years\n\n# first grouping based on years from starting age (= current age)\n# second based on years since enrollment (all start at 0)\ntest$agegrp <- tcut(test$age, c(0,60, 70, 80, 100), \n c(\"<=60\", \"60-70\", \"70-80\", \">80\"))\ntest$fgrp <- tcut(rep(0, nrow(test)), c(0, 1, 5, 10, 100),\n c(\"0-1yr\", \"1-5yr\", \"5-10yr\", \">10yr\"))\n\n# death rates per 1000, by age group\npfit1 <- pyears(Surv(years, death) ~ agegrp, scale =1000, data=test)\nround(pfit1$event/ pfit1$pyears) \n\n#death rates per 1000, by follow-up year and age\n# there are excess deaths in the first year, within each age stratum\npfit2 <- pyears(Surv(years, death) ~ fgrp + agegrp, scale =1000, data=test)\nround(pfit2$event/ pfit2$pyears) \n\n\n"} {"package":"survival","topic":"tmerge","snippet":"### Name: tmerge\n### Title: Time based merge for survival data\n### Aliases: tmerge\n### Keywords: survival\n\n### ** Examples\n\n# The pbc data set contains baseline data and follow-up status\n# for a set of subjects with primary biliary cirrhosis, while the\n# pbcseq data set contains repeated laboratory values for those\n# subjects. \n# The first data set contains data on 312 subjects in a clinical trial plus\n# 106 that agreed to be followed off protocol, the second data set has data\n# only on the trial subjects.\ntemp <- subset(pbc, id <= 312, select=c(id:sex, stage)) # baseline data\npbc2 <- tmerge(temp, temp, id=id, endpt = event(time, status))\npbc2 <- tmerge(pbc2, pbcseq, id=id, ascites = tdc(day, ascites),\n bili = tdc(day, bili), albumin = tdc(day, albumin),\n protime = tdc(day, protime), alk.phos = tdc(day, alk.phos))\n\nfit <- coxph(Surv(tstart, tstop, endpt==2) ~ protime + log(bili), data=pbc2)\n\n\n"} {"package":"survival","topic":"tobin","snippet":"### Name: tobin\n### Title: Tobin's Tobit data\n### Aliases: tobin\n### Keywords: datasets\n\n### ** Examples\n\ntfit <- survreg(Surv(durable, durable>0, type='left') ~age + quant,\n data=tobin, dist='gaussian')\n\npredict(tfit,type=\"response\")\n\n\n\n"} {"package":"survival","topic":"transplant","snippet":"### Name: transplant\n### Title: Liver transplant waiting list\n### Aliases: transplant\n### Keywords: datasets\n\n### ** Examples\n\n#since event is a factor, survfit creates competing risk curves\npfit <- survfit(Surv(futime, event) ~ abo, transplant)\npfit[,2] #time to liver transplant, by blood type\nplot(pfit[,2], mark.time=FALSE, col=1:4, lwd=2, xmax=735,\n xscale=30.5, xlab=\"Months\", ylab=\"Fraction transplanted\",\n xaxt = 'n')\ntemp <- c(0, 6, 12, 18, 24)\naxis(1, temp*30.5, temp)\nlegend(450, .35, levels(transplant$abo), lty=1, col=1:4, lwd=2)\n\n# competing risks for type O\nplot(pfit[4,], xscale=30.5, xmax=735, col=1:3, lwd=2)\nlegend(450, .4, c(\"Death\", \"Transplant\", \"Withdrawal\"), col=1:3, lwd=2)\n\n\n"} {"package":"survival","topic":"udca","snippet":"### Name: udca\n### Title: Data from a trial of ursodeoxycholic acid\n### Aliases: udca udca1 udca2\n### Keywords: datasets\n\n### ** Examples\n\n# values found in table 8.3 of the book\nfit1 <- coxph(Surv(futime, status) ~ trt + log(bili) + 
stage,\n cluster =id , data=udca1)\nfit2 <- coxph(Surv(futime, status) ~ trt + log(bili) + stage +\n strata(endpoint), cluster=id, data=udca2)\n\n\n\n"} {"package":"survival","topic":"untangle.specials","snippet":"### Name: untangle.specials\n### Title: Help Process the 'specials' Argument of the 'terms' Function.\n### Aliases: untangle.specials\n### Keywords: survival\n\n### ** Examples\n\nformula <- Surv(tt,ss) ~ x + z*strata(id)\ntms <- terms(formula, specials=\"strata\")\n## the specials attribute\nattr(tms, \"specials\")\n## main effects \nuntangle.specials(tms, \"strata\")\n## and interactions\nuntangle.specials(tms, \"strata\", order=1:2)\n\n\n"} {"package":"survival","topic":"uspop2","snippet":"### Name: uspop2\n### Title: Projected US Population\n### Aliases: uspop2\n### Keywords: datasets\n\n### ** Examples\n\nus50 <- uspop2[51:101,, \"2000\"] #US 2000 population, 50 and over\nage <- as.integer(dimnames(us50)[[1]])\nsmat <- model.matrix( ~ factor(floor(age/5)) -1)\nustot <- t(smat) %*% us50 #totals by 5 year age groups\ntemp <- c(50,55, 60, 65, 70, 75, 80, 85, 90, 95)\ndimnames(ustot) <- list(c(paste(temp, temp+4, sep=\"-\"), \"100+\"),\n c(\"male\", \"female\"))\n\n\n"} {"package":"survival","topic":"xtfrm.Surv","snippet":"### Name: xtfrm.Surv\n### Title: Sorting order for Surv objects\n### Aliases: xtfrm.Surv sort.Surv order.Surv\n### Keywords: survival\n\n### ** Examples\n\ntest <- c(Surv(c(10, 9,9, 8,8,8,7,5,5,4), rep(1:0, 5)), Surv(6.2, NA))\ntest\nsort(test)\n\n\n"} {"package":"survival","topic":"yates","snippet":"### Name: yates\n### Title: Population prediction\n### Aliases: yates\n### Keywords: models survival\n\n### ** Examples\n\nfit1 <- lm(skips ~ Solder*Opening + Mask, data = solder)\nyates(fit1, ~Opening, population = \"factorial\")\n\nfit2 <- coxph(Surv(time, status) ~ factor(ph.ecog)*sex + age, lung)\nyates(fit2, ~ ph.ecog, predict=\"risk\") # hazard ratio\n\n\n"} {"package":"adaptDiag","topic":"binom_sample_size","snippet":"### Name: binom_sample_size\n### Title: Calculate the minimum number of samples required for a one-sided\n### exact binomial test\n### Aliases: binom_sample_size\n\n### ** Examples\n\n\n# The minimum number of reference positive cases required to demonstrate\n# the true sensitivity is >0.7, assuming that the true value is 0.824, with\n# 90% power is\n\nbinom_sample_size(alpha = 0.05, power = 0.9, p0 = 0.7, p1 = 0.824)\n\n# With a sample size of n = 104, if the true prevalence is 0.2, we would\n# require a sample size of at least n = 520 randomly sampled subjects to\n# have adequate power to demonstrate the sensitivity of the new test.\n\n# The minimum number of reference negative cases required to demonstrate\n# the true specificity is >0.9, assuming that the true value is 0.963, with\n# 90% power is\n\nbinom_sample_size(alpha = 0.05, power = 0.9, p0 = 0.9, p1 = 0.963)\n\n# The proposed total sample size of n = 520 would be sufficient to\n# demonstrate both endpoint goals are met.\n\n\n\n"} {"package":"adaptDiag","topic":"multi_trial","snippet":"### Name: multi_trial\n### Title: Simulate and analyse multiple trials\n### Aliases: multi_trial\n\n### ** Examples\n\n\nmulti_trial(\n sens_true = 0.9,\n spec_true = 0.95,\n prev_true = 0.1,\n endpoint = \"both\",\n sens_pg = 0.8,\n spec_pg = 0.8,\n prior_sens = c(0.1, 0.1),\n prior_spec = c(0.1, 0.1),\n prior_prev = c(0.1, 0.1),\n succ_sens = 0.95,\n succ_spec = 0.95,\n n_at_looks = c(200, 400, 600, 800, 1000),\n n_mc = 10000,\n n_trials = 2,\n ncores = 1\n)\n\n\n\n"} 
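A note on the sample-size inflation quoted in the adaptDiag binom_sample_size comments above: the total-enrollment figure comes from dividing the required number of reference-positive cases by the assumed disease prevalence. A minimal R sketch of that arithmetic, using the numbers from the example (the helper name inflate_n is illustrative, not part of adaptDiag):

# Illustrative helper: total enrollment needed so that, in expectation,
# enough reference-positive cases accrue at the assumed prevalence.
inflate_n <- function(n_pos, prev) ceiling(n_pos / prev)
inflate_n(104, 0.2)  # 520, matching the figure quoted in the comments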
{"package":"adaptDiag","topic":"summarise_trials","snippet":"### Name: summarise_trials\n### Title: Summarise results of multiple simulated trials to give the\n### operating characteristics\n### Aliases: summarise_trials\n\n### ** Examples\n\ndata <- multi_trial(\n sens_true = 0.9,\n spec_true = 0.95,\n prev_true = 0.1,\n endpoint = \"both\",\n sens_pg = 0.8,\n spec_pg = 0.8,\n prior_sens = c(1, 1),\n prior_spec = c(1, 1),\n prior_prev = c(1, 1),\n succ_sens = 0.95,\n succ_spec = 0.95,\n n_at_looks = c(200, 400, 600, 800, 1000),\n n_mc = 10000,\n n_trials = 20,\n ncores = 1\n )\n\nsummarise_trials(data, fut = 0.05, min_pos = 10)\n\n\n"} {"package":"gumboot","topic":"CAMELS_bootjack","snippet":"### Name: CAMELS_bootjack\n### Title: Jackknife after bootstrap for all CAMELS sites\n### Aliases: CAMELS_bootjack\n\n### ** Examples\n\n## Not run: \n##D camels <- CAMELS_bootjack(CAMELS_sites = sites, NetCDF_file = \"CAMELS_flow.nc\")\n## End(Not run)\n\n\n\n"} {"package":"gumboot","topic":"bootjack","snippet":"### Name: bootjack\n### Title: Bootstrap-jacknife of flow calibration statistics\n### Aliases: bootjack\n\n### ** Examples\n\nNSE_stats <- bootjack(flows_1030500, \"NSE\")\n\n\n"} {"package":"gumboot","topic":"ggplot_estimate_uncertainties","snippet":"### Name: ggplot_estimate_uncertainties\n### Title: Plots uncertainties in model error estimates\n### Aliases: ggplot_estimate_uncertainties\n\n### ** Examples\n\n## Not run: p <- ggplot_estimate_uncertainties(all_stats, \"orange\")\n\n\n\n"} {"package":"gumboot","topic":"read_CAMELS","snippet":"### Name: read_CAMELS\n### Title: Reads simulated and observed values from CAMELS netcdf file for\n### a single location\n### Aliases: read_CAMELS\n\n### ** Examples\n\n## Not run: \n##D flows <- read_CAMELS(nc_file = \"CAMELS_flow.nc\", site = 1030500)\n## End(Not run)\n\n\n"} {"package":"ymlthis","topic":"as_yml","snippet":"### Name: as_yml\n### Title: Convert to yml object\n### Aliases: as_yml\n\n### ** Examples\n\n\nx <- as_yml(\"\n author: Hadley Wickham\n date: '2014-09-12'\n title: Tidy Data\n keywords:\n - data cleaning\n - data tidying\n - relational databases\n - R\")\n\n x\n\n x %>%\n yml_subtitle(\"Hadley's Tidy Data Paper\")\n\n\n\n"} {"package":"ymlthis","topic":"code_chunk","snippet":"### Name: code_chunk\n### Title: Write code chunks programmatically\n### Aliases: code_chunk setup_chunk\n\n### ** Examples\n\n## No test: \nsetup_chunk()\n\ncode_chunk({\n yml() %>%\n yml_output(pdf_document())\n}, chunk_name = \"yml_example\")\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"draw_yml_tree","snippet":"### Name: draw_yml_tree\n### Title: Draw an tree of YAML hierarchy\n### Aliases: draw_yml_tree\n\n### ** Examples\n\n# draw the most recently used `yml`\ndraw_yml_tree()\n## No test: \nyml() %>%\n yml_output(\n pdf_document(keep_tex = TRUE),\n html_document()\n ) %>%\n draw_yml_tree()\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"has_field","snippet":"### Name: has_field\n### Title: Check if field exists in YAML\n### Aliases: has_field\n\n### ** Examples\n\n\nhas_field(yml(), \"author\")\nhas_field(yml(), \"toc\")\n\n\n\n"} {"package":"ymlthis","topic":"includes2","snippet":"### Name: includes2\n### Title: Include content within output\n### Aliases: includes2\n\n### ** Examples\n\n## No test: \nyml() %>%\n yml_output(\n pdf_document(includes = includes2(after_body = \"footer.tex\"))\n )\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"last_yml","snippet":"### Name: last_yml\n### Title: Return the most recently printed 
YAML\n### Aliases: last_yml\n\n### ** Examples\n\nyml() %>%\n yml_author(\"Yihui Xie\")\n\nlast_yml()\n\n\n\n"} {"package":"ymlthis","topic":"pagedown_business_card_template","snippet":"### Name: pagedown_business_card_template\n### Title: Generate a full YAML template for your pagedown business card\n### Aliases: pagedown_business_card_template pagedown_person\n\n### ** Examples\n\npagedown_business_card_template(\n name = \"Jane Doe\",\n title = \"Miss Nobody\",\n phone = \"+1 123-456-7890\",\n email = \"jane.doe@example.com\",\n url = \"www.example.com\",\n address = \"2020 South Street,\n Sunshine, CA 90000\",\n logo = \"logo.png\",\n .repeat = 12\n)\n\npagedown_business_card_template(\n phone = \"+1 123-456-7890\",\n url = \"www.example.com\",\n address = \"2020 South Street,\n Sunshine, CA 90000\",\n logo = \"logo.png\",\n person = list(\n pagedown_person(\n name = \"Jane Doe\",\n title = \"Miss Nobody\",\n email = \"jane.doe@example.com\",\n .repeat = 6\n ),\n pagedown_person(\n name = \"John Doe\",\n title = \"Mister Nobody\",\n phone = \"+1 777-777-7777\", # overrides the default phone\n email = \"john.doe@example.com\",\n .repeat = 6\n )\n ),\n paperwidth = \"8.5in\",\n paperheight = \"11in\",\n cols = 4,\n rows = 3\n)\n\n\n\n"} {"package":"ymlthis","topic":"pkgdown_template","snippet":"### Name: pkgdown_template\n### Title: Generate a full YAML template for your pkgdown site\n### Aliases: pkgdown_template\n\n### ** Examples\n\n## Not run: \n##D # requires this to be a package directory\n##D pkgdown_template() %>%\n##D use_pkgdown_yml()\n## End(Not run)\n\n\n\n"} {"package":"ymlthis","topic":"yml","snippet":"### Name: yml\n### Title: Create a new yml object\n### Aliases: yml yml_empty\n\n### ** Examples\n\n\nyml()\n\nyml(date = FALSE)\n\n\"author: Hadley Wickham\\ndate: 2014-09-12\" %>%\n yml() %>%\n yml_title(\"Tidy Data\") %>%\n yml_keywords(\n c(\"data cleaning\", \"data tidying\", \"relational databases\", \"R\")\n )\n## No test: \nyml() %>%\n yml_author(\n c(\"Yihui Xie\", \"Hadley Wickham\"),\n affiliation = rep(\"RStudio\", 2)\n ) %>%\n yml_date(\"07/04/2019\") %>%\n yml_output(\n pdf_document(\n keep_tex = TRUE,\n includes = includes2(after_body = \"footer.tex\")\n )\n ) %>%\n yml_latex_opts(biblio_style = \"apalike\")\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"yml_author","snippet":"### Name: yml_author\n### Title: Set Top-level R Markdown YAML Fields\n### Aliases: yml_author yml_date yml_title yml_subtitle yml_abstract\n### yml_keywords yml_subject yml_description yml_category yml_lang\n### yml_toplevel\n\n### ** Examples\n\nyml_empty() %>%\n yml_author(\"Yihui Xie\") %>%\n yml_date(\"02-02-2002\") %>%\n yml_title(\"R Markdown: An Introduction\") %>%\n yml_subtitle(\"Introducing ymlthis\") %>%\n yml_abstract(\"This paper will discuss a very important topic\") %>%\n yml_keywords(c(\"r\", \"reproducible research\")) %>%\n yml_subject(\"R Markdown\") %>%\n yml_description(\"An R Markdown reader\") %>%\n yml_category(\"r\") %>%\n yml_lang(\"en-US\")\n\n\n\n"} {"package":"ymlthis","topic":"yml_blank","snippet":"### Name: yml_blank\n### Title: Return a blank object to be discarded from YAML\n### Aliases: yml_blank is_yml_blank\n\n### ** Examples\n\n\nyml() %>%\n yml_replace(author = yml_blank()) %>%\n yml_discard(~is_yml_blank(.x))\n\n\n\n\n"} {"package":"ymlthis","topic":"yml_blogdown_opts","snippet":"### Name: yml_blogdown_opts\n### Title: Set Top-level YAML options for blogdown\n### Aliases: yml_blogdown_opts\n\n### ** Examples\n\n\nyml() %>%\n 
yml_blogdown_opts(\n draft = TRUE,\n slug = \"blog-post\"\n )\n\n\n"} {"package":"ymlthis","topic":"yml_bookdown_opts","snippet":"### Name: yml_bookdown_opts\n### Title: Set Top-level YAML options for bookdown\n### Aliases: yml_bookdown_opts yml_bookdown_site\n\n### ** Examples\n\n\nyml_empty() %>%\n yml_bookdown_opts(\n book_filename = \"my-book.Rmd\",\n before_chapter_script = c(\"script1.R\", \"script2.R\"),\n after_chapter_script = \"script3.R\",\n edit = \"https://github.com/rstudio/bookdown-demo/edit/master/%s\",\n output_dir = \"book-output\",\n clean = c(\"my-book.bbl\", \"R-packages.bib\")\n )\n\nyml_empty() %>%\n yml_bookdown_opts(\n rmd_files = list(\n html = c(\"index.Rmd\", \"abstract.Rmd\", \"intro.Rmd\"),\n latex = c(\"abstract.Rmd\", \"intro.Rmd\")\n )\n )\n\n x <- yml_empty() %>%\n yml_title(\"A Minimal Book Example\") %>%\n yml_date(yml_code(Sys.Date())) %>%\n yml_author(\"Yihui Xie\") %>%\n yml_bookdown_site() %>%\n yml_latex_opts(\n documentclass = \"book\",\n bibliography = c(\"book.bib\", \"packages.bib\"),\n biblio_style = \"apalike\"\n ) %>%\n yml_citations(\n link_citations = TRUE\n ) %>%\n yml_description(\"This is a minimal example of using\n the bookdown package to write a book.\")\n\nx\n\n\n## No test: \noutput_yml <- yml_empty() %>%\n yml_output(\n bookdown::gitbook(\n lib_dir = \"assets\",\n split_by = \"section\",\n config = gitbook_config(toolbar_position = \"static\")\n ),\n bookdown::pdf_book(keep_tex = TRUE),\n bookdown::html_book(css = \"toc.css\")\n )\noutput_yml\n## End(No test)\n\n\n\n\n"} {"package":"ymlthis","topic":"yml_citations","snippet":"### Name: yml_citations\n### Title: Set citation-related YAML options\n### Aliases: yml_citations\n\n### ** Examples\n\n\nyml() %>%\n yml_citations(bibliography = \"references.bib\", csl = \"aje.csl\")\n\n\n\n"} {"package":"ymlthis","topic":"yml_clean","snippet":"### Name: yml_clean\n### Title: Remove intermediate rendering files\n### Aliases: yml_clean\n\n### ** Examples\n\n\nyml() %>%\n # keep intermediate files\n yml_clean(FALSE)\n\n\n\n"} {"package":"ymlthis","topic":"yml_code","snippet":"### Name: yml_code\n### Title: Take code and write it as valid YAML\n### Aliases: yml_code yml_params_code\n\n### ** Examples\n\n\nyml_empty() %>%\n yml_date(yml_code(Sys.Date()))\n\nyml_empty() %>%\n yml_params(date = yml_params_code(Sys.Date()))\n\n\n\n"} {"package":"ymlthis","topic":"yml_distill_opts","snippet":"### Name: yml_distill_opts\n### Title: Set Top-level YAML options for distill\n### Aliases: yml_distill_opts yml_distill_author distill_listing\n### distill_collection distill_resources\n\n### ** Examples\n\npost_listing <- distill_listing(\n slugs = c(\n \"2016-11-08-sharpe-ratio\",\n \"2017-11-09-visualizing-asset-returns\",\n \"2017-09-13-asset-volatility\"\n )\n)\n\nyml() %>%\n yml_title(\"Gallery of featured posts\") %>%\n yml_distill_opts(listing = post_listing)\n\nyml_empty() %>%\n yml_title(\"Reproducible Finance with R\") %>%\n yml_description(\"Exploring reproducible finance with the R statistical\n computing environment.\") %>%\n yml_site_opts(name = \"reproducible-finance-with-r\") %>%\n yml_distill_opts(\n base_url = \"https://beta.rstudioconnect.com/content/3776/\",\n collection = distill_collection(\n feed_items_max = 30,\n disqus_name = \"reproducible-finance-with-r\",\n disqus_hidden = FALSE,\n share = c(\"twitter\", \"linkedin\")\n )\n )\n\n\n\n"} {"package":"ymlthis","topic":"yml_latex_opts","snippet":"### Name: yml_latex_opts\n### Title: Set LaTeX YAML options for PDF output\n### Aliases: 
yml_latex_opts\n\n### ** Examples\n\n## No test: \nyml() %>%\n yml_output(pdf_document()) %>%\n yml_latex_opts(\n fontfamily = \"Fira Sans Thin\",\n fontsize = \"11pt\",\n links_as_notes = TRUE\n )\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"yml_load","snippet":"### Name: yml_load\n### Title: Load YAML from string\n### Aliases: yml_load\n\n### ** Examples\n\nc(\"title: my title\", \"author: Malcolm Barrett\") %>%\n yml_load()\n\n\n\n"} {"package":"ymlthis","topic":"yml_output","snippet":"### Name: yml_output\n### Title: Capture, validate, and write output YAML\n### Aliases: yml_output\n\n### ** Examples\n\n## No test: \nyml() %>%\n yml_output(html_document())\n\nyml() %>%\n yml_output(\n pdf_document(keep_tex = TRUE, includes = includes2(after_body = \"footer.tex\")),\n bookdown::html_document2()\n )\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"yml_pagedown_opts","snippet":"### Name: yml_pagedown_opts\n### Title: Top-level YAML options for pagedown\n### Aliases: yml_pagedown_opts\n\n### ** Examples\n\n\nyml() %>%\n yml_pagedown_opts(\n toc = TRUE,\n toc_title = \"TOC\",\n chapter_name = c(\"CHAPTER\\\\ \", \".\"),\n links_to_footnotes = TRUE\n )\n\n\n\n"} {"package":"ymlthis","topic":"yml_params","snippet":"### Name: yml_params\n### Title: Parameterize an R Markdown report using Shiny components\n### Aliases: yml_params shiny_params shiny_checkbox shiny_numeric\n### shiny_slider shiny_date shiny_text shiny_file shiny_radio\n### shiny_select shiny_password\n\n### ** Examples\n\n\nyml() %>%\n yml_params(\n z = \"z\",\n x = shiny_numeric(\"Starting value\", 23),\n no = shiny_checkbox(\"No option?\"),\n y = shiny_slider(\"Data range\", 0, 1, .5, round = TRUE)\n )\n\n\n\n"} {"package":"ymlthis","topic":"yml_pkgdown","snippet":"### Name: yml_pkgdown\n### Title: Set Top-level YAML options for pkgdown\n### Aliases: yml_pkgdown yml_pkgdown_opts yml_pkgdown_development\n### yml_pkgdown_template yml_pkgdown_reference pkgdown_ref\n### yml_pkgdown_news yml_pkgdown_articles pkgdown_article\n### yml_pkgdown_tutorial pkgdown_tutorial yml_pkgdown_figures\n### yml_pkgdown_docsearch\n\n### ** Examples\n\n\nyml_empty() %>%\n yml_pkgdown(\n as_is = TRUE,\n extension = \"pdf\"\n ) %>%\n yml_pkgdown_reference(\n pkgdown_ref(\n title = \"pkgdown functions\",\n contents = \"contains('function_name')\"\n )\n ) %>%\n yml_pkgdown_articles(\n pkgdown_article(\n title = \"Introduction to the package\"\n )\n )\n\n\n\n"} {"package":"ymlthis","topic":"yml_reference","snippet":"### Name: yml_reference\n### Title: Write references as YAML fields\n### Aliases: yml_reference reference\n\n### ** Examples\n\n\nref <- reference(\n id = \"fenner2012a\",\n title = \"One-click science marketing\",\n author = list(\n family = \"Fenner\",\n given = \"Martin\"\n ),\n `container-title` = \"Nature Materials\",\n volume = 11L,\n URL = \"https://doi.org/10.1038/nmat3283\",\n DOI = \"10.1038/nmat3283\",\n issue = 4L,\n publisher = \"Nature Publishing Group\",\n page = \"261-263\",\n type = \"article-journal\",\n issued = list(\n year = 2012,\n month = 3\n )\n)\n\nyml() %>%\n yml_reference(ref)\n\n# from ?bibentry\nbref <- c(\n bibentry(\n bibtype = \"Manual\",\n title = \"boot: Bootstrap R (S-PLUS) Functions\",\n author = c(\n person(\"Angelo\", \"Canty\", role = \"aut\",\n comment = \"S original\"),\n person(c(\"Brian\", \"D.\"), \"Ripley\", role = c(\"aut\", \"trl\", \"cre\"),\n comment = \"R port, author of parallel support\",\n email = \"ripley@stats.ox.ac.uk\")\n ),\n year = \"2012\",\n note = \"R package version 
1.3-4\",\n url = \"https://CRAN.R-project.org/package=boot\",\n key = \"boot-package\"\n ),\n\n bibentry(\n bibtype = \"Book\",\n title = \"Bootstrap Methods and Their Applications\",\n author = as.person(\"Anthony C. Davison [aut], David V. Hinkley [aut]\"),\n year = \"1997\",\n publisher = \"Cambridge University Press\",\n address = \"Cambridge\",\n isbn = \"0-521-57391-2\",\n url = \"http://statwww.epfl.ch/davison/BMA/\",\n key = \"boot-book\"\n )\n)\n## No test: \n# requires pandoc-citeproc to be installed\nyml() %>%\n yml_reference(.bibentry = bref)\n\nyml() %>%\n yml_reference(.bibentry = citation(\"purrr\"))\n## End(No test)\n\n\n"} {"package":"ymlthis","topic":"yml_replace","snippet":"### Name: yml_replace\n### Title: Replace, pluck, or discard top-level YAML fields\n### Aliases: yml_replace yml_discard yml_pluck yml_chuck\n\n### ** Examples\n\n## No test: \nyml() %>%\n yml_clean(TRUE) %>%\n yml_replace(clean = FALSE) %>%\n yml_discard(\"author\")\n\nyml() %>%\n yml_output(\n pdf_document(),\n html_document()\n )%>%\n yml_discard(~ length(.x) > 1)\n## End(No test)\n\n\n\n"} {"package":"ymlthis","topic":"yml_resource_files","snippet":"### Name: yml_resource_files\n### Title: Add external resource files to R Markdown document\n### Aliases: yml_resource_files\n\n### ** Examples\n\n\nyml() %>%\n yml_resource_files(c(\"data/mydata.csv\", \"images/figure.png\"))\n\n\n"} {"package":"ymlthis","topic":"yml_rsconnect_email","snippet":"### Name: yml_rsconnect_email\n### Title: Set YAML for Scheduled Emails in RStudio Connect\n### Aliases: yml_rsconnect_email yml_output_metadata\n\n### ** Examples\n\n\nyml() %>%\n yml_rsconnect_email(\n rsc_email_subject = \"Quarterly report\",\n rsc_output_files = \"data.csv\",\n rsc_email_attachments = c(\"attachment_1.csv\", \"attachment_2.csv\")\n )\n\n\n"} {"package":"ymlthis","topic":"yml_rticles_opts","snippet":"### Name: yml_rticles_opts\n### Title: Set YAML related to rticles output formats\n### Aliases: yml_rticles_opts rticles_author rticles_address\n### rticles_corr_author\n\n### ** Examples\n\n\nyml() %>%\n yml_rticles_opts(received = \"09-12-2014\")\n\n\n\n"} {"package":"ymlthis","topic":"yml_runtime","snippet":"### Name: yml_runtime\n### Title: Activate Shiny in R Markdown\n### Aliases: yml_runtime\n\n### ** Examples\n\n\nyml() %>%\n yml_runtime(\"shiny\")\n\n\n\n"} {"package":"ymlthis","topic":"yml_site_opts","snippet":"### Name: yml_site_opts\n### Title: Add site options for _site.yml and navbars for R Markdown\n### websites\n### Aliases: yml_site_opts yml_navbar navbar_page navbar_separator\n\n### ** Examples\n\nyml_empty() %>%\n yml_site_opts(\n name = \"my-website\",\n output_dir = \"_site\",\n include = \"demo.R\",\n exclude = c(\"docs.txt\", \"*.csv\")\n ) %>%\n yml_navbar(\n title = \"My Website\",\n left = list(\n navbar_page(\"Home\", href = \"index.html\"),\n navbar_page(navbar_separator(), href = \"about.html\")\n )\n ) %>%\n yml_output(html_document(toc = TRUE, highlight = \"textmate\"))\n\n\n\n"} {"package":"ymlthis","topic":"yml_toc","snippet":"### Name: yml_toc\n### Title: Specify Table of Contents options\n### Aliases: yml_toc\n\n### ** Examples\n\n\nyml() %>%\n yml_toc(toc = TRUE, toc_depth = 1, toc_title = \"Article Outline\")\n\n\n\n"} {"package":"ymlthis","topic":"yml_verbatim","snippet":"### Name: yml_verbatim\n### Title: Write YAML field or content verbatim\n### Aliases: yml_verbatim\n\n### ** Examples\n\n# \"yes\" and \"no\" serve as alternatives to `true` and `false`. 
This writes\n# \"yes\" literally.\nyml_verbatim(\"yes\")\n\n\n"} {"package":"ymlthis","topic":"yml_vignette","snippet":"### Name: yml_vignette\n### Title: Set up a package vignette\n### Aliases: yml_vignette\n\n### ** Examples\n\n\nyml() %>%\n yml_output(html_vignette()) %>%\n yml_vignette(\"An introduction to R Markdown\")\n\n\n\n"} {"package":"embed","topic":"add_woe","snippet":"### Name: add_woe\n### Title: Add WoE in a data frame\n### Aliases: add_woe\n\n### ** Examples\n\n\nmtcars %>% add_woe(\"am\", cyl, gear:carb)\n\n\n"} {"package":"embed","topic":"dictionary","snippet":"### Name: dictionary\n### Title: Weight of evidence dictionary\n### Aliases: dictionary\n\n### ** Examples\n\n\nmtcars %>% dictionary(\"am\", cyl, gear:carb)\n\n\n"} {"package":"embed","topic":"solubility","snippet":"### Name: solubility\n### Title: Compound solubility data\n### Aliases: solubility\n### Keywords: datasets\n\n### ** Examples\n\ndata(solubility)\nstr(solubility)\n\n\n"} {"package":"embed","topic":"step_collapse_cart","snippet":"### Name: step_collapse_cart\n### Title: Supervised Collapsing of Factor Levels\n### Aliases: step_collapse_cart\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(c(\"modeldata\", \"rpart\"))) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\ndata(ames, package = \"modeldata\")\names$Sale_Price <- log10(ames$Sale_Price)\n\nrec <-\n recipe(Sale_Price ~ ., data = ames) %>%\n step_collapse_cart(\n Sale_Type, Garage_Type, Neighborhood,\n outcome = vars(Sale_Price)\n ) %>%\n prep()\ntidy(rec, number = 1)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_collapse_stringdist","snippet":"### Name: step_collapse_stringdist\n### Title: collapse factor levels using stringdist\n### Aliases: step_collapse_stringdist\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"stringdist\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(tibble)\ndata0 <- tibble(\n x1 = c(\"a\", \"b\", \"d\", \"e\", \"sfgsfgsd\", \"hjhgfgjgr\"),\n x2 = c(\"ak\", \"b\", \"djj\", \"e\", \"hjhgfgjgr\", \"hjhgfgjgr\")\n)\n\nrec <- recipe(~., data = data0) %>%\n step_collapse_stringdist(all_predictors(), distance = 1) %>%\n prep()\n\nrec %>%\n bake(new_data = NULL)\n\ntidy(rec, 1)\n\nrec <- recipe(~., data = data0) %>%\n step_collapse_stringdist(all_predictors(), distance = 2) %>%\n prep()\n\nrec %>%\n bake(new_data = NULL)\n\ntidy(rec, 1)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_discretize_cart","snippet":"### Name: step_discretize_cart\n### Title: Discretize numeric variables with CART\n### Aliases: step_discretize_cart\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(modeldata)\ndata(ad_data)\nlibrary(rsample)\n\nsplit <- initial_split(ad_data, strata = \"Class\")\n\nad_data_tr <- training(split)\nad_data_te <- testing(split)\n\ncart_rec <-\n recipe(Class ~ ., data = ad_data_tr) %>%\n step_discretize_cart(\n tau, age, p_tau, Ab_42,\n outcome = \"Class\", id = \"cart splits\"\n )\n\ncart_rec <- prep(cart_rec, training = ad_data_tr)\n\n# The splits:\ntidy(cart_rec, id = \"cart splits\")\n\nbake(cart_rec, ad_data_te, tau)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_discretize_xgb","snippet":"### Name: 
step_discretize_xgb\n### Title: Discretize numeric variables with XgBoost\n### Aliases: step_discretize_xgb\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(c(\"xgboost\", \"modeldata\"))) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(rsample)\nlibrary(recipes)\ndata(credit_data, package = \"modeldata\")\n\nset.seed(1234)\nsplit <- initial_split(credit_data[1:1000, ], strata = \"Status\")\n\ncredit_data_tr <- training(split)\ncredit_data_te <- testing(split)\n\nxgb_rec <-\n recipe(Status ~ Income + Assets, data = credit_data_tr) %>%\n step_impute_median(Income, Assets) %>%\n step_discretize_xgb(Income, Assets, outcome = \"Status\")\n\nxgb_rec <- prep(xgb_rec, training = credit_data_tr)\n\nbake(xgb_rec, credit_data_te, Assets)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_embed","snippet":"### Name: step_embed\n### Title: Encoding Factors into Multiple Columns\n### Aliases: step_embed embed_control\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (!embed:::is_cran_check() && rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\ndata(grants, package = \"modeldata\")\n\nset.seed(1)\ngrants_other <- sample_n(grants_other, 500)\n\nrec <- recipe(class ~ num_ci + sponsor_code, data = grants_other) %>%\n step_embed(sponsor_code,\n outcome = vars(class),\n options = embed_control(epochs = 10)\n )\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_feature_hash","snippet":"### Name: step_feature_hash\n### Title: Dummy Variables Creation via Feature Hashing\n### Aliases: step_feature_hash\n\n### ** Examples\n\n## Don't show: \nif (!embed:::is_cran_check() && rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\ndata(grants, package = \"modeldata\")\nrec <-\n recipe(class ~ sponsor_code, data = grants_other) %>%\n step_feature_hash(\n sponsor_code,\n num_hash = 2^6, keep_original_cols = TRUE\n ) %>%\n prep()\n\n# How many of the 298 locations ended up in each hash column?\nresults <-\n bake(rec, new_data = NULL, starts_with(\"sponsor_code\")) %>%\n distinct()\n\napply(results %>% select(-sponsor_code), 2, sum) %>% table()\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_lencode_bayes","snippet":"### Name: step_lencode_bayes\n### Title: Supervised Factor Conversions into Linear Functions using\n### Bayesian Likelihood Encodings\n### Aliases: step_lencode_bayes\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(dplyr)\nlibrary(modeldata)\n\ndata(grants)\n\nset.seed(1)\ngrants_other <- sample_n(grants_other, 500)\n## No test: \nreencoded <- recipe(class ~ sponsor_code, data = grants_other) %>%\n step_lencode_bayes(sponsor_code, outcome = vars(class))\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_lencode_glm","snippet":"### Name: step_lencode_glm\n### Title: Supervised Factor Conversions into Linear Functions using\n### Likelihood Encodings\n### Aliases: step_lencode_glm\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") 
withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(dplyr)\nlibrary(modeldata)\n\ndata(grants)\n\nset.seed(1)\ngrants_other <- sample_n(grants_other, 500)\n## No test: \nreencoded <- recipe(class ~ sponsor_code, data = grants_other) %>%\n step_lencode_glm(sponsor_code, outcome = vars(class))\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_lencode_mixed","snippet":"### Name: step_lencode_mixed\n### Title: Supervised Factor Conversions into Linear Functions using\n### Bayesian Likelihood Encodings\n### Aliases: step_lencode_mixed\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(dplyr)\nlibrary(modeldata)\n\ndata(grants)\n\nset.seed(1)\ngrants_other <- sample_n(grants_other, 500)\n## No test: \nreencoded <- recipe(class ~ sponsor_code, data = grants_other) %>%\n step_lencode_mixed(sponsor_code, outcome = vars(class))\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_pca_sparse","snippet":"### Name: step_pca_sparse\n### Title: Sparse PCA Signal Extraction\n### Aliases: step_pca_sparse\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(c(\"modeldata\", \"ggplot2\"))) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(ggplot2)\n\ndata(ad_data, package = \"modeldata\")\n\nad_rec <-\n recipe(Class ~ ., data = ad_data) %>%\n step_zv(all_predictors()) %>%\n step_YeoJohnson(all_numeric_predictors()) %>%\n step_normalize(all_numeric_predictors()) %>%\n step_pca_sparse(\n all_numeric_predictors(),\n predictor_prop = 0.75,\n num_comp = 3,\n id = \"sparse pca\"\n ) %>%\n prep()\n\ntidy(ad_rec, id = \"sparse pca\") %>%\n mutate(value = ifelse(value == 0, NA, value)) %>%\n ggplot(aes(x = component, y = terms, fill = value)) +\n geom_tile() +\n scale_fill_gradient2() +\n theme(axis.text.y = element_blank())\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_pca_sparse_bayes","snippet":"### Name: step_pca_sparse_bayes\n### Title: Sparse Bayesian PCA Signal Extraction\n### Aliases: step_pca_sparse_bayes\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(c(\"modeldata\", \"ggplot2\"))) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(ggplot2)\n\ndata(ad_data, package = \"modeldata\")\n\nad_rec <-\n recipe(Class ~ ., data = ad_data) %>%\n step_zv(all_predictors()) %>%\n step_YeoJohnson(all_numeric_predictors()) %>%\n step_normalize(all_numeric_predictors()) %>%\n step_pca_sparse_bayes(\n all_numeric_predictors(),\n prior_mixture_threshold = 0.95,\n prior_slab_dispersion = 0.05,\n num_comp = 3,\n id = \"sparse bayesian pca\"\n ) %>%\n prep()\n\ntidy(ad_rec, id = \"sparse bayesian pca\") %>%\n mutate(value = ifelse(value == 0, NA, value)) %>%\n ggplot(aes(x = component, y = terms, fill = value)) +\n geom_tile() +\n scale_fill_gradient2() +\n theme(axis.text.y = element_blank())\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_pca_truncated","snippet":"### Name: step_pca_truncated\n### Title: Truncated PCA Signal Extraction\n### Aliases: step_pca_truncated\n\n### ** Examples\n\nrec <- 
recipe(~., data = mtcars)\npca_trans <- rec %>%\n step_normalize(all_numeric()) %>%\n step_pca_truncated(all_numeric(), num_comp = 2)\npca_estimates <- prep(pca_trans, training = mtcars)\npca_data <- bake(pca_estimates, mtcars)\n\nrng <- extendrange(c(pca_data$PC1, pca_data$PC2))\nplot(pca_data$PC1, pca_data$PC2,\n xlim = rng, ylim = rng\n)\n\ntidy(pca_trans, number = 2)\ntidy(pca_estimates, number = 2)\n\n\n"} {"package":"embed","topic":"step_umap","snippet":"### Name: step_umap\n### Title: Supervised and unsupervised uniform manifold approximation and\n### projection (UMAP)\n### Aliases: step_umap\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"ggplot2\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(recipes)\nlibrary(ggplot2)\n\nsplit <- seq.int(1, 150, by = 9)\ntr <- iris[-split, ]\nte <- iris[split, ]\n\nset.seed(11)\nsupervised <-\n recipe(Species ~ ., data = tr) %>%\n step_center(all_predictors()) %>%\n step_scale(all_predictors()) %>%\n step_umap(all_predictors(), outcome = vars(Species), num_comp = 2) %>%\n prep(training = tr)\n\ntheme_set(theme_bw())\n\nbake(supervised, new_data = te, Species, starts_with(\"umap\")) %>%\n ggplot(aes(x = UMAP1, y = UMAP2, col = Species)) +\n geom_point(alpha = .5)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"embed","topic":"step_woe","snippet":"### Name: step_woe\n### Title: Weight of evidence transformation\n### Aliases: step_woe\n### Keywords: datagen\n\n### ** Examples\n\n## Don't show: \nif (rlang::is_installed(\"modeldata\")) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nlibrary(modeldata)\ndata(\"credit_data\")\n\nset.seed(111)\nin_training <- sample(1:nrow(credit_data), 2000)\n\ncredit_tr <- credit_data[in_training, ]\ncredit_te <- credit_data[-in_training, ]\n\nrec <- recipe(Status ~ ., data = credit_tr) %>%\n step_woe(Job, Home, outcome = vars(Status))\n\nwoe_models <- prep(rec, training = credit_tr)\n\n# the encoding:\nbake(woe_models, new_data = credit_te %>% slice(1:5), starts_with(\"woe\"))\n# the original data\ncredit_te %>%\n slice(1:5) %>%\n dplyr::select(Job, Home)\n# the details:\ntidy(woe_models, number = 1)\n\n# Example of custom dictionary + tweaking\n# custom dictionary\nwoe_dict_custom <- credit_tr %>% dictionary(Job, Home, outcome = \"Status\")\nwoe_dict_custom[4, \"woe\"] <- 1.23 # tweak\n\n# passing custom dict to step_woe()\nrec_custom <- recipe(Status ~ ., data = credit_tr) %>%\n step_woe(\n Job, Home,\n outcome = vars(Status), dictionary = woe_dict_custom\n ) %>%\n prep()\n\nrec_custom_baked <- bake(rec_custom, new_data = credit_te)\nrec_custom_baked %>%\n dplyr::filter(woe_Job == 1.23) %>%\n head()\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"DOPE","topic":"compress_lookup","snippet":"### Name: compress_lookup\n### Title: Collapse Redundant Rows of a Lookup Table\n### Aliases: compress_lookup\n\n### ** Examples\n\n longExampleTable <- lookup(\"dope\", \"methamphetamine\")\n compress_lookup(longExampleTable)\n compress_lookup(longExampleTable, compressCategory = TRUE)\n\n\n"} {"package":"DOPE","topic":"lookup","snippet":"### Name: lookup\n### Title: Make a table with the class and category for a drug name\n### Aliases: lookup\n\n### ** Examples\n\n lookup(\"zip\", \"shrooms\")\n\n\n"} {"package":"DOPE","topic":"lookup_syn","snippet":"### Name: lookup_syn\n### Title: Make a table with the class and category for a drug name\n### Aliases: 
lookup_syn\n\n### ** Examples\n\n lookup_syn(\"zip\")\n\n\n"} {"package":"DOPE","topic":"parse","snippet":"### Name: parse\n### Title: Parse a vector of free text containing drug information\n### Aliases: parse\n\n### ** Examples\n\n parse(\"Lortab and Percocet\")\n\n\n\n"} {"package":"tinytest2JUnit","topic":"writeJUnit","snippet":"### Name: writeJUnit\n### Title: Write the results of a 'tinytests'-object into JUnit xml report.\n### Aliases: writeJUnit\n\n### ** Examples\n\n# Run tests with `tinytest`\ndirWithTests <- system.file(\"example_tests/multiple_files\",package = \"tinytest2JUnit\")\ntestresults <- tinytest::run_test_dir(dirWithTests, verbose = FALSE)\n# temporary output file to save JUnit XML to\ntmpFile <- tempfile(fileext = \".xml\")\nwriteJUnit(tinytests = testresults, file = tmpFile)\n\n\n"} {"package":"humaniformat","topic":"first_name","snippet":"### Name: first_name\n### Title: Get or set a name's first name\n### Aliases: first_name first_name<-\n\n### ** Examples\n\n#Get a first name\nexample_name <- \"Mr Jim Jeffries\"\nfirst_name(example_name)\n\n#Set a first name\nfirst_name(example_name) <- \"Prof\"\n\n\n"} {"package":"humaniformat","topic":"format_period","snippet":"### Name: format_period\n### Title: Reformat Period-Separated Names\n### Aliases: format_period\n\n### ** Examples\n\nformat_period(\"G.K.Chesterton\")\n\n\n\n"} {"package":"humaniformat","topic":"format_reverse","snippet":"### Name: format_reverse\n### Title: Reformat Reversed Names\n### Aliases: format_reverse\n\n### ** Examples\n\n\n# Take a reversed name and un-reverse it\nformat_reverse(\"Keyes, Oliver\")\n\n\n\n"} {"package":"humaniformat","topic":"last_name","snippet":"### Name: last_name\n### Title: Get or set a name's last name\n### Aliases: last_name last_name<-\n\n### ** Examples\n\n#Get a last name\nexample_name <- \"Mr Jim Toby Jeffries\"\nlast_name(example_name)\n\n#Set a last name\nlast_name(example_name) <- \"Smith\"\n\n\n"} {"package":"humaniformat","topic":"middle_name","snippet":"### Name: middle_name\n### Title: Get or set a name's middle name\n### Aliases: middle_name middle_name<-\n\n### ** Examples\n\n#Get a middle name\nexample_name <- \"Mr Jim Toby Jeffries\"\nmiddle_name(example_name)\n\n#Set a middle name\nmiddle_name(example_name) <- \"Richard\"\n\n\n"} {"package":"humaniformat","topic":"parse_names","snippet":"### Name: parse_names\n### Title: Parse Human Names\n### Aliases: parse_names\n\n### ** Examples\n\n# Parse a simple name\nparse_names(\"Oliver Keyes\")\n\n# Parse a more complex name\nparse_names(\"Hon. 
Oliver Timothy Keyes Esq.\")\n\n\n\n"} {"package":"humaniformat","topic":"salutation","snippet":"### Name: salutation\n### Title: Get or set a name's saltation\n### Aliases: salutation salutation<-\n\n### ** Examples\n\n#Get a salutation\nexample_name <- \"Mr Jim Jeffries\"\nsalutation(example_name)\n\n#Set a salutation\nsalutation(example_name) <- \"Prof\"\n\n\n"} {"package":"humaniformat","topic":"suffix","snippet":"### Name: suffix\n### Title: Get or set a name's suffix\n### Aliases: suffix suffix<-\n\n### ** Examples\n\n#Get a suffix]\nexample_name <- \"Mr Jim Toby Jeffries Esq\"\nsuffix(example_name)\n\n#Set a suffix\nsuffix(example_name) <- \"PhD\"\n\n\n"} {"package":"ARPobservation","topic":"F_const","snippet":"### Name: F_const\n### Title: Constant (degenerate) distribution and related equilibrium\n### distribution\n### Aliases: F_const\n\n### ** Examples\n\nhist(F_const()$r_gen(1000, 2))\nhist(F_const()$r_eq(1000, 2))\n\n\n\n"} {"package":"ARPobservation","topic":"F_exp","snippet":"### Name: F_exp\n### Title: Exponential distribution and related equilibrium distribution\n### Aliases: F_exp\n\n### ** Examples\n\nhist(F_exp()$r_gen(1000, 3))\nhist(F_exp()$r_eq(1000, 3))\n\n\n\n"} {"package":"ARPobservation","topic":"F_gam","snippet":"### Name: F_gam\n### Title: Gamma distribution and related equilibrium distribution\n### Aliases: F_gam\n\n### ** Examples\n\nhist(F_gam(2)$r_gen(1000, 3))\nhist(F_gam(2)$r_eq(1000, 3))\n\n\n\n"} {"package":"ARPobservation","topic":"F_gam_mix","snippet":"### Name: F_gam_mix\n### Title: Mixture of two gamma distributions and related equilibrium\n### distribution\n### Aliases: F_gam_mix\n\n### ** Examples\n\nhist(F_gam_mix(2, 2, 1 / 12, 3 / 5)$r_gen(1000, 20))\nhist(F_gam_mix(2, 2, 1 / 12, 3 / 5)$r_eq(1000, 20))\n\n\n\n"} {"package":"ARPobservation","topic":"F_unif","snippet":"### Name: F_unif\n### Title: Uniform distribution and related equilibrium distribution\n### Aliases: F_unif\n\n### ** Examples\n\nhist(F_unif()$r_gen(1000, 2))\nhist(F_unif()$r_eq(1000, 2))\n\n\n\n"} {"package":"ARPobservation","topic":"F_weib","snippet":"### Name: F_weib\n### Title: Weibull distribution and related equilibrium distribution\n### Aliases: F_weib\n\n### ** Examples\n\nhist(F_gam(2)$r_gen(1000, 3))\nhist(F_gam(2)$r_eq(1000, 3))\n\n\n\n"} {"package":"ARPobservation","topic":"PIR_MOM","snippet":"### Name: PIR_MOM\n### Title: Moment estimator for prevalence and incidence, with bootstrap\n### confidence intervals\n### Aliases: PIR_MOM\n\n### ** Examples\n\n\n# Estimate prevalence and incidence ratios for Carl from the Moes dataset\ndata(Moes)\nwith(subset(Moes, Case == \"Carl\"),\n PIR_MOM(PIR = outcome,\n phase = Phase,\n intervals = intervals,\n interval_length = (active_length + rest_length),\n rest_length = rest_length,\n base_level = \"No Choice\",\n Bootstraps = 200,\n seed = 149568373))\n\n\n\n"} {"package":"ARPobservation","topic":"augmented_recording","snippet":"### Name: augmented_recording\n### Title: Applies augmented interval recording to a behavior stream\n### Aliases: augmented_recording\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\naugmented_recording(BS, interval_length = 20)\n\n\n"} {"package":"ARPobservation","topic":"continuous_duration_recording","snippet":"### Name: continuous_duration_recording\n### Title: Applies continuous duration recording to a behavior stream\n### Aliases: continuous_duration_recording\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 5, 
mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\ncontinuous_duration_recording(BS)\n\n\n"} {"package":"ARPobservation","topic":"event_counting","snippet":"### Name: event_counting\n### Title: Applies event counting to a behavior stream\n### Aliases: event_counting\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\nevent_counting(BS)\n\n\n"} {"package":"ARPobservation","topic":"incidence_bounds","snippet":"### Name: incidence_bounds\n### Title: Incidence bounds and confidence interval\n### Aliases: incidence_bounds\n\n### ** Examples\n\n\n# Estimate bounds on the incidence ratio for Ahmad from the Dunlap dataset\ndata(Dunlap)\nwith(subset(Dunlap, Case == \"Ahmad\"),\nincidence_bounds(PIR = outcome, phase = Phase, base_level = \"No Choice\",\n mu_U = 10, p = .15, active_length = active_length, intervals = intervals))\n\n\n\n"} {"package":"ARPobservation","topic":"interim_bounds","snippet":"### Name: interim_bounds\n### Title: Interim bounds and confidence interval\n### Aliases: interim_bounds\n\n### ** Examples\n\n# Estimate bounds on the interim time ratio for Carl from the Moes dataset\ndata(Moes)\nwith(subset(Moes, Case == \"Carl\"),\ninterim_bounds(PIR = outcome, phase = Phase, base_level = \"No Choice\"))\n\n\n\n"} {"package":"ARPobservation","topic":"interval_recording","snippet":"### Name: interval_recording\n### Title: Applies interval recording to a behavior stream\n### Aliases: interval_recording\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\ninterval_recording(BS, interval_length = 20, partial = TRUE, summarize = FALSE)\ninterval_recording(BS, interval_length = 20, partial = TRUE, summarize = TRUE)\ncolMeans(interval_recording(BS, 20, partial = TRUE, summarize = FALSE))\ninterval_recording(BS, interval_length = 20, rest_length = 5, partial = FALSE)\n\n\n"} {"package":"ARPobservation","topic":"logRespRatio","snippet":"### Name: logRespRatio\n### Title: Calculate log-response ratio, variance, and confidence interval\n### Aliases: logRespRatio\n\n### ** Examples\n\n\n# Estimate the log response ratio and its variance for Carl from Moes dataset\ndata(Moes)\nwith(subset(Moes, Case == \"Carl\"),\nlogRespRatio(observations = outcome, phase = Phase, base_level = \"No Choice\"))\n\n\n\n"} {"package":"ARPobservation","topic":"momentary_time_recording","snippet":"### Name: momentary_time_recording\n### Title: Applies momentary time recording to a behavior stream\n### Aliases: momentary_time_recording\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\nmomentary_time_recording(BS, interval_length = 20, FALSE)\nmomentary_time_recording(BS, interval_length = 20)\ncolMeans(momentary_time_recording(BS, 20, FALSE)[-1,])\n\n\n"} {"package":"ARPobservation","topic":"plot.behavior_stream","snippet":"### Name: plot.behavior_stream\n### Title: Plot method for 'behavior_stream' objects\n### Aliases: plot.behavior_stream\n\n### ** Examples\n\n\nif (requireNamespace(\"ggplot2\", quietly = TRUE)) {\nb_streams <- r_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), \n stream_length = 100)\nplot(b_streams)\n}\n\n\n\n"} {"package":"ARPobservation","topic":"prevalence_bounds","snippet":"### Name: prevalence_bounds\n### Title: Prevalence bounds and confidence interval\n### 
Aliases: prevalence_bounds\n\n### ** Examples\n\n# Estimate bounds on the prevalence ratio for Carl from Moes dataset\ndata(Moes)\nwith(subset(Moes, Case == \"Carl\"),\n prevalence_bounds(PIR = outcome, phase = Phase, base_level = \"No Choice\",\n mu_L = 10, active_length = active_length, intervals = intervals))\n\n\n\n"} {"package":"ARPobservation","topic":"r_AIR","snippet":"### Name: r_AIR\n### Title: Generates random augmented interval recording behavior streams\n### Aliases: r_AIR\n\n### ** Examples\n\n\nr_AIR(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, rest_length = 0)\n \n\n\n"} {"package":"ARPobservation","topic":"r_MTS","snippet":"### Name: r_MTS\n### Title: Generates random momentary time sampling behavior streams\n### Aliases: r_MTS\n\n### ** Examples\n\n\n# A set of unsummarized MTS observations\nr_MTS(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), interval_length = 1)\n \n# A set of summarized MTS observations\nr_MTS(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, summarize = TRUE)\n \n\n\n"} {"package":"ARPobservation","topic":"r_PIR","snippet":"### Name: r_PIR\n### Title: Generates random partial interval recording behavior streams\n### Aliases: r_PIR\n\n### ** Examples\n\n\n# An unsummarized set of PIR observations\nr_PIR(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, rest_length = 0)\n \n# A summarized set of of PIR observations\nr_PIR(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, rest_length = 0,\n summarize = TRUE)\n \n\n\n"} {"package":"ARPobservation","topic":"r_WIR","snippet":"### Name: r_WIR\n### Title: Generates random whole interval recording behavior streams\n### Aliases: r_WIR\n\n### ** Examples\n\n\n# An unsummarized set of WIR observations\nr_WIR(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, rest_length = 0)\n \n# A summarized set of of WIR observations\nr_WIR(n = 5, mu = 2, lambda = 4, stream_length = 20, \n F_event = F_exp(), F_interim = F_exp(), \n interval_length = 1, rest_length = 0,\n summarize = TRUE)\n \n\n\n"} {"package":"ARPobservation","topic":"r_behavior_stream","snippet":"### Name: r_behavior_stream\n### Title: Generates random behavior streams\n### Aliases: r_behavior_stream\n\n### ** Examples\n\n# default equilibrium initial conditions\nr_behavior_stream(n = 5, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), \n stream_length = 100)\n \n# non-equilibrium initial conditions\nr_behavior_stream(n = 5, mu = 3, lambda = 10,\n F_event = F_gam(3), F_interim = F_gam(3),\n stream_length = 100, \n equilibrium = FALSE, p0 = 0.5)\n\n\n"} {"package":"ARPobservation","topic":"r_continuous_recording","snippet":"### Name: r_continuous_recording\n### Title: Generates random samples of continuously recorded behavior\n### streams\n### Aliases: r_continuous_recording\n\n### ** Examples\n\n\nr_continuous_recording(n = 5, mu = 2, lambda = 4, stream_length = 20,\n F_event = F_exp(), F_interim = F_exp())\n\n\n\n"} {"package":"ARPobservation","topic":"r_event_counting","snippet":"### Name: r_event_counting\n### Title: Generates random samples of event counts\n### Aliases: r_event_counting\n\n### ** Examples\n\n\nr_event_counting(n = 5, mu = 2, lambda = 4, stream_length = 20,\n 
F_event = F_exp(), F_interim = F_exp())\n \n\n\n"} {"package":"ARPobservation","topic":"reported_observations","snippet":"### Name: reported_observations\n### Title: Applies multiple recording procedures to a behavior stream\n### Aliases: reported_observations\n\n### ** Examples\n\nBS <- r_behavior_stream(n = 50, mu = 3, lambda = 10, \n F_event = F_exp(), F_interim = F_exp(), stream_length = 100)\nreported_observations(BS, interval_length = 10)\nreported_observations(BS, interval_length = 10, n_aggregate = 5)\n\n\n"} {"package":"Gmisc","topic":"Transition-class","snippet":"### Name: Transition-class\n### Title: A reference class for generating transition plots\n### Aliases: Transition-class Transition\n\n### ** Examples\n\n# Transitions\nset.seed(1)\nn <- 10\nmy_data <-\n data.frame(\n Var_a = sample(c(\n \"Test 1\",\n \"Test 2\",\n \"Test 3\"\n ),\n size = n,\n replace = TRUE,\n prob = 3:1\n ),\n Var_b = sample(c(\n \"Test 1\",\n \"Test 2\",\n \"Test 3\"\n ),\n size = n,\n replace = TRUE,\n prob = 1:3\n )\n )\nmtrx <- with(\n my_data,\n table(Var_a, Var_b)\n)\n\n# Initialize the transition plot\ntransitions <- getRefClass(\"Transition\")$new(mtrx,\n label = c(\"Before\", \"After\"))\n\n# Render the plot\ntransitions$render()\n\n\n"} {"package":"Gmisc","topic":"align","snippet":"### Name: align\n### Title: Align boxes\n### Aliases: align alignVertical alignHorizontal\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\n\nbox <- boxGrob(\"A cool\\nreference\\nbox\",\n x = .5, y = .8,\n box_gp = gpar(fill = \"#ADB5C7\"))\nanother_box <- boxGrob(\"A horizontal box\", x = .1, y = .5)\nyet_another_box <- boxGrob(\"Another horizontal box\", x = .8, y = .3)\n\nalignedBoxes <- alignHorizontal(box,\n another_box,\n yet_another_box,\n .position = \"right\")\n\nbox\nfor (b in alignedBoxes) {\n print(b)\n}\n\n\nvert_box <- boxGrob(\"Vert\", \n x = .8, y = .3,\n box_gp = gpar(fill = \"darkgreen\"),\n txt_gp = gpar(col = \"white\"))\nanother_vert_box <- boxGrob(\"Another vertical\", \n x = .1, y = .5,\n box_gp = gpar(fill = \"darkgreen\"),\n txt_gp = gpar(col = \"white\"))\n\nalignedBoxes <- alignVertical(box,\n vert_box,\n another_vert_box,\n .position = \"bottom\")\nfor (b in alignedBoxes) {\n print(b)\n}\n\n\n"} {"package":"Gmisc","topic":"bezierArrowGradient","snippet":"### Name: bezierArrowGradient\n### Title: A bezier arrow with gradient\n### Aliases: bezierArrowGradient\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\narrowGrob <- bezierArrowGradient(\n x = c(.1, .3, .6, .9),\n y = c(0.2, 0.2, 0.9, 0.9)\n)\ngrid.draw(arrowGrob)\n\n\n"} {"package":"Gmisc","topic":"bezierArrowSmpl","snippet":"### Name: bezierArrowSmpl\n### Title: A simple bezier arrow\n### Aliases: bezierArrowSmpl\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\narrowGrob <- bezierArrowSmpl(\n x = c(.1, .3, .6, .9),\n y = c(0.2, 0.2, 0.9, 0.9)\n)\ngrid.draw(arrowGrob)\n\n\n"} {"package":"Gmisc","topic":"boxGrob","snippet":"### Name: boxGrob\n### Title: Create a box with text\n### Aliases: boxGrob print.box plot.box widthDetails.box heightDetails.box\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\nboxGrob(\"My box\")\n\n\n"} {"package":"Gmisc","topic":"boxPropGrob","snippet":"### Name: boxPropGrob\n### Title: Create a box with a color split\n### Aliases: boxPropGrob\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\nboxPropGrob(\"Main label\", \"Left text\", \"Right text\", prop = .3)\n\n\n"} {"package":"Gmisc","topic":"connectGrob","snippet":"### Name: connectGrob\n### Title: Connect boxes with an arrow\n### Aliases: 
connectGrob print.connect_boxes plot.connect_boxes\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\n\n# Initiate the boxes that we want to connect\nstart <- boxGrob(\"Top\", x = .5, y = .8)\nend <- boxGrob(\"Bottom\", x = .5, y = .2)\nside <- boxPropGrob(\"Side\", \"Left\", \"Right\", prop = .3, x = .2, y = .8)\nsub_side_left <- boxGrob(\"Left\", x = attr(side, \"coords\")$left_x, y = .5)\nsub_side_right <- boxGrob(\"Right\", x = attr(side, \"coords\")$right_x, y = .5)\nexclude <- boxGrob(\"Exclude:\\n - Too sick\\n - Prev. surgery\", x = .8, y = .5, just = \"left\")\n\n# Connect the boxes and print/plot them\nconnectGrob(start, end, \"vertical\")\nconnectGrob(start, side, \"horizontal\")\nconnectGrob(side, sub_side_left, \"v\", \"l\")\nconnectGrob(side, sub_side_right, \"v\", \"r\")\nconnectGrob(start, exclude, \"L\")\n\n# Print the grobs\nstart\nend\nside\nexclude\nsub_side_left\nsub_side_right\n\n\n"} {"package":"Gmisc","topic":"coords","snippet":"### Name: coords\n### Title: Get the box coordinates\n### Aliases: coords\n\n### ** Examples\n\nbox <- boxGrob(\"A test box\")\ncoords(box)\n\n\n"} {"package":"Gmisc","topic":"copyAllNewAttributes","snippet":"### Name: copyAllNewAttributes\n### Title: A simple thing to keep the attributes\n### Aliases: copyAllNewAttributes\n\n### ** Examples\n\na <- \"test\"\nattr(a, 'wow') <- 1000\nb <- a\nb <- copyAllNewAttributes(a, b)\nprint(attr(b, 'wow'))\n\n\n\n"} {"package":"Gmisc","topic":"describeFactors","snippet":"### Name: describeFactors\n### Title: Describes factor variables\n### Aliases: describeFactors\n\n### ** Examples\n\nset.seed(1)\ndescribeFactors(sample(50, x = c(\"A\", \"B\", \"C\"), replace = TRUE))\n\nn <- 500\nmy_var <- factor(sample(size = n, x = c(\"A\", \"B\", \"C\", NA), replace = TRUE))\nmy_exp <- rbinom(n = n, size = 1, prob = 0.2)\ntotal <- table(my_var, useNA = \"ifany\")\nby(my_var,\n INDICES = my_exp,\n FUN = describeFactors,\n useNA = \"ifany\",\n horizontal_proportions = total\n)\n\n\n"} {"package":"Gmisc","topic":"describeMean","snippet":"### Name: describeMean\n### Title: Describe the mean\n### Aliases: describeMean\n\n### ** Examples\n\ndescribeMean(1:10)\ndescribeMean(c(1:10, NA), useNA = \"always\")\ndescribeMean(c(1:10, NA), useNA = \"no\")\n\n\n"} {"package":"Gmisc","topic":"describeMedian","snippet":"### Name: describeMedian\n### Title: A function that returns a description median that contains the\n### interquartile range or the full range\n### Aliases: describeMedian\n\n### ** Examples\n\ndescribeMedian(1:10)\ndescribeMedian(c(1:10, NA), useNA = \"ifany\")\n\n\n"} {"package":"Gmisc","topic":"describeProp","snippet":"### Name: describeProp\n### Title: A function that returns a description proportion that contains\n### the number and the percentage\n### Aliases: describeProp\n\n### ** Examples\n\ndescribeProp(factor(sample(50, x = c(\"A\", \"B\", NA), replace = TRUE)))\n\n\n"} {"package":"Gmisc","topic":"distance","snippet":"### Name: distance\n### Title: Get the distance between grid objects\n### Aliases: distance print.Gmisc_unit\n\n### ** Examples\n\nbox1 <- boxGrob(\"A test box\", y = .8)\nbox2 <- boxGrob(\"Another test box\", y = .2)\ndistance(box1, box2, \"v\")\n\n\n"} {"package":"Gmisc","topic":"docx_document","snippet":"### Name: docx_document\n### Title: Formatter wrapper for 'html_document', facilitates easier\n### porting to docx\n### Aliases: docx_document\n\n### ** Examples\n\n# Possible yaml configuration at the top of the Rmd doc\n## Not run: \n##D ---\n##D title: \"Test\"\n##D author: \"Max 
Gordon\"\n##D output:\n##D Gmisc::docx_document\n##D ---\n## End(Not run)\n\n\n"} {"package":"Gmisc","topic":"fastDoCall","snippet":"### Name: fastDoCall\n### Title: An alternative to the internal 'do.call'\n### Aliases: fastDoCall\n\n### ** Examples\n\nfastDoCall(\"complex\", list(imaginary = 1:3))\n\n## if we already have a list (e.g. a data frame)\n## we need c() to add further arguments\ntmp <- expand.grid(letters[1:2], 1:3, c(\"+\", \"-\"))\nfastDoCall(\"paste\", c(tmp, sep = \"\"))\n\n## examples of where objects will be found.\nA <- 2\nf <- function(x) print(x^2)\nenv <- new.env()\nassign(\"A\", 10, envir = env)\nassign(\"f\", f, envir = env)\nf <- function(x) print(x)\nf(A) # 2\nfastDoCall(\"f\", list(A)) # 2\nfastDoCall(\"f\", list(A), envir = env) # 4\nfastDoCall(f, list(A), envir = env) # 2\nfastDoCall(\"f\", list(quote(A)), envir = env) # 100\nfastDoCall(f, list(quote(A)), envir = env) # 10\nfastDoCall(\"f\", list(as.name(\"A\")), envir = env) # 100\n\neval(call(\"f\", A)) # 2\neval(call(\"f\", quote(A))) # 2\neval(call(\"f\", A), envir = env) # 4\neval(call(\"f\", quote(A)), envir = env) # 100\n\n\n"} {"package":"Gmisc","topic":"figCapNo","snippet":"### Name: figCapNo\n### Title: Adds a figure caption number\n### Aliases: figCapNo\n\n### ** Examples\n\n## Not run: \n##D ```{r, fig.cap = pigCapNo(\"My nice plot\")}\n##D plot(1:10 + rnorm(10), 1:10)\n##D ```\n## End(Not run)\norg_opts <- options(fig_caption_no = 2,\n fig_caption_no_sprintf = \"Figure %s: %s\")\nfigCapNo(\"A plot with caption number = 3\")\n\norg_opts <- options(fig_caption_no = TRUE)\nfigCapNo(\"A plot with caption number = 1\")\n\n# Use default setting\noptions(fig_caption_no_sprintf = NULL)\nfigCapNo(\"A plot with caption number = 2\")\n\n# Return the original settings\noptions(org_opts)\n\n\n"} {"package":"Gmisc","topic":"figCapNoLast","snippet":"### Name: figCapNoLast\n### Title: Gets the last figure caption number\n### Aliases: figCapNoLast\n\n### ** Examples\n\norg_opts <- options(fig_caption_no = 1)\nfigCapNoLast()\noptions(org_opts)\n\n\n"} {"package":"Gmisc","topic":"figCapNoNext","snippet":"### Name: figCapNoNext\n### Title: Gets the next figure caption number\n### Aliases: figCapNoNext\n\n### ** Examples\n\norg_opts <- options(fig_caption_no = 1)\nfigCapNoNext()\noptions(org_opts)\n\n\n"} {"package":"Gmisc","topic":"getDescriptionStatsBy","snippet":"### Name: getDescriptionStatsBy\n### Title: Creating of description statistics\n### Aliases: getDescriptionStatsBy htmlTable.Gmisc_getDescriptionStatsBy\n### print.Gmisc_getDescriptionStatsBy\n### knit_print.Gmisc_getDescriptionStatsBy\n### length.Gmisc_getDescriptionStatsBy\n\n### ** Examples\n\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(htmlTable)\n\ndata(mtcars)\nmtcars %<>%\n mutate(am = factor(am, levels = 0:1, labels = c(\"Automatic\", \"Manual\")),\n vs = factor(vs, levels = 0:1, labels = c(\"V-shaped\", \"straight\")),\n drat_prop = drat > median(drat),\n drat_prop = factor(drat_prop,\n levels = c(FALSE, TRUE),\n labels = c(\"High ratio\", \"Low ratio\")),\n carb_prop = carb > 2,\n carb_prop = factor(carb_prop,\n levels = c(FALSE, TRUE),\n labels = c(\"≤ 2\", \"> 2\")),\n across(c(gear, carb, cyl), factor))\n\n# A simple bare-bone example\nmtcars %>%\n getDescriptionStatsBy(`Miles per gallon` = mpg,\n Weight = wt,\n `Carborators ≤ 2` = carb_prop,\n by = am) %>%\n htmlTable(caption = \"Basic continuous stats from the mtcars dataset\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# For labeling & units we use 
set_column_labels/set_column_unit that use\n# the Hmisc package annotation functions\nmtcars %<>%\n set_column_labels(am = \"Transmission\",\n mpg = \"Gas\",\n wt = \"Weight\",\n gear = \"Gears\",\n disp = \"Displacement\",\n vs = \"Engine type\",\n drat_prop = \"Rear axel ratio\",\n carb_prop = \"Carburetors\") %>%\n set_column_units(mpg = \"Miles/(US) gallon\",\n wt = \"103<\/sup> lbs\",\n disp = \"cu.in.\")\n\nmtcars %>%\n getDescriptionStatsBy(mpg,\n wt,\n `Gear†` = gear,\n drat_prop,\n carb_prop,\n vs,\n by = am,\n header_count = TRUE,\n use_units = TRUE,\n show_all_values = TRUE) %>%\n addHtmlTableStyle(pos.caption = \"bottom\") %>%\n htmlTable(caption = \"Stats from the mtcars dataset\",\n tfoot = \"† Number of forward gears\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Using the default parameter we can\nmtcars %>%\n getDescriptionStatsBy(mpg,\n wt,\n `Gear†` = gear,\n drat_prop,\n carb_prop,\n vs,\n by = am,\n header_count = TRUE,\n use_units = TRUE,\n default_ref = c(drat_prop = \"Low ratio\",\n carb_prop = \"> 2\")) %>%\n addHtmlTableStyle(pos.caption = \"bottom\") %>%\n htmlTable(caption = \"Stats from the mtcars dataset\",\n tfoot = \"† Number of forward gears\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# We can also use lists\ntll <- list()\ntll[[\"Gear (3 to 5)\"]] <- getDescriptionStatsBy(mtcars$gear, mtcars$am)\ntll <- c(tll,\n list(getDescriptionStatsBy(mtcars$disp, mtcars$am)))\n\nmergeDesc(tll,\n htmlTable_args = list(caption = \"Factored variables\")) %>%\n htmlTable::addHtmlTableStyle(css.rgroup = \"\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\ntl_no_units <- list()\ntl_no_units[[\"Gas (mile/gallons)\"]] <-\n getDescriptionStatsBy(mtcars$mpg, mtcars$am,\n header_count = TRUE)\ntl_no_units[[\"Weight (103<\/sup> kg)\"]] <-\n getDescriptionStatsBy(mtcars$wt, mtcars$am,\n header_count = TRUE)\nmergeDesc(tl_no_units,\n tll) %>%\n htmlTable::addHtmlTableStyle(css.rgroup = \"\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Other settings\nmtcars$mpg[sample(1:NROW(mtcars), size = 5)] <- NA\ngetDescriptionStatsBy(mtcars$mpg,\n mtcars$am,\n statistics = TRUE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Do the horizontal version\ngetDescriptionStatsBy(mtcars$gear,\n mtcars$am,\n statistics = TRUE,\n hrzl_prop = TRUE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\nmtcars$wt_with_missing <- mtcars$wt\nmtcars$wt_with_missing[sample(1:NROW(mtcars), size = 8)] <- NA\ngetDescriptionStatsBy(mtcars$wt_with_missing, mtcars$am, statistics = TRUE,\n hrzl_prop = TRUE, total_col_show_perc = FALSE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n## Not run: \n##D ## There is also a LaTeX wrapper\n##D tll <- list(\n##D getDescriptionStatsBy(mtcars$gear, mtcars$am),\n##D getDescriptionStatsBy(mtcars$col, mtcars$am))\n##D \n##D latex(mergeDesc(tll),\n##D caption = \"Factored variables\",\n##D file = \"\")\n## End(Not run)\n\n\n"} {"package":"Gmisc","topic":"getPvalWilcox","snippet":"### Name: getPvalWilcox\n### Title: P-value extractors for 'getDescriptionStatsBy'\n### Aliases: getPvalWilcox getPvalAnova getPvalFisher getPvalChiSq\n### getPvalKruskal\n\n### ** Examples\n\nset.seed(123)\ngetPvalFisher(\n sample(letters[1:3], size = 100, replace = TRUE),\n sample(LETTERS[1:3], size = 100, replace = TRUE)\n)\ngetPvalWilcox(\n rnorm(100),\n sample(LETTERS[1:2], size = 100, replace = TRUE)\n)\n\n\n"} 
{"package":"Gmisc","topic":"getSvdMostInfluential","snippet":"### Name: getSvdMostInfluential\n### Title: Gets the maximum contributor variables from svd()\n### Aliases: getSvdMostInfluential\n\n### ** Examples\n\norg_par <- par(ask = TRUE)\nset.seed(1345)\n# Simulate data with a pattern\ndataMatrix <- matrix(rnorm(15 * 160), ncol = 15)\ncolnames(dataMatrix) <- c(\n paste(\"Pos.3:\", 1:3, sep = \" #\"),\n paste(\"Neg.Decr:\", 4:6, sep = \" #\"),\n paste(\"No pattern:\", 7:8, sep = \" #\"),\n paste(\"Pos.Incr:\", 9:11, sep = \" #\"),\n paste(\"No pattern:\", 12:15, sep = \" #\"))\n\nfor (i in 1:nrow(dataMatrix)) {\n # flip a coin\n coinFlip1 <- rbinom(1, size = 1, prob = 0.5)\n coinFlip2 <- rbinom(1, size = 1, prob = 0.5)\n coinFlip3 <- rbinom(1, size = 1, prob = 0.5)\n\n # if coin is heads add a common pattern to that row\n if (coinFlip1) {\n cols <- grep(\"Pos.3\", colnames(dataMatrix))\n dataMatrix[i, cols] <- dataMatrix[i, cols] + 3\n }\n\n if (coinFlip2) {\n cols <- grep(\"Neg.Decr\", colnames(dataMatrix))\n dataMatrix[i, cols] <- dataMatrix[i, cols] - seq(from = 5, to = 15, length.out = length(cols))\n }\n\n if (coinFlip3) {\n cols <- grep(\"Pos.Incr\", colnames(dataMatrix))\n dataMatrix[i, cols] <- dataMatrix[i, cols] + seq(from = 3, to = 15, length.out = length(cols))\n }\n}\n\n# Illustrate data\nheatmap(dataMatrix, Colv = NA, Rowv = NA, margins = c(7, 2), labRow = \"\")\n\nsvd_out <- svd(scale(dataMatrix))\n\nlibrary(lattice)\nb_clr <- c(\"steelblue\", \"darkred\")\nkey <- simpleKey(\n rectangles = TRUE, space = \"top\", points = FALSE,\n text = c(\"Positive\", \"Negative\")\n)\nkey$rectangles$col <- b_clr\n\nb1 <- barchart(as.table(svd_out$v[, 1]),\n main = \"First column\",\n horizontal = FALSE, col = ifelse(svd_out$v[, 1] > 0,\n b_clr[1], b_clr[2]\n ),\n ylab = \"Impact value\",\n scales = list(x = list(rot = 55, labels = colnames(dataMatrix), cex = 1.1)),\n key = key\n)\n\nb2 <- barchart(as.table(svd_out$v[, 2]),\n main = \"Second column\",\n horizontal = FALSE, col = ifelse(svd_out$v[, 2] > 0,\n b_clr[1], b_clr[2]\n ),\n ylab = \"Impact value\",\n scales = list(x = list(rot = 55, labels = colnames(dataMatrix), cex = 1.1)),\n key = key\n)\n\nb3 <- barchart(as.table(svd_out$v[, 3]),\n main = \"Third column\",\n horizontal = FALSE, col = ifelse(svd_out$v[, 3] > 0,\n b_clr[1], b_clr[2]\n ),\n ylab = \"Impact value\",\n scales = list(x = list(rot = 55, labels = colnames(dataMatrix), cex = 1.1)),\n key = key\n)\n\nb4 <- barchart(as.table(svd_out$v[, 4]),\n main = \"Fourth column\",\n horizontal = FALSE, col = ifelse(svd_out$v[, 4] > 0,\n b_clr[1], b_clr[2]\n ),\n ylab = \"Impact value\",\n scales = list(x = list(rot = 55, labels = colnames(dataMatrix), cex = 1.1)),\n key = key\n)\n\n# Note that the fourth has the no pattern columns as the\n# chosen pattern, probably partly because of the previous\n# patterns already had been identified\nprint(b1, position = c(0, 0.5, .5, 1), more = TRUE)\nprint(b2, position = c(0.5, 0.5, 1, 1), more = TRUE)\nprint(b3, position = c(0, 0, .5, .5), more = TRUE)\nprint(b4, position = c(0.5, 0, 1, .5))\n\n# Let's look at how well the SVD identifies\n# the most influential columns\ngetSvdMostInfluential(dataMatrix,\n quantile = .8,\n similarity_threshold = .9,\n plot_threshold = .05,\n plot_selection = TRUE)\npar(org_par)\n\n\n"} {"package":"Gmisc","topic":"has","snippet":"### Name: has\n### Title: An R alternative to the lodash 'has' in JavaScript\n### Aliases: has\n\n### ** Examples\n\nhas(list(a = list(b = 1)), \"a.b\")\n\n\n\n"} 
{"package":"Gmisc","topic":"insertRowAndKeepAttr","snippet":"### Name: insertRowAndKeepAttr\n### Title: Insert a row into a matrix\n### Aliases: insertRowAndKeepAttr\n\n### ** Examples\n\ntest <- matrix(1:4, ncol = 2)\nattr(test, \"wow\") <- 1000\ntest <- insertRowAndKeepAttr(test, 2)\nprint(attr(test, \"wow\"))\n\n\n"} {"package":"Gmisc","topic":"mergeDesc","snippet":"### Name: mergeDesc\n### Title: Prepares a matrix for 'htmlTable' from a list\n### Aliases: mergeDesc\n\n### ** Examples\n\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(htmlTable)\n\ndata(mtcars)\nmtcars %<>%\n mutate(am = factor(am, levels = 0:1, labels = c(\"Automatic\", \"Manual\")),\n vs = factor(vs, levels = 0:1, labels = c(\"V-shaped\", \"straight\")),\n drat_prop = drat > median(drat),\n drat_prop = factor(drat_prop,\n levels = c(FALSE, TRUE),\n labels = c(\"High ratio\", \"Low ratio\")),\n carb_prop = carb > 2,\n carb_prop = factor(carb_prop,\n levels = c(FALSE, TRUE),\n labels = c(\"≤ 2\", \"> 2\")),\n across(c(gear, carb, cyl), factor))\n\n# A simple bare-bone example\nmtcars %>%\n getDescriptionStatsBy(`Miles per gallon` = mpg,\n Weight = wt,\n `Carborators ≤ 2` = carb_prop,\n by = am) %>%\n htmlTable(caption = \"Basic continuous stats from the mtcars dataset\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# For labeling & units we use set_column_labels/set_column_unit that use\n# the Hmisc package annotation functions\nmtcars %<>%\n set_column_labels(am = \"Transmission\",\n mpg = \"Gas\",\n wt = \"Weight\",\n gear = \"Gears\",\n disp = \"Displacement\",\n vs = \"Engine type\",\n drat_prop = \"Rear axel ratio\",\n carb_prop = \"Carburetors\") %>%\n set_column_units(mpg = \"Miles/(US) gallon\",\n wt = \"103<\/sup> lbs\",\n disp = \"cu.in.\")\n\nmtcars %>%\n getDescriptionStatsBy(mpg,\n wt,\n `Gear†` = gear,\n drat_prop,\n carb_prop,\n vs,\n by = am,\n header_count = TRUE,\n use_units = TRUE,\n show_all_values = TRUE) %>%\n addHtmlTableStyle(pos.caption = \"bottom\") %>%\n htmlTable(caption = \"Stats from the mtcars dataset\",\n tfoot = \"† Number of forward gears\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Using the default parameter we can\nmtcars %>%\n getDescriptionStatsBy(mpg,\n wt,\n `Gear†` = gear,\n drat_prop,\n carb_prop,\n vs,\n by = am,\n header_count = TRUE,\n use_units = TRUE,\n default_ref = c(drat_prop = \"Low ratio\",\n carb_prop = \"> 2\")) %>%\n addHtmlTableStyle(pos.caption = \"bottom\") %>%\n htmlTable(caption = \"Stats from the mtcars dataset\",\n tfoot = \"† Number of forward gears\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# We can also use lists\ntll <- list()\ntll[[\"Gear (3 to 5)\"]] <- getDescriptionStatsBy(mtcars$gear, mtcars$am)\ntll <- c(tll,\n list(getDescriptionStatsBy(mtcars$disp, mtcars$am)))\n\nmergeDesc(tll,\n htmlTable_args = list(caption = \"Factored variables\")) %>%\n htmlTable::addHtmlTableStyle(css.rgroup = \"\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\ntl_no_units <- list()\ntl_no_units[[\"Gas (mile/gallons)\"]] <-\n getDescriptionStatsBy(mtcars$mpg, mtcars$am,\n header_count = TRUE)\ntl_no_units[[\"Weight (103<\/sup> kg)\"]] <-\n getDescriptionStatsBy(mtcars$wt, mtcars$am,\n header_count = TRUE)\nmergeDesc(tl_no_units,\n tll) %>%\n htmlTable::addHtmlTableStyle(css.rgroup = \"\")\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Other settings\nmtcars$mpg[sample(1:NROW(mtcars), size = 5)] <- NA\ngetDescriptionStatsBy(mtcars$mpg,\n mtcars$am,\n statistics = 
TRUE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n# Do the horizontal version\ngetDescriptionStatsBy(mtcars$gear,\n mtcars$am,\n statistics = TRUE,\n hrzl_prop = TRUE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\nmtcars$wt_with_missing <- mtcars$wt\nmtcars$wt_with_missing[sample(1:NROW(mtcars), size = 8)] <- NA\ngetDescriptionStatsBy(mtcars$wt_with_missing, mtcars$am, statistics = TRUE,\n hrzl_prop = TRUE, total_col_show_perc = FALSE)\ninvisible(readline(prompt = \"Press [enter] to continue\"))\n\n## Not run: \n##D ## There is also a LaTeX wrapper\n##D tll <- list(\n##D getDescriptionStatsBy(mtcars$gear, mtcars$am),\n##D getDescriptionStatsBy(mtcars$col, mtcars$am))\n##D \n##D latex(mergeDesc(tll),\n##D caption = \"Factored variables\",\n##D file = \"\")\n## End(Not run)\n\n\n"} {"package":"Gmisc","topic":"mergeLists","snippet":"### Name: mergeLists\n### Title: Merging of multiple lists\n### Aliases: mergeLists\n\n### ** Examples\n\nv1 <- list(\"a\" = c(1, 2), b = \"test 1\", sublist = list(one = 20:21, two = 21:22))\nv2 <- list(\"a\" = c(3, 4), b = \"test 2\", sublist = list(one = 10:11, two = 11:12, three = 1:2))\nmergeLists(v1, v2)\n\n\n"} {"package":"Gmisc","topic":"moveBox","snippet":"### Name: moveBox\n### Title: Move a boxGrob\n### Aliases: moveBox\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\n\nbox <- boxGrob(\"A simple box\", x = .5, y = .8)\nmoveBox(box, x = -.2, space = \"relative\")\n\n\n"} {"package":"Gmisc","topic":"pathJoin","snippet":"### Name: pathJoin\n### Title: A path join function\n### Aliases: pathJoin\n\n### ** Examples\n\npathJoin(\"my_base_path/helpers\", \"superfunction.R\")\n# 'my_base_path/helpers/superfunction.R'\n\nbase_dir <- \"/home/tester/images\"\nout <- data.frame(filename = c(\"file1.png\", \"file2.png\", \"file3.png\")) |>\n dplyr::mutate(full_path = pathJoin(base_dir, filename))\n\n\n\n"} {"package":"Gmisc","topic":"retrieve","snippet":"### Name: retrieve\n### Title: An R alternative to the lodash 'get' in JavaScript\n### Aliases: retrieve\n\n### ** Examples\n\nsource <- list(a = list(b = 1, `odd.name` = 'I hate . 
in names', c(1,2,3)))\nretrieve(source, \"a.b\")\nretrieve(source, \"a.b.1\")\nretrieve(source, \"a.odd\\\\.name\")\nretrieve(source, \"a.not_in_list\")\n\n\n\n"} {"package":"Gmisc","topic":"set_column_labels","snippet":"### Name: set_column_labels\n### Title: Add [Hmisc::label()] to multiple columns\n### Aliases: set_column_labels\n\n### ** Examples\n\nlibrary(magrittr)\ndata(mtcars)\nmtcars_with_labels <- mtcars %>%\n set_column_labels(mpg = \"Gas\",\n cyl = \"Cylinders\",\n hp = \"Strength\")\nHmisc::label(mtcars_with_labels$mpg)\n\n\n"} {"package":"Gmisc","topic":"set_column_units","snippet":"### Name: set_column_units\n### Title: Add [Hmisc::unit()] to multiple columns\n### Aliases: set_column_units\n\n### ** Examples\n\nlibrary(magrittr)\ndata(mtcars)\nmtcars_with_units <- mtcars %>%\n set_column_units(wt = \"1000 lbs\")\nHmisc::units(mtcars_with_units$wt)\n\n\n"} {"package":"Gmisc","topic":"spread","snippet":"### Name: spread\n### Title: Spread boxes\n### Aliases: spread spreadVertical spreadHorizontal\n\n### ** Examples\n\nlibrary(grid)\ngrid.newpage()\n\nbox1 <- boxGrob(\"B1\", x = .2, y = .8)\nbox2 <- boxGrob(\"B2\\n\\n\\neach\\nbox\\neven\\nspace\\nbetween\", x = .2, y = .8)\nbox3 <- boxGrob(\"B3\", x = .2, y = .8)\nbox4 <- boxGrob(\"B4\", x = .2, y = .8)\nbox5 <- boxGrob(\"B5\", x = .2, y = .8)\n\nspread_boxes <- spreadVertical(box1,\n box2,\n box3,\n a = box4,\n box5, \n .type = \"between\")\nfor (b in spread_boxes) {\n print(b)\n}\n\nbox1 <- boxGrob(\"B1\\n\\nanother group\\ncenter oriented\", x = .6, y = .8)\nbox2 <- boxGrob(\"B2\", x = .6, y = .8)\nbox3 <- boxGrob(\"B3\", x = .6, y = .8)\nbox4 <- boxGrob(\"B4\", x = .6, y = .8)\nbox5 <- boxGrob(\"B5\", x = .6, y = .8)\n\nspread_boxes <- spreadVertical(box1,\n box2,\n box3,\n a = box4,\n box5, \n .type = \"center\")\nfor (b in spread_boxes) {\n print(b)\n}\n\n\n"} {"package":"Gmisc","topic":"time2spanTxt","snippet":"### Name: time2spanTxt\n### Title: A dense time-span text\n### Aliases: time2spanTxt\n\n### ** Examples\n\ntime2spanTxt(as.POSIXct(c(\"2020-01-02\", \"2020-03-01\", NA)))\n# 2 jan to 1 mar\n\n\n\n"} {"package":"Gmisc","topic":"transitionPlot","snippet":"### Name: transitionPlot\n### Title: A transition plot\n### Aliases: transitionPlot\n\n### ** Examples\n\n# This example does not run since it\n# takes a little while to assemble the\n# arrows and RMD Check complains that this\n# is more than allowed for\nlibrary(grid)\npar_org <- par(ask = TRUE)\n# Settings\nno_boxes <- 3\n# Generate test setting\ntransition_matrix <- matrix(NA, nrow = no_boxes, ncol = no_boxes)\ntransition_matrix[1, ] <- 200 * c(.5, .25, .25)\ntransition_matrix[2, ] <- 540 * c(.75, .10, .15)\ntransition_matrix[3, ] <- 340 * c(0, .2, .80)\n\ngrid.newpage()\ntransitionPlot(transition_matrix,\n box_txt = c(\"First\", \"Second\", \"Third\"),\n type_of_arrow = \"simple\",\n min_lwd = unit(1, \"mm\"),\n max_lwd = unit(6, \"mm\"),\n overlap_add_width = unit(1, \"mm\")\n)\n\n\n# Setup proportions\nbox_prop <- cbind(c(1, 0, 0.5), c(.52, .2, .8))\n# From the Set2 Colorbrewer\nstart_box_clr <- c(\"#8DA0CB\", \"#FC8D62\")\n# Darken the colors slightly\nend_box_clr <- c(\n colorRampPalette(c(start_box_clr[1], \"#000000\"))(10)[2],\n colorRampPalette(c(start_box_clr[2], \"#000000\"))(10)[2]\n)\n# Create a new grid\ngrid.newpage()\ntransitionPlot(transition_matrix,\n box_prop = box_prop,\n fill_start_box = start_box_clr, fill_end_box = end_box_clr,\n txt_start_clr = c(\"#FFFFFF\", \"#000000\"), txt_end_clr = c(\"#FFFFFF\", \"#000000\"),\n box_txt = 
c(\"First\", \"Second\", \"Third\"),\n type_of_arrow = \"gradient\",\n min_lwd = unit(1, \"mm\"),\n max_lwd = unit(10, \"mm\"),\n overlap_add_width = unit(1, \"mm\")\n)\npar(par_org)\n\n\n"} {"package":"Gmisc","topic":"yamlDump","snippet":"### Name: yamlDump\n### Title: Outputs an object\n### Aliases: yamlDump\n\n### ** Examples\n\nsome_fancy_list <- list(complex = list(some_data = 1:3,\n other_data = list(name = \"Max\")),\n simple = \"awesome overview\")\nyamlDump(some_fancy_list)\n#complex:\n# some_data:\n# - 1\n# - 2\n# - 3\n# other_data:\n# name: Max\n#simple: awesome overview\n\n# If you got a character json you can also input it directly\n# and the function will automatically convert it to a list\nyamlDump('{\"a\":{\"b\":[\"1\"]}}')\n\n\n\n"} {"package":"HeterFunctionalData","topic":"Heter.test","snippet":"### Name: Heter.test\n### Title: Heteroscedastic test for functional data\n### Aliases: Heter.test\n\n### ** Examples\n\n# Generate a data set that contains data from 3 treatments,\n# with 3 subjects in treatment 1, 3 subjects in treatment 2,\n# and 4 subjects in treatment 3. Each subject contains m=50\n# repeated observations from Poisson distribution. For the 1st treatment,\n# the mean vector of the repeated observations from the same subject is\n# equal to mu1 plus a random effect vector generated by NorRanGen( ).\n# The m is the number of repeated measurements per subject.\n f1<-function(m, mu1, raneff) {\n currentmu=mu1+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n f2<-function(m, mu2, raneff) {\n currentmu=mu2+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\nf3<- function(m, mu3, raneff){\n currentmu=mu3+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n# The a is the number of treatments. The mn stores the number of subjects in treatments.\na=3; mn=c(3, 3, 4); mu1=3; mu2=3; mu3=3; m=50\n# Generate the time effects via random effects with AR(1) structure.\nraneff=NorRanGen(m)\n# Generate data and store in wide format.\ndatawide=numeric()\nnow=0\nfor (i in 1:a){\n fi=function(x1, x2) f1(m,x1, x2)*(i==1)+f2(m,x1, x2)*(i==2)+f3(m, x1, x2)*(i==3)\n mu=mu1*(i==1)+mu2*(i==2)+mu3*(i==3)\n for (k in 1:mn[i]){\n now=now+1\n datawide<-rbind(datawide, c(k, i, fi(mu, raneff) ) \t)\n colnames(datawide)=c(\"sub\", \"trt\", paste(\"time\", seq(m), sep=\"\"))\n #this is a typical way to store data in practice\n }\n} #end of j\n\n# Note:There are different time effects since values in raneff vector are different\ndat=dataformat_wide_to_long(datawide) #dat is in long format\n# Define the h value used in Proposition 3.3 of Wang and Akritas (2010a)\nh=c(0.45, 0.49)\n #Note: The resulting palpha, pbeta, pgamma, pphi each contains\n # two p-values, one corresponds to each h value\n # (see Proposition 3.3 of Wang and Akritas (2010a))\n# test based on original data.\n(org= Heter.test(dat, a, m, mn, h, method='original') )\n#test based on ranks\n(rankt= Heter.test(dat, a, m, mn, h, method='rank') )\n\n\n\n\n"} {"package":"HeterFunctionalData","topic":"NorRanGen","snippet":"### Name: NorRanGen\n### Title: Generate a vector of random effects with specific correlation\n### structure and given variance\n### Aliases: NorRanGen\n\n### ** Examples\n\n m=50; raneff=NorRanGen(m)\n# Note: If X ~ N(0, I), then tran X ~ N(0, A) with\n# A being the cov matrix of AR(1), which contains the standard deviations sigma and the\n# correlation coeff rho=exp(-1/m).\n# i.e. corr= (1 rho rho^2 ... rho^(m-1)\n# rho 1 rho ... 
rho^(m-2)\n# ...................\n# rho^(m-1) rho^(m-2) ... rho )\n#\n# To see the correlation values, run the following example\n# j1=seq(25); cv=numeric()\n# for (j in 1:25){\n# lag=abs(j1-j)/25; cv=rbind(cv, exp(-lag))\n#}\n# row.names(cv)=j1; colnames(cv)=j1; cv[1,]\n\n\n"} {"package":"HeterFunctionalData","topic":"dataformat_wide_to_long","snippet":"### Name: dataformat_wide_to_long\n### Title: Convert data from wide format to long format\n### Aliases: dataformat_wide_to_long\n\n### ** Examples\n\n# Example of data in wide format\n# sub trt time1 time2 time3 time4 time5\n # 1 1 2.4644642 1.7233498 -1.1374695 -0.5242729 -2.379145\n # 2 1 2.5746848 1.0181738 -0.8325308 -2.4873067 -3.463602\n # 3 1 2.5813995 -0.7528324 -3.1457645 -3.3135573 -4.364621\n # 4 1 0.8232141 0.2394987 -2.2073150 -3.3583005 -6.073399\n # 5 1 0.8274860 0.8323298 -2.1028060 -2.6015848 -3.291307\n # 1 2 -2.2217084 0.6779049 3.6310542 3.2052691 4.310316\n # 2 2 -3.3954705 -0.7827040 3.1364749 3.7184895 5.118996\n #\n # Data stored in long format\n # x_{ijk}, k=1, ..., n_i are the kth observation from the ith subject at time j.\n # 1 1 1 x111\n # 1 1 2 x112\n # 1 2 1 x121\n # 1 2 2 x122\n # 1 2 3 x123\n\n # The following example generate a data set that contains data from\n # 3 treatments, with 3 subjects in treatment 1, 3 subjects in treatment 2,\n # and 4 subjects in treatment 3. Each subject contains m=50\n # repeated observations from Poisson distribution. For the 1st treatment,\n # the mean vector of the repeated observations from the same subject is\n # equal to mu1 plus a random effect vector generated by NorRanGen( ).\n # The m is the number of repeated measurements per subject.\n f1<-function(m, mu1, raneff) {\n currentmu=mu1+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n f2<-function(m, mu2, raneff) {\n currentmu=mu2+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n f3<- function(m, mu3, raneff){\n currentmu=mu3+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n\n # The a is the number of treatments. The mn stores the number of subjects in treatments.\n a=3; mn=c(3, 3, 4); mu1=3; mu2=3; mu3=3; m=50\n raneff=NorRanGen(m) # generate random effects with AR(1) structure.\n\n # Generate data and store in wide format.\ndatawide=numeric()\nnow=0\nfor (i in 1:a){\n fi=function(x1, x2) f1(m,x1, x2)*(i==1)+f2(m,x1, x2)*(i==2)+f3(m, x1, x2)*(i==3)\n mu=mu1*(i==1)+mu2*(i==2)+mu3*(i==3)\n for (k in 1:mn[i]){\n now=now+1\n datawide<-rbind(datawide, c(k, i, fi(mu, raneff) ) \t)\n colnames(datawide)=c(\"sub\", \"trt\", paste(\"time\", seq(m), sep=\"\"))\n #this is a typical way to store data in practice\n }\n } #end of j\n\n dat=dataformat_wide_to_long(datawide)\n\n\n"} {"package":"HeterFunctionalData","topic":"sigma4","snippet":"### Name: sigma4\n### Title: Unbiased estimate of squared variance $sigma^4$ based on\n### U-statistic of an i.i.d. sample\n### Aliases: sigma4\n\n### ** Examples\n\n x=stats::rnorm(10)\n sigma4(x)\n\n\n"} {"package":"HeterFunctionalData","topic":"sigma4bootstrap","snippet":"### Name: sigma4bootstrap\n### Title: Bootstrap estimate of $sigma^4$ using an i.i.d. sample\n### Aliases: sigma4bootstrap\n\n### ** Examples\n\n x=stats::rnorm(10)\n sigma4bootstrap(x)\n\n\n"} {"package":"HeterFunctionalData","topic":"sigma4jackknife","snippet":"### Name: sigma4jackknife\n### Title: Jackknife estimate of $sigma^4$ using an i.i.d. 
sample\n### Aliases: sigma4jackknife\n\n### ** Examples\n\n x=stats::rnorm(10)\n sigma4jackknife(x)\n\n\n"} {"package":"HeterFunctionalData","topic":"tcontrast","snippet":"### Name: tcontrast\n### Title: Test of no contrast effect of the treatments\n### Aliases: tcontrast\n\n### ** Examples\n\n# Generate a data set that contains data from 3 treatments,\n# with 3 subjects in treatment 1, 3 subjects in treatment 2,\n# and 4 subjects in treatment 3. Each subject contains m=50\n# repeated observations from Poisson distribution. For the 1st treatment,\n# the mean vector of the repeated observations from the same subject is\n# equal to mu1 plus a random effect vector generated by NorRanGen( ).\n# The m is the number of repeated measurements per subject.\n f1<-function(m, mu1, raneff) {\n currentmu=mu1+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n f2<-function(m, mu2, raneff) {\n currentmu=mu2+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n f3<- function(m, mu3, raneff){\n currentmu=mu3+raneff;\n currentmu[abs(currentmu)<1e-2]=1e-2;\n rpois(m, abs(currentmu))}\n# The a is the number of treatments. The mn stores the number of subjects in treatments.\na=3; mn=c(3, 3, 4); mu1=3; mu2=3; mu3=2; m=50\n# Note treatment 3 has mean mu3=2, which is different from the mean of\n# the other two treatments.\n# Generate the time effects via random effects with AR(1) structure.\nraneff=NorRanGen(m)\n# Generate data and store in wide format.\ndatawide=numeric()\nnow=0\nfor (i in 1:a){\n fi=function(x1, x2) f1(m,x1, x2)*(i==1)+f2(m,x1, x2)*(i==2)+f3(m, x1, x2)*(i==3)\n mu=mu1*(i==1)+mu2*(i==2)+mu3*(i==3)\n for (k in 1:mn[i]){\n now=now+1\n datawide<-rbind(datawide, c(k, i, fi(mu, raneff) ) \t)\n colnames(datawide)=c(\"sub\", \"trt\", paste(\"time\", seq(m), sep=\"\"))\n #this is a typical way to store data in practice\n }\n} #end of j\n\n# Note:There are different time effects since values in raneff vector are different\ndat=dataformat_wide_to_long(datawide) #dat is in long format\n #Note: For each h value below, the test statistic and p-value are calculated.\n # (see Theorem 3.2 of Wang, Higgins, and Blasi (2010))\n\ntcontrast(dat, a, m, mn, h=c(0.45, 0.49), method='original')\n\n\n\n"} {"package":"plan","topic":"as.burndown","snippet":"### Name: as.burndown\n### Title: Create a burndown object\n### Aliases: as.burndown\n\n### ** Examples\n\nlibrary(plan)\n# same data as in tests/burndown.dat\nstart <- as.POSIXct(strptime(\"2006-04-08 12:00:00\", \"%Y-%m-%d %H:%M:%S\"))\ndeadline <- as.POSIXct(strptime(\"2006-04-11 20:00:00\", \"%Y-%m-%d %H:%M:%S\"))\ntasks <- data.frame(key = c(1, 2, 3, 4, 5, 6),\n description = c(\"code read.burndown()\", \"code summary.burndown()\", \n \"code plot.burndown()\", \"create R package\", \n \"write documentation\", \"set up website\"),\n effort = c(4, 1, 5, 2, 2, 1),\n stringsAsFactors = FALSE)\nprogress <- data.frame(key = c(1, 2, 1, 2, 4, 5, 4, 1, 3, 3, 3, 2, 2, 1, 5, 5, 5, 1, 3, 6),\n progress = c(5, 5, 10, 50, 5, 5, 100, 50, 5, 30, 80, 60, \n 100, 70, 30, 90, 100, 100, 100, 100),\n time = structure(c(1144494000, 1144495800, 1144497600, 1144501200, \n 1144517400, 1144519200, 1144523760, 1144566600, \n 1144568460, 1144570680, 1144573200, 1144576800, \n 1144577400, 1144578600, 1144583400, 1144585200,\n 1144585800, 1144586100, 1144586400, 1144591200), \n class = \"POSIXct\"),\n stringsAsFactors = FALSE\n)\nb <- as.burndown(start, deadline, tasks, progress, progressInPercent = TRUE)\nsummary(b)\nplot(b)\n\n\n"} 
{"package":"plan","topic":"as.gantt","snippet":"### Name: as.gantt\n### Title: Create a gantt object.\n### Aliases: as.gantt\n\n### ** Examples\n\n\nlibrary(plan)\narrive <- as.POSIXct(\"2012-09-05\")\nmonth <- 28 * 86400\nyear <- 12 * month\nleave <- arrive + 4 * year\nstartT1 <- arrive\nendT1 <- startT1 + 4 * month\nstartT2 <- endT1 + 1\nendT2 <- startT2 + 4 * month\nstartQE <- arrive + 9 * month\nendQE <- arrive + 12 * month\nQEabsoluteEnd <- arrive + 15 * month\nstartProposal <- arrive + 15 * month # for example\nendProposal <- arrive + 20 * month\nstartThesisWork <- arrive + 2 * month # assumes no thesis work until 2 months in\nendThesisWork <- leave - 4 * month\nstartWriting <- leave - 36 * month\nendWriting <- leave\ng <- as.gantt(key=1:8, c(\"Academic\",\n \"Term 1 classes\",\n \"Term 2 classes\",\n \"Qualifying Examination\",\n \"Research\",\n \"Proposal Defence\",\n \"Thesis Work\",\n \"Paper/Thesis Writing\"),\n c(startT1, startT1, startT2, startQE, startProposal, startProposal,\n startThesisWork, startWriting),\n c(startT1, endT1, endT2, endQE, startProposal, endProposal,\n endThesisWork, endWriting),\n done=rep(0, 7))\nplot(g, xlim=c(arrive, leave),\n ylabel=list(font=c(2,rep(1,3),2), justification=c(0,rep(1,3),0)))\n\n\n"} {"package":"plan","topic":"ganttAddTask","snippet":"### Name: ganttAddTask\n### Title: Add a task to a gantt object\n### Aliases: ganttAddTask\n\n### ** Examples\n\nlibrary(\"plan\")\ng <- new(\"gantt\")\ng <- ganttAddTask(g, \"Courses\") # no times, so a heading\ng <- ganttAddTask(g, \"Physical Oceanography\", \"2016-09-03\", \"2016-12-05\")\ng <- ganttAddTask(g, \"Chemistry Oceanography\", \"2016-09-03\", \"2016-12-05\")\ng <- ganttAddTask(g, \"Fluid Dynamics\", \"2016-09-03\", \"2016-12-05\")\ng <- ganttAddTask(g, \"Biological Oceanography\", \"2017-01-03\", \"2017-04-05\")\ng <- ganttAddTask(g, \"Geological Oceanography\", \"2017-01-03\", \"2017-04-05\")\ng <- ganttAddTask(g, \"Time-series Analysis\", \"2017-01-03\", \"2017-04-05\")\ng <- ganttAddTask(g, \"Research\") # no times, so a heading\ng <- ganttAddTask(g, \"Literature review\", \"2016-09-03\", \"2017-04-05\")\ng <- ganttAddTask(g, \"Develop analysis skills\", \"2016-09-03\", \"2017-08-01\")\ng <- ganttAddTask(g, \"Thesis work\", \"2017-01-01\", \"2018-04-01\")\ng <- ganttAddTask(g, \"Defend thesis proposal\", \"2017-05-01\", \"2017-06-01\")\ng <- ganttAddTask(g, \"Write papers & thesis\", \"2017-05-01\", \"2018-04-01\")\ng <- ganttAddTask(g, \"Defend thesis\", \"2018-05-01\", \"2018-05-15\")\n# Set 'font' for bold-faced headings\nfont <- ifelse(is.na(g[[\"start\"]]), 2, 1)\nplot(g, ylabel=list(font=font))\n\n\n\n"} {"package":"plan","topic":"plot,burndown-method","snippet":"### Name: plot,burndown-method\n### Title: Draw a burndown chart\n### Aliases: plot,burndown-method plot.burndown\n\n### ** Examples\n\nlibrary(plan)\ndata(burndown)\nsummary(burndown)\nplot(burndown)\n\n\n"} {"package":"plan","topic":"plot,gantt-method","snippet":"### Name: plot,gantt-method\n### Title: Draw a Gantt diagram\n### Aliases: plot,gantt-method plot.gantt\n\n### ** Examples\n\nlibrary(plan)\ndata(gantt)\nsummary(gantt)\n\n# 1. Simple plot\nplot(gantt)\n\n# 2. Plot with two events\nevent.label <- c(\"Proposal\", \"AGU\")\nevent.time <- c(\"2008-01-28\", \"2008-12-10\")\nplot(gantt, event.label=event.label,event.time=event.time)\n\n# 3. Control x axis (months, say)\nplot(gantt,labels=paste(\"M\",1:6,sep=\"\"))\n\n# 4. 
Control task colours\nplot(gantt,\n col.done=c(\"black\", \"red\", rep(\"black\", 10)),\n col.notdone=c(\"lightgray\", \"pink\", rep(\"lightgray\", 10)))\n\n# 5. Control event colours (garish, to illustrate)\nplot(gantt, event.time=event.time, event.label=event.label,\n lwd.eventLine=1:2, lty.eventLine=1:2,\n col.eventLine=c(\"pink\", \"lightblue\"),\n col.event=c(\"red\", \"blue\"), font.event=1:2, cex.event=1:2)\n\n# 6. Top task is in bold font and red colour\nplot(gantt,ylabels=list(col=\"red\",font=2))\n\n# 7. Demonstrate zero-time item (which becomes a heading)\ngantt[[\"description\"]][1] <- \"Preliminaries\"\ngantt[[\"end\"]][1] <- gantt[[\"start\"]][1]\nplot(gantt, ylabel=list(font=2, justification=0))\n\n# 8. Arrows at task ends\nplot(gantt, arrows=c(\"right\",\"left\",\"left\",\"right\"))\n\n\n"} {"package":"plan","topic":"read.burndown","snippet":"### Name: read.burndown\n### Title: Scan burndown data file\n### Aliases: read.burndown\n\n### ** Examples\n\nlibrary(plan)\nfilename <- system.file(\"extdata\", \"burndown.dat\", package=\"plan\")\nb <- read.burndown(filename)\nsummary(b)\nplot(b)\n\n\n"} {"package":"plan","topic":"read.gantt","snippet":"### Name: read.gantt\n### Title: Read a gantt data file\n### Aliases: read.gantt\n\n### ** Examples\n\nlibrary(plan)\nfilename <- system.file(\"extdata\", \"gantt.dat\", package=\"plan\")\ng <- read.gantt(filename)\nsummary(g)\nplot(g)\n\n\n\n"} {"package":"plan","topic":"summary,burndown-method","snippet":"### Name: summary,burndown-method\n### Title: Summarize a burndown object\n### Aliases: summary,burndown-method\n\n### ** Examples\n\nlibrary(plan)\ndata(burndown)\nsummary(burndown)\n\n\n"} {"package":"plan","topic":"summary,gantt-method","snippet":"### Name: summary,gantt-method\n### Title: Summarize a gantt object\n### Aliases: summary,gantt-method\n\n### ** Examples\n\nlibrary(plan)\ndata(gantt)\nsummary(gantt)\n\n\n"} {"package":"vici","topic":"ICS_ex","snippet":"### Name: ICS_ex\n### Title: Toy data to upload in the app.\n### Aliases: ICS_ex\n### Keywords: data\n\n### ** Examples\n\nif(interactive()){\nset.seed(1382019)\nnsubj <- 20\nntp <- 3\nnstim <- 3\nnarm <- 3\nsubj <- rep(rep(rep(1:nsubj, each = ntp), times = nstim), times = narm)\nstim <- rep(rep(c(\"NS\", \"S1\", \"S2\"), each = nsubj*ntp), times = narm)\ntp <- rep(rep(c(\"D0\", \"D1\", \"D3\"), times=nsubj*nstim), times = narm)\na <- rep(c(\"Placebo\", \"A2\", \"A3\"), each = nsubj*nstim*ntp)\ny1 <- round(abs(rnorm(n=nsubj*nstim*ntp*narm,m = 0.03, sd=0.06)) +\n (stim==\"S2\" & a == \"A2\" & tp == \"D1\")*abs(rnorm(n=nsubj*nstim*ntp*narm, m = 0.05, sd=0.01)), 4)\ny2 <- round(abs(rnorm(n=nsubj*nstim*ntp*narm,m = 0.03, sd=0.06)) +\n (stim==\"S1\" & a ==\"A3\" & tp == \"D3\")*abs(rnorm(n=nsubj*nstim*ntp*narm, m = 0.1, sd=0.02)), 4)\nICS_ex <- cbind.data.frame(\"Subject\" = subj, \"StimulationPool\" = stim, \"TimePoint\" = tp,\n \"Arm\" = a, \"Response1\" = y1, \"Response2\" = y2)\n#View(ICS_ex)\nwrite.table(ICS_ex, file=\"Documents/GitHub/vici/data/ICS_ex.txt\", sep=\"\\t\",\nrow.names = FALSE, quote = FALSE)\n}\n\n\n\n\n"} {"package":"vici","topic":"run_app","snippet":"### Name: run_app\n### Title: Launch VICI Shiny App\n### Aliases: run_app\n\n### ** Examples\n\nif(interactive()){\nvici::run_app()\n}\n\n\n\n"} {"package":"DiversityOccupancy","topic":"batchoccu","snippet":"### Name: batchoccu\n### Title: Fits occupancy models for multiple species detection history\n### Aliases: batchoccu\n\n### ** Examples\n\n## Not run: \n##D data(\"IslandBirds\")\n##D 
data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D BirdOccupancy <-batchoccu(pres = IslandBirds, sitecov = siteCov, obscov =\n##D Daily_Cov, spp = 5, form = ~ Day + Wind + Rain + Noise + Clouds ~\n##D Elev + AgroFo + SecVec + Wetland)\n##D #plot the response of occupancy to individual variables for species 4 and 5\n##D \n##D responseplot.occu(batch = BirdOccupancy, spp = 4, variable = Elev)\n##D \n##D responseplot.occu(batch = BirdOccupancy, spp = 5, variable = Elev)\n## End(Not run)\n#Dredge for all species\nBirdOccupancy2 <- batchoccu(pres = IslandBirds, sitecov = siteCov, obscov =\nDaily_Cov, spp = 5, form = ~ 1 ~\nElev + AgroFo, dredge = TRUE)\n\n\n"} {"package":"DiversityOccupancy","topic":"batchoccuavg","snippet":"### Name: batchoccuavg\n### Title: Fits occupancy models for multiple species detection history and\n### calculated model average\n### Aliases: batchoccuavg\n\n### ** Examples\n\n## Not run: \n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D BirdOccupancy <-batchoccuavg(pres = IslandBirds, sitecov = siteCov, obscov =\n##D Daily_Cov, spp = 5, form = ~ Day + Wind + Rain + Noise + Clouds ~\n##D Elev + AgroFo + SecVec + Wetland)\n##D #Summary of averaged model for species 2\n##D summary(BirdOccupancy$models[[2]])\n## End(Not run)\n#Dredge for all species\n\n\n"} {"package":"DiversityOccupancy","topic":"diversity.predict","snippet":"### Name: diversity.predict\n### Title: Makes a spacially explicit prediction of the occupancy of\n### multiple species and alpha diversity, and select the area where\n### Aliases: diversity.predict\n\n### ** Examples\n\n## Not run: \n##D #Load the data\n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D data(\"Birdstack\")\n##D \n##D #Model the abundance for 5 bat species and calculate alpha diversity from that\n##D \n##D #Model the abundance for 5 bat species and calculate alpha diversity from that\n##D \n##D BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,\n##D obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time ~ Elev + Wetland + Upland)\n##D \n##D #Select the best model that explains diversity using genetic algorithms\n##D set.seed(123)\n##D glm.Birdiversity <- model.diversity(BirdDiversity, method = \"g\")\n##D \n##D # get the area where the first two bird species are most abundant\n##D # and the diversity is high\n##D \n##D library(rgdal)\n##D Selected.area <- diversity.predict(model = BirdDiversity, diverse = glm.Birdiversity,\n##D new.data = Birdstack, quantile.nth = 0.65, species =\n##D c(TRUE, TRUE, FALSE, FALSE, FALSE))\n##D \n##D Selected.area\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"diversityoccu","snippet":"### Name: diversityoccu\n### Title: Calculates alpha diversity from multiple species occupancy data\n### Aliases: diversityoccu\n\n### ** Examples\n\n## Not run: \n##D #Load the data\n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D \n##D #Model the abundance for 5 bird species and calculate alpha diversity from that\n##D \n##D BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,\n##D obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time + Rain +\n##D Noise ~ Elev + AgroFo + SecVec + Wetland + Upland)\n##D \n##D #To see the estimates and p values for each model:\n##D \n##D BirdDiversity$models\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"model.diversity","snippet":"### Name: model.diversity\n### Title: Find the best GLM model explaining the alpha divesity of 
the\n### species\n### Aliases: model.diversity\n\n### ** Examples\n\n## Not run: \n##D #Load the data\n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D \n##D #Model the abundance for 5 bird species and calculate alpha diversity from that\n##D \n##D BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,\n##D obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time + Rain +\n##D Noise ~ Elev + AgroFo + SecVec + Wetland + Upland)\n##D \n##D #Select the best model that explains diversity using genetic algorithms\n##D set.seed(123)\n##D glm.Birdiversity <- model.diversity(BirdDiversity, method = \"g\")\n##D \n##D #see the best models\n##D \n##D glm.Birdiversity$Best.model\n##D \n##D #plot the response of diversity to individual variables\n##D \n##D plot(glm.Birdiversity, elev)\n##D \n##D #To add the quadratic components of models\n##D \n##D glm.birdiversity <- model.diversity(BirdDiversity, method = \"g\", squared = TRUE)\n##D \n##D responseplot.diver(glm.birdiversity, Elev)\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"occupancy.predict","snippet":"### Name: occupancy.predict\n### Title: Predicts occupancy for all the species in a batchoccupancy class\n### object\n### Aliases: occupancy.predict\n\n### ** Examples\n\n## Not run: \n##D #Load the data\n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D data(\"Birdstack\")\n##D BirdOccupancy <-batchoccu(pres = IslandBirds, sitecov = siteCov, obscov =\n##D Daily_Cov, spp = 5, form = ~ Day + Wind + Rain + Noise + Clouds ~\n##D Elev + AgroFo + SecVec + Wetland)\n##D \n##D Occupancy.stack <- occupancy.predict(batch = BirdOccupancy, new.data =\n##D Birdstack)\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"responseplot.abund","snippet":"### Name: responseplot.abund\n### Title: plot the response of an abundance model to the change of\n### a particular variable\n### Aliases: responseplot.abund\n\n### ** Examples\n\n## Not run: \n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D \n##D #Model the abundance for 5 bird species and calculate alpha diversity from that\n##D \n##D BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,\n##D obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time + Rain +\n##D Noise ~ Elev + AgroFo + SecVec + Wetland + Upland)\n##D \n##D #plot the response of abundance to individual variables for species 4, 11\n##D \n##D responseplot.abund(batch = BirdDiversity, spp = 4, variable = Elev)\n##D \n##D responseplot.abund(batch = BirdDiversity, spp = 11, variable = Elev)\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"responseplot.diver","snippet":"### Name: responseplot.diver\n### Title: plot the response of the calculated alpha diversity to the\n### change of a particular variable\n### Aliases: responseplot.diver\n\n### ** Examples\n\n## Not run: \n##D #Load the data\n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D \n##D #Model the abundance for 5 bird species and calculate alpha diversity from that\n##D \n##D BirdDiversity <-diversityoccu(pres = IslandBirds, sitecov = siteCov,\n##D obscov = Daily_Cov,spp = 5, form = ~ Day + Wind + Time + Rain +\n##D Noise ~ Elev + AgroFo + SecVec + Wetland + Upland)\n##D \n##D #Select the best model that explains diversity using genetic algorithms\n##D set.seed(123)\n##D glm.Birdiversity <- model.diversity(BirdDiversity, method = \"g\")\n##D \n##D #see the best models\n##D \n##D 
glm.Birdiversity$Best.model\n##D \n##D #plot the response of diversity to individual variables\n##D \n##D plot(glm.Birdiversity, elev)\n## End(Not run)\n\n\n"} {"package":"DiversityOccupancy","topic":"responseplot.occu","snippet":"### Name: responseplot.occu\n### Title: plot the response of an occupancy model to the change of\n### a particular variable\n### Aliases: responseplot.occu\n\n### ** Examples\n\n## Not run: \n##D data(\"IslandBirds\")\n##D data(\"Daily_Cov\")\n##D data(\"siteCov\")\n##D BirdOccupancy <-batchoccu(pres = IslandBirds, sitecov = siteCov, obscov =\n##D Daily_Cov, spp = 5, form = ~ Day + Wind + Rain + Noise + Clouds ~\n##D Elev + AgroFo + SecVec + Wetland)\n##D #plot the response of occupancy to individual variables for species 4 and 5\n##D \n##D responseplot.occu(batch = BirdOccupancy, spp = 4, variable = Elev)\n##D \n##D \n##D responseplot.occu(batch = BirdOccupancy, spp = 5, variable = Elev)\n## End(Not run)\n\n\n"} {"package":"ACWR","topic":"ACWR","snippet":"### Name: ACWR\n### Title: Acute Chronic Workload Ratio\n### Aliases: ACWR\n\n### ** Examples\n\n\n## Not run: \n##D # Get old working directory\n##D oldwd <- getwd()\n##D \n##D # Set temporary directory\n##D setwd(tempdir())\n##D \n##D # Read dfs\n##D data(\"training_load\", package = \"ACWR\")\n##D \n##D # Convert to data.frame\n##D training_load <- data.frame(training_load)\n##D \n##D # Calculate ACWR\n##D result_ACWR <- ACWR(db = training_load,\n##D ID = \"ID\",\n##D TL = \"TL\",\n##D weeks = \"Week\",\n##D days = \"Day\",\n##D training_dates = \"Training_Date\",\n##D ACWR_method = c(\"EWMA\", \"RAC\", \"RAU\"))\n##D \n##D # set user working directory\n##D setwd(oldwd)\n## End(Not run)\n\n\n\n"} {"package":"ACWR","topic":"EWMA","snippet":"### Name: EWMA\n### Title: Exponentially Weighted Moving Average\n### Aliases: EWMA\n\n### ** Examples\n\n\n## Not run: \n##D # Get old working directory\n##D oldwd <- getwd()\n##D \n##D # Set temporary directory\n##D setwd(tempdir())\n##D \n##D # Read db\n##D data(\"training_load\", package = \"ACWR\")\n##D \n##D # Convert to data.frame\n##D training_load <- data.frame(training_load)\n##D \n##D # Select the first subject\n##D training_load_1 <- training_load[training_load[[\"ID\"]] == 1, ]\n##D \n##D # Calculate ACWR\n##D result_EWMA <- EWMA(TL = training_load_1$TL)\n##D \n##D # set user working directory\n##D setwd(oldwd)\n## End(Not run)\n\n\n\n"} {"package":"ACWR","topic":"RAC","snippet":"### Name: RAC\n### Title: Rolling Average Coupled\n### Aliases: RAC\n\n### ** Examples\n\n\n## Not run: \n##D # Get old working directory\n##D oldwd <- getwd()\n##D \n##D # Set temporary directory\n##D setwd(tempdir())\n##D \n##D # Read db\n##D data(\"training_load\", package = \"ACWR\")\n##D \n##D # Convert to data.frame\n##D training_load <- data.frame(training_load)\n##D \n##D # Select the first subject\n##D training_load_1 <- training_load[training_load[[\"ID\"]] == 1, ]\n##D \n##D # Calculate ACWR\n##D result_RAC <- RAC(TL = training_load_1$TL,\n##D weeks = training_load_1$Week,\n##D training_dates = training_load_1$Training_Date)\n##D \n##D # set user working directory\n##D setwd(oldwd)\n## End(Not run)\n\n\n\n"} {"package":"ACWR","topic":"RAU","snippet":"### Name: RAU\n### Title: Rolling Average Uncoupled\n### Aliases: RAU\n\n### ** Examples\n\n\n## Not run: \n##D # Get old working directory\n##D oldwd <- getwd()\n##D \n##D # Set temporary directory\n##D setwd(tempdir())\n##D \n##D # Read db\n##D data(\"training_load\", package = \"ACWR\")\n##D \n##D # Convert to data.frame\n##D 
training_load <- data.frame(training_load)\n##D \n##D # Select the first subject\n##D training_load_1 <- training_load[training_load[[\"ID\"]] == 1, ]\n##D \n##D # Calculate ACWR\n##D result_RAU <- RAU(TL = training_load_1$TL,\n##D weeks = training_load_1$Week,\n##D training_dates = training_load_1$Training_Date)\n##D \n##D # set user working directory\n##D setwd(oldwd)\n## End(Not run)\n\n\n\n"} {"package":"ACWR","topic":"plot_ACWR","snippet":"### Name: plot_ACWR\n### Title: ACWR plots using d3.js\n### Aliases: plot_ACWR\n\n### ** Examples\n\n\n## Not run: \n##D # Get old working directory\n##D oldwd <- getwd()\n##D \n##D # Set temporary directory\n##D setwd(tempdir())\n##D \n##D # Read db\n##D data(\"training_load\", package = \"ACWR\")\n##D \n##D # Convert to data.frame\n##D training_load_db <- data.frame(training_load)\n##D \n##D # Calculate ACWR\n##D result_ACWR <- ACWR(db = training_load_db,\n##D ID = \"ID\",\n##D TL = \"TL\",\n##D weeks = \"Week\",\n##D days = \"Day\",\n##D training_dates = \"Training_Date\",\n##D ACWR_method = c(\"EWMA\", \"RAC\", \"RAU\"))\n##D \n##D # Plot for 1 subject\n##D # Select the first subject\n##D result_ACWR_1 <- result_ACWR[result_ACWR[[\"ID\"]] == 1, ]\n##D \n##D # plot ACWR (e.g. EWMA)\n##D ACWR_plot_1 <- plot_ACWR(db = result_ACWR_1,\n##D TL = \"TL\",\n##D ACWR = \"EWMA_ACWR\",\n##D day = \"Day\")\n##D \n##D # Plot for several subjects\n##D # plot ACWR (e.g. RAC)\n##D ACWR_plot <- plot_ACWR(db = result_ACWR,\n##D TL = \"TL\",\n##D ACWR = \"RAC_ACWR\",\n##D day = \"Day\",\n##D ID = \"ID\")\n##D \n##D # set user working directory\n##D setwd(oldwd)\n## End(Not run)\n\n\n\n"} {"package":"ETLUtils","topic":"ETLUtils-package","snippet":"### Name: ETLUtils-package\n### Title: Extra utility functions to execute standard ETL operations on\n### large data\n### Aliases: ETLUtils-package ETLUtils\n\n### ** Examples\n\n# See the specified functions in the package\n\n\n"} {"package":"ETLUtils","topic":"factorise","snippet":"### Name: factorise\n### Title: Put character vectors, columns of a data.frame or list elements\n### as factor\n### Aliases: factorise factorise.default factorise.character\n### factorise.data.frame factorise.list\n\n### ** Examples\n\nx <- data.frame(x = 1:4, y = LETTERS[1:4], b = c(TRUE, FALSE, NA, TRUE), stringsAsFactors=FALSE)\nstr(factorise(x))\nstr(factorise(x, logicals = TRUE))\nstr(factorise(list(a = LETTERS, b = 1:10, c = pi, d = list(x = x))))\n\n\n"} {"package":"ETLUtils","topic":"matchmerge","snippet":"### Name: matchmerge\n### Title: Merge two data frames (fast) by common columns by performing a\n### left (outer) join or an inner join.\n### Aliases: matchmerge\n\n### ** Examples\n\nleft <- data.frame(idlhs = c(1:4, 3:5), a = LETTERS[1:7], stringsAsFactors = FALSE)\nright <- data.frame(idrhs = c(1:4), b = LETTERS[8:11], stringsAsFactors = FALSE)\n## Inner join\nmatchmerge(x=left, y=right, by.x = \"idlhs\", by.y = \"idrhs\")\n\n## Left outer join in 2 ways\nmatchmerge(x=left, y=right, by.x = \"idlhs\", by.y = \"idrhs\", all.x=TRUE)\nmatchmerge(x=left, y=right, by.x = left$idlhs, by.y = right$idrhs, all.x=TRUE, by.iskey=TRUE)\n\n## Show usage when y is just a vector instead of a data.frame\nmatchmerge(x=left, y=right$b, by.x = left$idlhs, by.y = right$idrhs, all.x=TRUE, \nby.iskey=TRUE, add.columns=\"b.renamed\")\n\n## Show speedup difference with merge\n## Not run: \n##D size <- 100000 \n##D dimension <- seq(Sys.Date(), Sys.Date()+10, by = \"day\")\n##D left <- data.frame(date = rep(dimension, size), sales = rnorm(size))\n##D right 
<- data.frame(date = dimension, feature = dimension-7, feature = dimension-14)\n##D dim(left)\n##D dim(right)\n##D print(system.time(merge(left, right, by.x=\"date\", by.y=\"date\", all.x=TRUE, all.y=FALSE)))\n##D print(system.time(matchmerge(left, right, by.x=\"date\", by.y=\"date\", all.x=TRUE, by.iskey=FALSE)))\n## End(Not run)\n## Show example usage \nproducts <- expand.grid(product = c(\"Pepsi\", \"Coca Cola\"), type = c(\"Can\",\"Bottle\"), \nsize = c(\"6Ml\",\"8Ml\"), distributor = c(\"Distri X\",\"Distri Y\"), salesperson = c(\"Mr X\",\"Mr Y\"), \nstringsAsFactors=FALSE)\nproducts <- products[!duplicated(products[, c(\"product\",\"type\",\"size\")]), ]\nproducts$key <- paste(products$product, products$type, products$size, sep=\".\")\nsales <- expand.grid(item = unique(products$key), sales = rnorm(10000, mean = 100))\nstr(products)\nstr(sales)\ninfo <- matchmerge(x=sales, y=products, \n by.x=sales$item, by.y=products$key, all.x=TRUE, by.iskey=TRUE, \n add.columns=c(\"size\",\"distributor\"), check.duplicates=FALSE)\nstr(info)\ntapply(info$sales, info$distributor, FUN=sum)\n\n\n"} {"package":"ETLUtils","topic":"naLOCFPlusone","snippet":"### Name: naLOCFPlusone\n### Title: Performs NA replacement by last observation carried forward but\n### adds 1 to the last observation carried forward.\n### Aliases: naLOCFPlusone\n\n### ** Examples\n\nrequire(zoo)\nx <- c(2,NA,NA,4,5,2,NA)\nnaLOCFPlusone(x)\n\n\n"} {"package":"ETLUtils","topic":"read.dbi.ffdf","snippet":"### Name: read.dbi.ffdf\n### Title: Read data from a DBI connection into an ffdf.\n### Aliases: read.dbi.ffdf\n\n### ** Examples\n\nrequire(ff)\n\n##\n## Example query using data in sqlite\n##\nrequire(RSQLite)\ndbfile <- system.file(\"smalldb.sqlite3\", package=\"ETLUtils\")\ndrv <- dbDriver(\"SQLite\")\nquery <- \"select * from testdata limit 10000\"\nx <- read.dbi.ffdf(query = query, dbConnect.args = list(drv = drv, dbname = dbfile), \nfirst.rows = 100, next.rows = 1000, VERBOSE=TRUE)\nclass(x)\nx[1:10, ]\n\n## show it is the same as getting the data directly using RSQLite \n## apart from characters which are factors in ffdf objects\ndirectly <- dbGetQuery(dbConnect(drv = drv, dbname = dbfile), query)\ndirectly <- as.data.frame(as.list(directly), stringsAsFactors=TRUE)\nall.equal(x[,], directly)\n\n## show how to use the transFUN argument to transform the data before saving into the ffdf\n## and shows the use of the levels argument\nquery <- \"select * from testdata limit 10\"\nx <- read.dbi.ffdf(query = query, dbConnect.args = list(drv = drv, dbname = dbfile), \nfirst.rows = 100, next.rows = 1000, VERBOSE=TRUE, levels = list(a = rev(LETTERS)),\ntransFUN = function(x, subtractdays){\n\tx$b <- as.Date(x$b)\n\tx$b.subtractdaysago <- x$b - subtractdays\n\tx\n}, subtractdays=7)\nclass(x)\nx[1:10, ]\n## remark that the levels of column a are reversed due to specifying the levels argument correctly\nlevels(x$a)\n\n## show how to append data to an existing ffdf object \ntransformexample <- function(x, subtractdays){\n\tx$b <- as.Date(x$b)\n\tx$b.subtractdaysago <- x$b - subtractdays\n\tx\n}\ndim(x)\nx[,]\ncombined <- read.dbi.ffdf(query = query, \n dbConnect.args = list(drv = drv, dbname = dbfile), \n first.rows = 100, next.rows = 1000, x = x, VERBOSE=TRUE, \n transFUN = transformexample, subtractdays=1000)\ndim(combined)\ncombined[,]\n\n##\n## Example query using ROracle. 
Do try this at home with some larger data :)\n##\n## Not run: \n##D require(ROracle)\n##D query <- \"select OWNER, TABLE_NAME, TABLESPACE_NAME, NUM_ROWS, LAST_ANALYZED from all_all_tables\" \n##D x <- read.dbi.ffdf(query=query,\n##D dbConnect.args = list(drv = dbDriver(\"Oracle\"), \n##D user = \"YourUser\", password = \"YourPassword\", dbname = \"Mydatabase\"),\n##D first.rows = 100, next.rows = 50000, nrows = -1, VERBOSE=TRUE)\n## End(Not run)\n\n\n"} {"package":"ETLUtils","topic":"read.jdbc.ffdf","snippet":"### Name: read.jdbc.ffdf\n### Title: Read data from a JDBC connection into an ffdf.\n### Aliases: read.jdbc.ffdf\n\n### ** Examples\n\n## Not run: \n##D require(ff)\n##D \n##D ##\n##D ## Example query using data in sqlite\n##D ##\n##D require(RSQLite)\n##D dbfile <- system.file(\"smalldb.sqlite3\", package=\"ETLUtils\")\n##D drv <- JDBC(driverClass = \"org.sqlite.JDBC\", classPath = \"/usr/local/lib/sqlite-jdbc-3.7.2.jar\")\n##D query <- \"select * from testdata limit 10000\"\n##D x <- read.jdbc.ffdf(query = query, \n##D dbConnect.args = list(drv = drv, url = sprintf(\"jdbc:sqlite:%s\", dbfile)), \n##D first.rows = 100, next.rows = 1000, VERBOSE=TRUE)\n##D class(x)\n##D x[1:10, ]\n## End(Not run)\n\n\n"} {"package":"ETLUtils","topic":"read.odbc.ffdf","snippet":"### Name: read.odbc.ffdf\n### Title: Read data from a ODBC connection into an ffdf.\n### Aliases: read.odbc.ffdf\n\n### ** Examples\n\n##\n## Using the sqlite database (smalldb.sqlite3) in the /inst folder of the package\n## set up the sqlite ODBC driver (www.stats.ox.ac.uk/pub/bdr/RODBC-manual.pd) \n## and call it 'smalltestsqlitedb' \n##\n## Not run: \n##D require(RODBC)\n##D x <- read.odbc.ffdf(\n##D query = \"select * from testdata limit 10000\",\n##D odbcConnect.args = list(\n##D dsn=\"smalltestsqlitedb\", uid = \"\", pwd = \"\", \n##D believeNRows = FALSE, rows_at_time = 1), \n##D nrows = -1, \n##D first.rows = 100, next.rows = 1000, VERBOSE = TRUE)\n## End(Not run)\n\n\n"} {"package":"ETLUtils","topic":"recoder","snippet":"### Name: recoder\n### Title: Recodes the values of a character vector\n### Aliases: recoder\n\n### ** Examples\n\nrecoder(x=append(LETTERS, NA, 5), from = c(\"A\",\"B\"), to = c(\"a.123\",\"b.123\")) \n\n\n"} {"package":"ETLUtils","topic":"renameColumns","snippet":"### Name: renameColumns\n### Title: Renames variables in a data frame.\n### Aliases: renameColumns\n\n### ** Examples\n\nx <- data.frame(x = 1:4, y = LETTERS[1:4])\nrenameColumns(x, from = c(\"x\",\"y\"), to = c(\"digits\",\"letters\"))\n\n\n"} {"package":"ETLUtils","topic":"write.dbi.ffdf","snippet":"### Name: write.dbi.ffdf\n### Title: Write ffdf data to a database table by using a DBI connection.\n### Aliases: write.dbi.ffdf\n\n### ** Examples\n\nrequire(ff)\n\n##\n## Example query using data in sqlite\n##\nrequire(RSQLite)\ndbfile <- system.file(\"smalldb.sqlite3\", package=\"ETLUtils\")\ndrv <- dbDriver(\"SQLite\")\nquery <- \"select * from testdata limit 10000\"\nx <- read.dbi.ffdf(query = query, dbConnect.args = list(drv = drv, dbname = dbfile), \nfirst.rows = 100, next.rows = 1000, VERBOSE=TRUE)\n\n## copy db in package folder to temp folder as CRAN does not allow writing in package dirs\ndbfile <- tempfile(fileext = \".sqlite3\")\nfile.copy(from = system.file(\"smalldb.sqlite3\", package=\"ETLUtils\"), to = dbfile)\nSys.chmod(dbfile, mode = \"777\")\nwrite.dbi.ffdf(x = x, name = \"helloworld\", row.names = FALSE, overwrite = TRUE,\n dbConnect.args = list(drv = drv, dbname = dbfile), \n by = 1000, VERBOSE=TRUE)\ntest <- 
read.dbi.ffdf(query = \"select * from helloworld\", \n dbConnect.args = list(drv = drv, dbname = dbfile))\n\n## clean up for CRAN\nfile.remove(dbfile)\n## Not run: \n##D require(ROracle)\n##D write.dbi.ffdf(x = x, name = \"hellooracle\", row.names = FALSE, overwrite = TRUE,\n##D dbConnect.args = list(drv = dbDriver(\"Oracle\"), \n##D user = \"YourUser\", password = \"YourPassword\", dbname = \"Mydatabase\"), \n##D VERBOSE=TRUE)\n## End(Not run)\n\n\n"} {"package":"ETLUtils","topic":"write.jdbc.ffdf","snippet":"### Name: write.jdbc.ffdf\n### Title: Write ffdf data to a database table by using a JDBC connection.\n### Aliases: write.jdbc.ffdf\n\n### ** Examples\n\n## Not run: \n##D require(ff)\n##D \n##D ##\n##D ## Example query using data in sqlite\n##D ##\n##D require(RJDBC)\n##D dbfile <- system.file(\"smalldb.sqlite3\", package=\"ETLUtils\")\n##D drv <- JDBC(driverClass = \"org.sqlite.JDBC\", classPath = \"/usr/local/lib/sqlite-jdbc-3.7.2.jar\")\n##D query <- \"select * from testdata limit 10000\"\n##D x <- read.jdbc.ffdf(query = query, \n##D dbConnect.args = list(drv = drv, url = sprintf(\"jdbc:sqlite:%s\", dbfile)), \n##D first.rows = 100, next.rows = 1000, VERBOSE=TRUE)\n##D \n##D write.jdbc.ffdf(x = x, name = \"helloworld\", row.names = FALSE, overwrite = TRUE,\n##D dbConnect.args = list(drv = drv, url = sprintf(\"jdbc:sqlite:%s\", dbfile)), \n##D by = 1000, VERBOSE=TRUE)\n##D test <- read.jdbc.ffdf(query = \"select * from helloworld\", \n##D dbConnect.args = list(drv = drv, url = sprintf(\"jdbc:sqlite:%s\", dbfile)))\n## End(Not run)\n\n\n"} {"package":"ETLUtils","topic":"write.odbc.ffdf","snippet":"### Name: write.odbc.ffdf\n### Title: Write ffdf data to a database table by using a ODBC connection.\n### Aliases: write.odbc.ffdf\n\n### ** Examples\n\n##\n## Using the sqlite database (smalldb.sqlite3) in the /inst folder of the package\n## set up the sqlite ODBC driver (www.stats.ox.ac.uk/pub/bdr/RODBC-manual.pd) \n## and call it 'smalltestsqlitedb' \n##\n## Not run: \n##D require(RODBC)\n##D x <- read.odbc.ffdf(\n##D query = \"select * from testdata limit 10000\",\n##D odbcConnect.args = list(\n##D dsn=\"smalltestsqlitedb\", uid = \"\", pwd = \"\", \n##D believeNRows = FALSE, rows_at_time = 1), \n##D nrows = -1, \n##D first.rows = 100, next.rows = 1000, VERBOSE = TRUE)\n##D \n##D write.odbc.ffdf(x = x, tablename = \"testdata\", rownames = FALSE, append = TRUE,\n##D odbcConnect.args = list(\n##D dsn=\"smalltestsqlitedb\", uid = \"\", pwd = \"\", \n##D believeNRows = FALSE, rows_at_time = 1), \n##D by = 1000, VERBOSE=TRUE)\n## End(Not run)\n\n\n"} {"package":"iForecast","topic":"iForecast","snippet":"### Name: iForecast\n### Title: Extract predictions and class probabilities from train objects\n### Aliases: iForecast\n\n### ** Examples\n\n# Cross-validation takes time, example below is commented.\n## Machine Learning by library(caret)\n#Case 1. 
Low frequency, regression type\ndata(\"macrodata\")\ndep <- macrodata[569:669,\"unrate\",drop=FALSE]\nind <- macrodata[569:669,-1,drop=FALSE]\ntrain.end <- \"2018-12-01\" # Choosing the end date of the training sample\n\nmodels <- c(\"svm\",\"rf\",\"rpart\")[1]\ntype <- c(\"none\",\"trend\",\"season\",\"both\")[1]\n#Caret <- ttsCaret(y=dep, x=ind, arOrder=c(1), xregOrder=c(1),\n# method=models, tuneLength =1, train.end, type=type,resampling=\"cv\",preProcess = \"center\")\n# testData1 <- window(Caret$data,start=\"2019-01-01\",end=end(Caret$data))\n#P1 <- iForecast(Model=Caret,newdata=testData1,type=\"static\")\n#P2 <- iForecast(Model=Caret,newdata=testData1,type=\"dynamic\")\n\n#tail(cbind(testData1[,1],P1))\n#tail(cbind(testData1[,1],P2))\n\n#Case 2. Low frequency, binary type\ndata(bc) #binary dependent variable, business cycle phases\ndep=bc[,1,drop=FALSE]\nind=bc[,-1]\n\ntrain.end=as.character(rownames(dep))[as.integer(nrow(dep)*0.8)]\ntest.start=as.character(rownames(dep))[as.integer(nrow(dep)*0.8)+1]\n\n#Caret = ttsCaret(y=dep, x=ind, arOrder=c(1), xregOrder=c(1), method=models,\n# tuneLength =10, train.end, type=type)\n\n#testData1=window(Caret$data,start=test.start,end=end(Caret$data))\n\n#head(Caret$dataused)\n#P1=iForecast(Model=Caret,newdata=testData1,type=\"static\")\n#P2=iForecast(Model=Caret,newdata=testData1,type=\"dynamic\")\n\n#tail(cbind(testData1[,1],P1),10)\n#tail(cbind(testData1[,1],P2),10)\n\n\n\n"} {"package":"iForecast","topic":"rollingWindows","snippet":"### Name: rollingWindows\n### Title: Rolling timeframe for time series analysis\n### Aliases: rollingWindows\n\n### ** Examples\n\ndata(macrodata)\ny=macrodata[,1,drop=FALSE]\ntimeframe=rollingWindows(y,estimation=\"300m\",by=\"6m\")\n#estimation=\"300m\", because macrodata is monthly\nFROM=timeframe$from\nTO=timeframe$to\n\ndata(ES_Daily)\ny=ES_Daily[,1,drop=FALSE]\ntimeframe=rollingWindows(y,estimation =\"60w\",by=\"1w\")\n#60 weeks as estimation window and move by 1 week.\n\nFROM=timeframe$from\nTO=timeframe$to\n\ny=ES_Daily[,1,drop=FALSE]\ntimeframe=rollingWindows(y,estimation =\"250d\",by=\"1d\")\n#250-day as estimation window and move by 1 day.\n\n\n\n"} {"package":"iForecast","topic":"ttsAutoML","snippet":"### Name: ttsAutoML\n### Title: Train time series by automatic machine learning of 'h2o'\n### provided by H2O.ai\n### Aliases: ttsAutoML\n\n### ** Examples\n\n# Cross-validation takes time, example below is commented.\ndata(\"macrodata\")\ndep<-macrodata[,\"unrate\",drop=FALSE]\nind<-macrodata[,-1,drop=FALSE]\n\n# Choosing the dates of training and testing data\ntrain.end<-\"2008-12-01\"\n\n#autoML of H2O.ai\n\n#autoML <- ttsAutoML(y=dep, x=ind, train.end,arOrder=c(2,4),\n# xregOrder=c(0,1,3), maxSecs =30)\n#testData2 <- window(autoML$dataused,start=\"2009-01-01\",end=end(autoML$data))\n#P1<-iForecast(Model=autoML,newdata=testData2,type=\"static\")\n#P2<-iForecast(Model=autoML,newdata=testData2,type=\"dynamic\")\n\n#tail(cbind(testData2[,1],P1))\n#tail(cbind(testData2[,1],P2))\n\n\n\n\n"} {"package":"iForecast","topic":"ttsCaret","snippet":"### Name: ttsCaret\n### Title: Train time series by 'caret' and produce two types of time\n### series forecasts: static and recursive\n### Aliases: ttsCaret\n\n### ** Examples\n\n# Cross-validation takes time, example below is commented.\n## Machine Learning by library(caret)\nlibrary(zoo)\n#Case 1. 
Low frequency\ndata(\"macrodata\")\ndep <- macrodata[569:669,\"unrate\",drop=FALSE]\nind <- macrodata[569:669,-1,drop=FALSE]\ntrain.end <- \"2018-12-01\" # Choosing the end date of the training sample\n\nmodels <- c(\"glm\",\"knn\",\"nnet\",\"rpart\",\"rf\",\"svm\",\"enet\",\"gbm\",\"lasso\",\"bridge\")[2]\ntype <- c(\"none\",\"trend\",\"season\",\"both\")[1]\nCaret <- ttsCaret(y=dep, x=NULL, arOrder=c(1), xregOrder=c(1),\n method=models, tuneLength =1, train.end, type=type,\n resampling=c(\"boot\",\"cv\",\"repeatedcv\")[2],preProcess = \"center\")\n testData1 <- window(Caret$data,start=\"2019-01-01\",end=end(Caret$data))\nP1 <- iForecast(Model=Caret,newdata=testData1,type=\"static\")\nP2 <- iForecast(Model=Caret,newdata=testData1,type=\"dynamic\")\n\ntail(cbind(testData1[,1],P1,P2))\n\n#Case 2. High frequency\n#head(ES_15m)\n#head(ES_Daily)\n#dep <- ES_15m #SP500 15-minute realized absolute variance\n#ind <- NULL\n#train.end <- as.character(rownames(dep))[as.integer(nrow(dep)*0.9)]\n\n#models<-c(\"svm\",\"rf\",\"rpart\",\"gamboost\",\"BstLm\",\"bstSm\",\"blackboost\")[1]\n#type<-c(\"none\",\"trend\",\"season\",\"both\")[1]\n# Caret <- ttsCaret(y=dep, x=ind, arOrder=c(3,5), xregOrder=c(0,2,4),\n# method=models, tuneLength =10, train.end, type=type,\n# resampling=c(\"boot\",\"cv\",\"repeatedcv\")[2],preProcess = \"center\")\n#testData1<-window(Caret$data,start=\"2009-01-01\",end=end(Caret$data))\n#P1<-iForecast(Model=Caret,newdata=testData1,type=\"static\")\n#P2<-iForecast(Model=Caret,newdata=testData1,type=\"dynamic\")\n\n\n\n"} {"package":"iForecast","topic":"ttsLSTM","snippet":"### Name: ttsLSTM\n### Title: Train time series by LSTM of 'tensorflow' provided by 'keras'\n### Aliases: ttsLSTM\n\n### ** Examples\n\n# Cross-validation takes time, example below is commented.\ndata(\"macrodata\")\ndep<-macrodata[,\"unrate\",drop=FALSE]\nind<-macrodata[,-1,drop=FALSE]\n\n# Choosing the dates of training and testing data\ntrain.end<-\"2008-12-01\"\n\n\n#RNN with LSTM network\n#LSTM<-ttsLSTM(y=dep, x=ind, train.end,arOrder=c(2,4), xregOrder=c(1,4),\n# memoryLoops=5, type=c(\"none\",\"trend\",\"season\",\"both\")[4],\n# batch.range=2:7,batch.size=NULL)\n\n#testData3<-window(LSTM$dataused,start=\"2009-01-01\",end=end(LSTM$data))\n#P1<-iForecast(Model=LSTM,newdata=testData3,type=\"static\")\n#P2<-iForecast(Model=LSTM,newdata=testData3,type=\"dynamic\")\n\n#tail(cbind(testData3[,1],P1,P2))\n\n\n\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"MultiGlarmaVarSel-package","snippet":"### Name: MultiGlarmaVarSel-package\n### Title: Variable Selection in Sparse Multivariate GLARMA Models\n### Aliases: MultiGlarmaVarSel-package MultiGlarmaVarSel\n### Keywords: package\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\nresult=variable_selection(Y, X, gamma_0, k_max=1, \nn_iter=100, method=\"min\", nb_rep_ss=1000, threshold=0.6)\nestim_active = result$estim_active\neta_est = result$eta_est\ngamma_est = result$gamma_est\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"NR_gamma","snippet":"### Name: NR_gamma\n### Title: Newton-Raphson method for estimation of gamma\n### Aliases: NR_gamma\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\neta_glm_mat_0 = matrix(0,ncol=T,nrow=I)\nfor (t in 1:T)\n{\n result_glm_0 = glm(Y[,t]~X-1,family=poisson(link='log'))\n 
eta_glm_mat_0[,t]=as.numeric(result_glm_0$coefficients)\n}\neta_0 = round(as.numeric(t(eta_glm_mat_0)),digits=6)\ngamma_est=NR_gamma(Y, X, eta_0, gamma_0, I, J, n_iter = 100)\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"Y","snippet":"### Name: Y\n### Title: Observation matrix Y\n### Aliases: Y\n\n### ** Examples\n\ndata(Y)\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"grad_hess_L_eta","snippet":"### Name: grad_hess_L_eta\n### Title: Gradient and Hessian of the log-likelihood with respect to eta\n### Aliases: grad_hess_L_eta\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\neta_glm_mat_0 = matrix(0,ncol=T,nrow=I)\nfor (t in 1:T)\n{\n result_glm_0 = glm(Y[,t]~X-1,family=poisson(link='log'))\n eta_glm_mat_0[,t]=as.numeric(result_glm_0$coefficients)\n}\neta_0 = round(as.numeric(t(eta_glm_mat_0)),digits=6)\nresult = grad_hess_L_eta(Y, X, eta_0, gamma_0, I, J)\ngrad = result$grad_L_eta\nHessian = result$hess_L_eta\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"grad_hess_L_gamma","snippet":"### Name: grad_hess_L_gamma\n### Title: Gradient and Hessian of the log-likelihood with respect to gamma\n### Aliases: grad_hess_L_gamma\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\neta_glm_mat_0 = matrix(0,ncol=T,nrow=I)\nfor (t in 1:T)\n{\n result_glm_0 = glm(Y[,t]~X-1,family=poisson(link='log'))\n eta_glm_mat_0[,t]=as.numeric(result_glm_0$coefficients)\n}\neta_0 = round(as.numeric(t(eta_glm_mat_0)),digits=6)\nresult = grad_hess_L_gamma(Y, X, eta_0, gamma_0, I, J)\ngrad = result$grad_L_gamma\nHessian = result$hess_L_gamma\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"variable_selection","snippet":"### Name: variable_selection\n### Title: Variable selection\n### Aliases: variable_selection\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\nresult=variable_selection(Y, X, gamma_0, k_max=1, \nn_iter=100, method=\"min\", nb_rep_ss=1000, threshold=0.6)\nestim_active = result$estim_active\neta_est = result$eta_est\ngamma_est = result$gamma_est\n\n\n"} {"package":"MultiGlarmaVarSel","topic":"MultiGlarmaVarSel","snippet":"### Name: MultiGlarmaVarSel-package\n### Title: Variable Selection in Sparse Multivariate GLARMA Models\n### Aliases: MultiGlarmaVarSel-package MultiGlarmaVarSel\n### Keywords: package\n\n### ** Examples\n\ndata(Y)\nI=3\nJ=100\nT=dim(Y)[2]\nq=1\nX=matrix(0,nrow=(I*J),ncol=I)\nfor (i in 1:I)\n{\n X[((i-1)*J+1):(i*J),i]=rep(1,J)\n}\ngamma_0 = matrix(0, nrow = 1, ncol = q)\nresult=variable_selection(Y, X, gamma_0, k_max=1, \nn_iter=100, method=\"min\", nb_rep_ss=1000, threshold=0.6)\nestim_active = result$estim_active\neta_est = result$eta_est\ngamma_est = result$gamma_est\n\n\n"} {"package":"HotellingEllipse","topic":"ellipseCoord","snippet":"### Name: ellipseCoord\n### Title: Coordinate Points Of Hotelling Ellipse\n### Aliases: ellipseCoord\n\n### ** Examples\n\n## Principal components analysis (PCA)\nlibrary(dplyr)\nset.seed(123)\npca_mod <- specData %>%\n dplyr::select(where(is.numeric)) %>%\n FactoMineR::PCA(scale.unit = FALSE, graph = FALSE)\n\n## Extract PCA scores\npca_scores <- pca_mod %>%\n purrr::pluck(\"ind\", \"coord\") %>%\n tibble::as_tibble()\n\n## Get Hotelling 
ellipse coordinate points\nlibrary(HotellingEllipse)\nxy_coord <- ellipseCoord(data = pca_scores, pcx = 1, pcy = 2, conf.limit = 0.95, pts = 200)\n\n\n\n"} {"package":"HotellingEllipse","topic":"ellipseParam","snippet":"### Name: ellipseParam\n### Title: Lengths Of The Semi-Axes Of Hotelling Ellipse\n### Aliases: ellipseParam\n\n### ** Examples\n\n## Principal components analysis (PCA)\nlibrary(dplyr)\nset.seed(123)\npca_mod <- specData %>%\n dplyr::select(where(is.numeric)) %>%\n FactoMineR::PCA(scale.unit = FALSE, graph = FALSE)\n\n## Extract PCA scores\npca_scores <- pca_mod %>%\n purrr::pluck(\"ind\", \"coord\") %>%\n tibble::as_tibble()\n\n## Get Hotelling T2-value and the lengths of the ellipse semi-axes\nlibrary(HotellingEllipse)\nT2 <- ellipseParam(data = pca_scores, k = 2, pcx = 1, pcy = 2)\n\n\n\n"} {"package":"bucky","topic":"mi.eval","snippet":"### Name: mi.eval\n### Title: Multiple-imputation evaluation\n### Aliases: mi.eval\n### Keywords: models htest\n\n### ** Examples\n\nif (require(\"Amelia\")) {\n ## Load data\n data(africa)\n africa$civlib <- factor(round(africa$civlib*6), ordered=TRUE)\n\n ## Estimate a linear model using imputed data sets\n model0 <- lm(trade ~ log(gdp_pc), data=africa, subset=year==1973)\n summary(model0)\n\n ## Impute using Amelia \n a.out <- amelia(x = africa, cs = \"country\", ts = \"year\",\n logs = \"gdp_pc\", ord=\"civlib\")\n\n ## Estimate a linear model using imputed data sets\n model1 <- mi.eval(lm(trade ~ log(gdp_pc), data=a.out, subset=year==1973))\n\n ## Show estimates\n model1\n coef(model1)\n\n ## Show summary information\n summary(model1)\n\n if (require(\"MASS\")) {\n ## Estimate an ordered logit model\n model2 <- mi.eval(polr(civlib ~ log(gdp_pc) + log(population),\n data=a.out))\n summary(model2)\n\n ## Also show thresholds by including thresholds with coefficients\n model3 <- mi.eval(polr(civlib ~ log(gdp_pc) + log(population),\n data=a.out),\n coef=function(x) c(x$coefficients, x$zeta))\n summary(model3)\n }\n}\n\n\n"} {"package":"bucky","topic":"robust.summary","snippet":"### Name: robust.summary\n### Title: Robust summary\n### Aliases: robust.summary summary.robustified\n### Keywords: robust cluster htest\n\n### ** Examples\n\n## With clustering\nclotting <- data.frame(\n cl = 1:9,\n u = c(5,10,15,20,30,40,60,80,100),\n lot = c(118,58,42,35,27,25,21,19,18,\n 69,35,26,21,18,16,13,12,12))\nclot.model <- glm(lot ~ log(u), data = clotting, family = Gamma)\nrobust.summary(clot.model, cluster=cl)\n\n## Without clustering\ndata(swiss)\nmodel1 <- lm(Fertility ~ ., data = swiss)\nrobust.summary(model1)\nmodel1r <- robustify(model1)\nsummary(model1r)\n\n\n"} {"package":"bucky","topic":"robustify","snippet":"### Name: robustify\n### Title: Robustify a model\n### Aliases: robustify\n### Keywords: robust cluster htest\n\n### ** Examples\n\n## With clustering\nclotting <- data.frame(\n cl = 1:9,\n u = c(5,10,15,20,30,40,60,80,100),\n lot = c(118,58,42,35,27,25,21,19,18,\n 69,35,26,21,18,16,13,12,12))\nclot.model <- glm(lot ~ log(u), data = clotting, family = Gamma)\nrobust.clot.model <- robustify(clot.model, cluster=cl)\nrobust.clot.model\nsummary(robust.clot.model)\n\n## Without clustering\ndata(swiss)\nmodel1 <- robustify(lm(Fertility ~ ., data = swiss))\nmodel1\nsummary(model1)\n\n\n"} {"package":"bucky","topic":"summary.mi.estimates","snippet":"### Name: summary.mi.estimates\n### Title: Summary for multiple imputation\n### Aliases: summary.mi.estimates\n### Keywords: model htest\n\n### ** Examples\n\nif (require(\"Amelia\")) {\n 
data(africa)\n a.out <- amelia(x = africa, cs = \"country\", ts = \"year\", logs = \"gdp_pc\")\n\n model <- mi.eval(lm(civlib ~ log(gdp_pc), data=a.out, subset=year==1973))\n\n summary(model)\n}\n\n\n"} {"package":"bucky","topic":"vcovCR","snippet":"### Name: vcovCR\n### Title: Clustered Robust Covariance Matrix Estimation\n### Aliases: vcovCR\n### Keywords: robust cluster\n\n### ** Examples\n\nclotting <- data.frame(\n cl = rep(1:2,each=9),\n u = c(5,10,15,20,30,40,60,80,100),\n lot = c(118,58,42,35,27,25,21,19,18,\n 69,35,26,21,18,16,13,12,12))\nclot.model <- glm(lot ~ log(u), data = clotting, family = Gamma)\nvcovCR(clot.model, cluster=cl)\n\ndata(swiss)\nmodel1 <- lm(Fertility ~ ., data = swiss)\n## These should give the same answer\nvcovCR(model1, cluster=1:nobs(model1), type=\"CR0\")\nsandwich::vcovHC(model1, type=\"HC0\")\n\n\n"} {"package":"epitools","topic":"ageadjust.direct","snippet":"### Name: ageadjust.direct\n### Title: Age standardization by direct method, with exact confidence\n### intervals\n### Aliases: ageadjust.direct\n### Keywords: models\n\n### ** Examples\n\n## Data from Fleiss, 1981, p. 249 \npopulation <- c(230061, 329449, 114920, 39487, 14208, 3052,\n72202, 326701, 208667, 83228, 28466, 5375, 15050, 175702,\n207081, 117300, 45026, 8660, 2293, 68800, 132424, 98301, \n46075, 9834, 327, 30666, 123419, 149919, 104088, 34392, \n319933, 931318, 786511, 488235, 237863, 61313)\npopulation <- matrix(population, 6, 6, \ndimnames = list(c(\"Under 20\", \"20-24\", \"25-29\", \"30-34\", \"35-39\",\n\"40 and over\"), c(\"1\", \"2\", \"3\", \"4\", \"5+\", \"Total\")))\npopulation\ncount <- c(107, 141, 60, 40, 39, 25, 25, 150, 110, 84, 82, 39,\n3, 71, 114, 103, 108, 75, 1, 26, 64, 89, 137, 96, 0, 8, 63, 112,\n262, 295, 136, 396, 411, 428, 628, 530)\ncount <- matrix(count, 6, 6, \ndimnames = list(c(\"Under 20\", \"20-24\", \"25-29\", \"30-34\", \"35-39\",\n\"40 and over\"), c(\"1\", \"2\", \"3\", \"4\", \"5+\", \"Total\")))\ncount\n\n### Use average population as standard\nstandard<-apply(population[,-6], 1, mean)\nstandard\n\n### This recreates Table 1 of Fay and Feuer, 1997\nbirth.order1<-ageadjust.direct(count[,1],population[,1],stdpop=standard)\nround(10^5*birth.order1,1)\n\nbirth.order2<-ageadjust.direct(count[,2],population[,2],stdpop=standard)\nround(10^5*birth.order2,1)\n\nbirth.order3<-ageadjust.direct(count[,3],population[,3],stdpop=standard)\nround(10^5*birth.order3,1)\n\nbirth.order4<-ageadjust.direct(count[,4],population[,4],stdpop=standard)\nround(10^5*birth.order4,1)\n\nbirth.order5p<-ageadjust.direct(count[,5],population[,5],stdpop=standard)\nround(10^5*birth.order5p,1)\n\n\n\n"} {"package":"epitools","topic":"ageadjust.indirect","snippet":"### Name: ageadjust.indirect\n### Title: Age standardization by indirect method, with exact confidence\n### intervals\n### Aliases: ageadjust.indirect\n### Keywords: models\n\n### ** Examples\n\n##From Selvin (2004)\n##enter data\ndth60 <- c(141, 926, 1253, 1080, 1869, 4891, 14956, 30888,\n41725, 26501, 5928)\n\npop60 <- c(1784033, 7065148, 15658730, 10482916, 9939972,\n10563872, 9114202, 6850263, 4702482, 1874619, 330915)\n\ndth40 <- c(45, 201, 320, 670, 1126, 3160, 9723, 17935,\n22179, 13461, 2238)\n\npop40 <- c(906897, 3794573, 10003544, 10629526, 9465330,\n8249558, 7294330, 5022499, 2920220, 1019504, 142532)\n\n##calculate age-specific rates\nrate60 <- dth60/pop60\nrate40 <- dth40/pop40\n\n#create array for display\ntab <- array(c(dth60, pop60, round(rate60*100000,1), dth40, 
pop40,\nround(rate40*100000,1)),c(11,3,2))\nagelabs <- c(\"<1\", \"1-4\", \"5-14\", \"15-24\", \"25-34\", \"35-44\", \"45-54\",\n\"55-64\", \"65-74\", \"75-84\", \"85+\")\ndimnames(tab) <- list(agelabs,c(\"Deaths\", \"Population\", \"Rate\"),\nc(\"1960\", \"1940\"))\ntab\n\n##implement direct age standardization using 'ageadjust.direct'\ndsr <- ageadjust.direct(count = dth40, pop = pop40, stdpop = pop60)\nround(100000*dsr, 2) ##rate per 100,000 per year\n\n##implement indirect age standardization using 'ageadjust.indirect'\nisr <- ageadjust.indirect(count = dth40, pop = pop40,\n stdcount = dth60, stdpop = pop60)\nround(isr$sir, 2) ##standarized incidence ratio\nround(100000*isr$rate, 1) ##rate per 100,000 per year\n\n\n\n"} {"package":"epitools","topic":"as.hour","snippet":"### Name: as.hour\n### Title: Convert date-time object into hour units\n### Aliases: as.hour\n### Keywords: chron\n\n### ** Examples\n\n\ndates <- c(\"1/1/04\", \"1/2/04\", \"1/3/04\", \"1/4/04\", \"1/5/04\",\n\"1/6/04\", \"1/7/04\", \"1/8/04\", \"1/9/04\", \"1/10/04\", NA, \"1/12/04\",\n\"1/14/04\", \"3/5/04\", \"5/5/04\", \"7/6/04\", \"8/18/04\", \"12/13/05\",\n\"1/5/05\", \"4/6/05\", \"7/23/05\", \"10/3/05\")\naw <- as.week(dates, format = \"%m/%d/%y\")\naw\n\naw2 <- as.week(dates, format = \"%m/%d/%y\", sunday= FALSE)\naw2\n\naw3 <- as.week(dates, format = \"%m/%d/%y\", min.date=\"2003-01-01\")\naw3\n\n\n\n"} {"package":"epitools","topic":"as.month","snippet":"### Name: as.month\n### Title: Convert dates into months of the year for plotting epidemic\n### curves\n### Aliases: as.month\n### Keywords: chron\n\n### ** Examples\n\ndates <- c(\"1/1/04\", \"1/2/04\", \"1/3/04\", \"1/4/04\", \"1/5/04\", \"1/6/04\",\n\"1/7/04\", \"1/8/04\", \"1/9/04\", \"1/10/04\", NA, \"1/12/04\", \"1/14/04\",\n\"3/5/04\", \"5/5/04\", \"7/6/04\", \"8/18/04\", \"12/13/05\", \"1/5/05\",\n\"4/6/05\", \"7/23/05\", \"10/3/05\")\naw <- as.month(dates, format = \"%m/%d/%y\")\naw\n\naw2 <- as.month(dates, format = \"%m/%d/%y\", min.date=\"2003-01-01\")\naw2\n\n\n\n"} {"package":"epitools","topic":"as.week","snippet":"### Name: as.week\n### Title: Convert dates object in 'disease week' for plotting epidemic\n### curves\n### Aliases: as.week\n### Keywords: chron\n\n### ** Examples\n\ndates <- c(\"1/1/04\", \"1/2/04\", \"1/3/04\", \"1/4/04\", \"1/5/04\",\n\"1/6/04\", \"1/7/04\", \"1/8/04\", \"1/9/04\", \"1/10/04\", NA, \"1/12/04\",\n\"1/14/04\", \"3/5/04\", \"5/5/04\", \"7/6/04\", \"8/18/04\", \"12/13/05\",\n\"1/5/05\", \"4/6/05\", \"7/23/05\", \"10/3/05\")\naw <- as.week(dates, format = \"%m/%d/%y\")\naw\n\naw2 <- as.week(dates, format = \"%m/%d/%y\", sunday= FALSE)\naw2\n\naw3 <- as.week(dates, format = \"%m/%d/%y\", min.date=\"2003-01-01\")\naw3\n\n\n\n"} {"package":"epitools","topic":"binom.exact","snippet":"### Name: binom.conf.int\n### Title: Confidence intervals for binomial counts or proportions\n### Aliases: binom.exact binom.wilson binom.approx\n### Keywords: univar\n\n### ** Examples\n\nbinom.exact(1:10, seq(10, 100, 10))\nbinom.wilson(1:10, seq(10, 100, 10))\nbinom.approx(1:10, seq(10, 100, 10))\n\n\n"} {"package":"epitools","topic":"colorbrewer.display","snippet":"### Name: colorbrewer\n### Title: Display and create ColorBrewer palettes\n### Aliases: colorbrewer.display colorbrewer.palette colorbrewer.data\n### Keywords: color\n\n### ** Examples\n\n##display available palettes for given nclass and type\ncolorbrewer.display(9, \"sequential\")\n\n##change background to blue\ncolorbrewer.display(9, \"sequential\", \"blue\")\n\n##display 
available palettes for given nclass and type,\n##but also display RGB numbers to create your own palette\ncbrewer.9s <- colorbrewer.display(9, \"sequential\")\ncbrewer.9s\n\n\n##Display and use ColorBrewer palette\n##first, display and choose palette (letter)\ncolorbrewer.palette(10, \"q\")\n\n##second, extract and use ColorBrewer palette\nmycolors <- colorbrewer.palette(nclass = 10, type = \"q\", palette = \"b\")\nxx <- 1:10\nyy <- outer(1:10, 1:10, \"*\")\nmatplot(xx,yy, type=\"l\", col = mycolors, lty = 1, lwd = 4)\n\n\n\n"} {"package":"epitools","topic":"colors.plot","snippet":"### Name: colors.plot\n### Title: Plots R's 657 named colors for selection\n### Aliases: colors.plot colors.matrix\n### Keywords: color\n\n### ** Examples\n\n##creates matrix with color names\ncm <- colors.matrix()\ncm[1:3, 1:3]\n\n##generates plot\ncolors.plot()\n\n##generates plot and activates 'locator'\n##don't run\n##colors.plot(TRUE)\n\n\n\n"} {"package":"epitools","topic":"epicurve.hours","snippet":"### Name: epicurve\n### Title: Construct an epidemic curve\n### Aliases: epicurve.hours epicurve.dates epicurve.weeks epicurve.months\n### epicurve.table\n### Keywords: hplot\n\n### ** Examples\n\n##epicurve.dates\nsampdates <- seq(as.Date(\"2004-07-15\"), as.Date(\"2004-09-15\"), 1)\nx <- sample(sampdates, 100, rep=TRUE)\nxs <- sample(c(\"Male\",\"Female\"), 100, rep=TRUE)\nepicurve.dates(x)\nepicurve.dates(x, strata = xs)\nrr <- epicurve.dates(x, strata = xs, segments = TRUE,\n axisnames = FALSE)\naxis(1, at = rr$xvals, labels = rr$cmday, tick = FALSE, line = 0)\naxis(1, at = rr$xvals, labels = rr$cmonth, tick = FALSE, line = 1)\n\n##epicurve.weeks\nsampdates <- seq(as.Date(\"2004-07-15\"), as.Date(\"2004-09-15\"), 1)\nx <- sample(sampdates, 100, rep=TRUE)\nxs <- sample(c(\"Male\",\"Female\"), 100, rep=TRUE)\nepicurve.weeks(x)\n\nepicurve.weeks(x, strata = xs)\n\nrr <- epicurve.weeks(x, strata = xs, segments = TRUE)\nrr\n\n\n##epicurve.months\ndates <- c(\"1/1/04\", \"1/2/04\", \"1/3/04\", \"1/4/04\", \"1/5/04\",\n\"1/6/04\", \"1/7/04\", \"1/8/04\", \"1/9/04\", \"1/10/04\", NA, \"1/12/04\",\n\"1/14/04\", \"3/5/04\", \"5/5/04\", \"7/6/04\", \"8/18/04\", \"12/13/05\",\n\"1/5/05\", \"4/6/05\", \"7/23/05\", \"10/3/05\")\naw <- as.month(dates, format = \"%m/%d/%y\")\naw\naw2 <- as.month(dates, format = \"%m/%d/%y\", min.date=\"2003-01-01\")\naw2\n\n##epicurve.hours\ndata(oswego)\n## create vector with meal date and time\nmdt <- paste(\"4/18/1940\", oswego$meal.time)\nmdt[1:10]\n## convert into standard date and time\nmeal.dt <- strptime(mdt, \"%m/%d/%Y %I:%M %p\")\nmeal.dt[1:10]\n## create vector with onset date and time\nodt <- paste(paste(oswego$onset.date,\"/1940\",sep=\"\"), oswego$onset.time)\nodt[1:10]\n## convert into standard date and time\nonset.dt <- strptime(odt, \"%m/%d/%Y %I:%M %p\")\nonset.dt[1:10] \n\n##set colors\ncol3seq.d <- c(\"#43A2CA\", \"#A8DDB5\", \"#E0F3DB\")\n\npar.fin <- par()$fin\npar(fin=c(5,3.4))\n\n##1-hour categories\nxv <- epicurve.hours(onset.dt, \"1940-04-18 12:00:00\", \"1940-04-19 12:00:00\",\n axisnames = FALSE, axes = FALSE, ylim = c(0,11),\n col = col3seq.d[1], segments = TRUE,\n strata = oswego$sex)\n\nhh <- xv$chour12==3 | xv$chour12== 6 | xv$chour12== 9\nhh2 <- xv$chour12==12\nhh3 <- xv$chour12==1\nhlab <- paste(xv$chour12,xv$campm2,sep=\"\")\nhlab2 <- paste(xv$cmonth,xv$cmday)\naxis(1, at = xv$xval[hh], labels = xv$chour12[hh], tick = FALSE, line = -.2)\naxis(1, at = xv$xval[hh2], labels = hlab[hh2], tick = FALSE, line = -.2)\naxis(1, at = xv$xval[hh3], labels = 
hlab2[hh3], tick = FALSE, line = 1.0)\naxis(2, las = 1)\ntitle(main = \"Figure 1. Cases of Gastrointestinal Illness\nby Time of Onset of Symptoms (Hour Category)\nOswego County, New York, April 18-19, 1940\",\n xlab = \"Time of Onset\",\n ylab = \"Cases\")\n\n##1/2-hour categories\nxv <- epicurve.hours(onset.dt, \"1940-04-18 12:00:00\", \"1940-04-19 12:00:00\",\n axisnames = FALSE, axes = FALSE, ylim = c(0,11),\n col = col3seq.d[1], segments = TRUE,\n half.hour = TRUE, strata = oswego$sex)\nhh <- xv$chour12==3 | xv$chour12== 6 | xv$chour12== 9\nhh2 <- xv$chour12==12\nhh3 <- xv$chour12==1\nhlab <- paste(xv$chour12,xv$campm2,sep=\"\")\nhlab2 <- paste(xv$cmonth,xv$cmday)\naxis(1, at = xv$xval[hh], labels = xv$chour12[hh], tick = FALSE, line = -.2)\naxis(1, at = xv$xval[hh2], labels = hlab[hh2], tick = FALSE, line = -.2)\naxis(1, at = xv$xval[hh3], labels = hlab2[hh3], tick = FALSE, line = 1.0)\naxis(2, las = 1)\ntitle(main = \"Figure 2. Cases of Gastrointestinal Illness\nby Time of Onset of Symptoms (1/2 Hour Category)\nOswego County, New York, April 18-19, 1940\",\n xlab = \"Time of Onset\",\n ylab = \"Cases\")\n\npar(fin=par.fin)\n\n\n##epicurve.table\nxvec <- c(1,2,3,4,5,4,3,2,1)\nepicurve.table(xvec)\n\nnames(xvec) <- 1991:1999\nepicurve.table(xvec)\n\nxmtx <- rbind(xvec, xvec)\nrownames(xmtx) <- c(\"Male\", \"Female\")\nepicurve.table(xmtx)\n\nepicurve.table(xmtx, seg = TRUE)\n\n\n\n\n"} {"package":"epitools","topic":"epidate","snippet":"### Name: epidate\n### Title: Convert dates into multiple legible formats\n### Aliases: epidate\n### Keywords: chron\n\n### ** Examples\n\n#x <- c(\"12/1/03\", \"11/2/03\", NA, \"1/7/04\", \"1/14/04\", \"8/18/04\")\n#epidate(x, format = \"%m/%d/%y\")\n#epidate(x, format = \"%m/%d/%y\", TRUE)\n#\n###convert vector of disease weeks into vector of mid-week dates\n#dwk <- sample(0:53, 100, replace = TRUE)\n#wk2date <- paste(dwk, \"/\", \"Wed\", sep=\"\")\n#wk2date[1:10]\n#wk2date2 <- epidate(wk2date, format = \"%U/%a\")\n#wk2date2$dates[1:20]\n\n\n"} {"package":"epitools","topic":"epitab","snippet":"### Name: epitab\n### Title: Epidemiologic tabulation for a cohort or case-control study\n### Aliases: epitab\n### Keywords: models\n\n### ** Examples\n\nr243 <- matrix(c(12,2,7,9), 2, 2)\ndimnames(r243) <- list(Diarrhea = c(\"Yes\", \"No\"),\n \"Antibody level\" = c(\"Low\", \"High\")\n )\nr243\nr243b <- t(r243)\nr243b\nepitab(r243, rev = \"b\", verbose = TRUE)\nepitab(r243, method=\"riskratio\",rev = \"b\", verbose = TRUE)\nepitab(matrix(c(41, 15, 28010, 19017),2,2)[2:1,],\n method=\"rateratio\", verbose = TRUE)\n\n\n\n"} {"package":"epitools","topic":"epitable","snippet":"### Name: epitable\n### Title: Create r x c contingency table (exposure levels vs. 
binary\n### outcome)\n### Aliases: epitable\n### Keywords: manip\n\n### ** Examples\n\n## single vector\ndat <- c(88, 20, 555, 347)\nepitable(dat)\n\n## 4 or more integers\nepitable(1,2,3,4,5,6)\n\n## single matrix\nepitable(matrix(1:6, 3, 2))\n\n## two categorical vectors\nexposure <- factor(sample(c(\"Low\", \"Med\", \"High\"), 100, rep=TRUE),\n levels=c(\"Low\", \"Med\", \"High\"))\noutcome <- factor(sample(c(\"No\", \"Yes\"), 100, rep=TRUE))\nepitable(exposure, outcome)\nepitable(\"Exposure\"=exposure, \"Disease\"=outcome)\n\n## reversing row and/or column order\nzz <- epitable(\"Exposure Level\"=exposure, \"Disease\"=outcome)\nzz\nepitable(zz, rev = \"r\")\nepitable(zz, rev = \"c\")\nepitable(zz, rev = \"b\")\n\n\n\n"} {"package":"epitools","topic":"expand.table","snippet":"### Name: expand.table\n### Title: Expand contingency table into individual-level data set\n### Aliases: expand.table\n### Keywords: manip\n\n### ** Examples\n\n##Creating array using 'array' function and expanding it\ntab <- array(1:8, c(2, 2, 2))\ndimnames(tab) <- list(c(\"No\",\"Yes\"), c(\"No\",\"Yes\"), c(\"No\",\"Yes\"))\nnames(dimnames(tab)) <- c(\"Exposure\", \"Disease\", \"Confounder\")\ntab\ndf <- expand.table(tab)\ndf\n\n##Creating array using 'table' function and expanding it\ntab2 <- table(Exposure = df$Exp, Disease = df$Dis, Confounder = df$Conf)\nexpand.table(tab2)\n\n##Expanding ftable object\nftab2 <- ftable(tab2)\nftab2\nexpand.table(as.table(ftab2))\n\n##Convert Titanic data into individual-level data frame\ndata(Titanic)\nexpand.table(Titanic)[1:20,]\n\n##Convert Titanic data into group-level data frame\nas.data.frame(Titanic)\n\n\n\n"} {"package":"epitools","topic":"expected","snippet":"### Name: expected\n### Title: Expected values in a table\n### Aliases: expected\n### Keywords: manip\n\n### ** Examples\n\n##From Selvin, 2001, p.2\n##year = year of birth\n##one+ = one or more congenital defects\n##one = one congenital defect\ndat <- c(369, 460, 434, 434, 506, 487, 521, 518, 526, 488,\n605, 481, 649, 477, 733, 395, 688, 348)\n\n##observed\noi <- matrix(dat, nrow =2)\ncolnames(oi) <- 1983:1991\nrownames(oi) <- c(\"one+\", \"one\")\n\n##expected\nei <- expected(oi)\n\n##Pearson chi-square test \nchi2.T <- sum((oi - ei)^2/ei)\npchisq(q = chi2.T, df = 8, lower.tail = FALSE)\n\n\n\n"} {"package":"epitools","topic":"julian2date","snippet":"### Name: julian2date\n### Title: Convert a julian date into standard a date format\n### Aliases: julian2date\n### Keywords: chron\n\n### ** Examples\n\nmydates <- c(\"1/1/04\", \"1/2/04\", \"1/7/04\", \"1/14/04\", \"8/18/04\");\nmydates <- as.Date(mydates, format = \"%m/%d/%y\")\nmydates\nmyjulian <- julian(mydates)\nmyjulian\njulian2date(myjulian)\n\n\n\n"} {"package":"epitools","topic":"kapmeier","snippet":"### Name: kapmeier\n### Title: Implements product-limit (Kaplan-Meier) method\n### Aliases: kapmeier\n### Keywords: survival\n\n### ** Examples\n\n##Product-limit method using 'kapmeier' function\ntt <- c(1,17,20,9,24,16,2,13,10,3)\nss <- c(1,1,1,1,0,0,0,1,0,1)\nround(kapmeier(tt, ss), 3)\n\n\n"} {"package":"epitools","topic":"oddsratio","snippet":"### Name: oddsratio\n### Title: Odds ratio estimation and confidence intervals\n### Aliases: oddsratio oddsratio.midp oddsratio.fisher oddsratio.wald\n### oddsratio.small\n### Keywords: models\n\n### ** Examples\n\n\n##Case-control study assessing whether exposure to tap water\n##is associated with cryptosporidiosis among AIDS patients\n\ntapw <- c(\"Lowest\", \"Intermediate\", \"Highest\")\noutc <- c(\"Case\", 
\"Control\")\t\ndat <- matrix(c(2, 29, 35, 64, 12, 6),3,2,byrow=TRUE)\ndimnames(dat) <- list(\"Tap water exposure\" = tapw, \"Outcome\" = outc)\noddsratio(dat, rev=\"c\")\noddsratio.midp(dat, rev=\"c\")\noddsratio.fisher(dat, rev=\"c\")\noddsratio.wald(dat, rev=\"c\")\noddsratio.small(dat, rev=\"c\")\n\n\n\n"} {"package":"epitools","topic":"or.midp","snippet":"### Name: or.midp\n### Title: Odds ratio estimation and confidence intervals using mid-p\n### method\n### Aliases: or.midp\n### Keywords: models\n\n### ** Examples\n\n##rothman p. 243\nz1 <- matrix(c(12,2,7,9),2,2,byrow=TRUE)\nz2 <- z1[2:1,2:1]\n##jewell p. 79\nz3 <- matrix(c(347,555,20,88),2,2,byrow=TRUE)\nz4 <- z3[2:1,2:1]\nor.midp(z1)\nor.midp(z2)\nor.midp(z3)\nor.midp(z4)\n\n\n"} {"package":"epitools","topic":"ormidp.test","snippet":"### Name: ormidp.test\n### Title: odds ratio test for independence (p value) for a 2x2 table\n### Aliases: ormidp.test\n### Keywords: htest\n\n### ** Examples\n\n##rothman p. 243\normidp.test(12,2,7,9)\n\n##jewell p. 79\normidp.test(347,555,20,88)\n\n\n"} {"package":"epitools","topic":"pois.exact","snippet":"### Name: pois.conf.int\n### Title: Confidence intervals for Poisson counts or rates\n### Aliases: pois.exact pois.daly pois.byar pois.approx\n### Keywords: univar\n\n### ** Examples\n\npois.exact(1:10)\npois.exact(1:10, 101:110)\npois.daly(1:10)\npois.daly(1:10, 101:110)\npois.byar(1:10)\npois.byar(1:10, 101:110)\npois.approx(1:10)\npois.approx(1:10, 101:110)\n\n\n"} {"package":"epitools","topic":"probratio","snippet":"### Name: probratio\n### Title: Obtain unbiased probability ratios from logistic regression\n### models\n### Aliases: probratio\n### Keywords: models risk\n\n### ** Examples\n\n set.seed(123)\n x <- rnorm(500)\n y <- rbinom(500, 1, exp(-1 + .3*x))\n logreg <- glm(y ~ x, family=binomial)\n confint.default(logreg) ## 95% CI over-estimates the 0.3 log-RR\n pr1 <- probratio(logreg, method='ML', scale='log', start=c(log(mean(y)), 0)) \n \n ## generally more efficient to calculate log-RR then exponentiate for non-symmetric 95% CI\n pr1 <- probratio(logreg, scale='log', method='delta')\n pr2 <- probratio(logreg, scale='linear', method='delta')\n exp(pr1[, 5:6])\n pr2[, 5:6]\n\n\n"} {"package":"epitools","topic":"rate2by2.test","snippet":"### Name: rate2by2.test\n### Title: Comparative tests of independence in rx2 rate tables\n### Aliases: rate2by2.test\n### Keywords: htest\n\n### ** Examples\n\n##Examples from Rothman 1998, p. 238\nbc <- c(Unexposed = 15, Exposed = 41)\npyears <- c(Unexposed = 19017, Exposed = 28010)\ndd <- matrix(c(41,15,28010,19017),2,2)\ndimnames(dd) <- list(Exposure=c(\"Yes\",\"No\"), Outcome=c(\"BC\",\"PYears\"))\n##midp\nrate2by2.test(bc,pyears)\nrate2by2.test(dd, rev = \"r\")\nrate2by2.test(matrix(c(15, 41, 19017, 28010),2,2))\nrate2by2.test(c(15, 41, 19017, 28010))\n\n\n"} {"package":"epitools","topic":"rateratio","snippet":"### Name: rateratio\n### Title: Rate ratio estimation and confidence intervals\n### Aliases: rateratio rateratio.midp rateratio.wald\n### Keywords: models\n\n### ** Examples\n\n\n##Examples from Rothman 1998, p. 
238\nbc <- c(Unexposed = 15, Exposed = 41)\npyears <- c(Unexposed = 19017, Exposed = 28010)\ndd <- matrix(c(41,15,28010,19017),2,2)\ndimnames(dd) <- list(Exposure=c(\"Yes\",\"No\"), Outcome=c(\"BC\",\"PYears\"))\n##midp\nrateratio(bc,pyears)\nrateratio(dd, rev = \"r\")\nrateratio(matrix(c(15, 41, 19017, 28010),2,2))\nrateratio(c(15, 41, 19017, 28010))\n\n##midp\nrateratio.midp(bc,pyears)\nrateratio.midp(dd, rev = \"r\")\nrateratio.midp(matrix(c(15, 41, 19017, 28010),2,2))\nrateratio.midp(c(15, 41, 19017, 28010))\n\n##wald\nrateratio.wald(bc,pyears)\nrateratio.wald(dd, rev = \"r\")\nrateratio.wald(matrix(c(15, 41, 19017, 28010),2,2))\nrateratio.wald(c(15, 41, 19017, 28010))\n\n\n"} {"package":"epitools","topic":"ratetable","snippet":"### Name: ratetable\n### Title: Create r x 2 count and person-time table for calculating rates\n### Aliases: ratetable\n### Keywords: manip\n\n### ** Examples\n\n##Breast cancer cases from radiation treatment for tuberculosis\n##Rothman 1998, p. 238\nbc0 <- 15\nbc1 <- 41\npy0 <- 19017\npy1 <- 28010\n\n##4 numbers\nratetable(bc0, py0, bc1, py1)\n\n##1 vector\ndat <- c(bc0, py0, bc1, py1)\nratetable(dat)\n\n##2 vectors\ncases <- c(bc0, bc1)\npyears <- c(py0, py1)\nratetable(bc.cases = cases, person.years = pyears)\n\n##1 matrix\nr238 <- matrix(c(41, 28010, 15, 19017), 2, 2)\ndimnames(r238) <- list(c(\"BC cases\", \"Person-years\"),\n \"Radiation\" = c(\"Yes\", \"No\"))\nr238\nr238b <- t(r238)\nr238b\nratetable(r238b, rev = \"r\")\n\n\n\n"} {"package":"epitools","topic":"riskratio","snippet":"### Name: riskratio\n### Title: Risk ratio estimation and confidence intervals\n### Aliases: riskratio riskratio.wald riskratio.small riskratio.boot\n### Keywords: models\n\n### ** Examples\n\n\n##Case-control study assessing whether exposure to tap water\n##is associated with cryptosporidiosis among AIDS patients\n\ntapw <- c(\"Lowest\", \"Intermediate\", \"Highest\")\noutc <- c(\"Case\", \"Control\")\t\ndat <- matrix(c(2, 29, 35, 64, 12, 6),3,2,byrow=TRUE)\ndimnames(dat) <- list(\"Tap water exposure\" = tapw, \"Outcome\" = outc)\nriskratio(dat, rev=\"c\")\nriskratio.wald(dat, rev=\"c\")\nriskratio.small(dat, rev=\"c\")\n\n##Selvin 1998, p. 
289\nsel <- matrix(c(178, 79, 1411, 1486), 2, 2)\ndimnames(sel) <- list(\"Behavior type\" = c(\"Type A\", \"Type B\"),\n \"Outcome\" = c(\"CHD\", \"No CHD\")\n )\nriskratio.boot(sel, rev = \"b\")\nriskratio.boot(sel, rev = \"b\", verbose = TRUE)\nriskratio(sel, rev = \"b\", method = \"boot\")\n\n\n\n"} {"package":"epitools","topic":"tab2by2.test","snippet":"### Name: tab2by2.test\n### Title: Comparative tests of independence in rx2 contingency tables\n### Aliases: tab2by2.test\n### Keywords: htest\n\n### ** Examples\n\n\n##Case-control study assessing whether exposure to tap water\n##is associated with cryptosporidiosis among AIDS patients\n\ntapw <- c(\"Lowest\", \"Intermediate\", \"Highest\")\noutc <- c(\"Case\", \"Control\")\t\ndat <- matrix(c(2, 29, 35, 64, 12, 6),3,2,byrow=TRUE)\ndimnames(dat) <- list(\"Tap water exposure\" = tapw, \"Outcome\" = outc)\ntab2by2.test(dat, rev=\"c\")\n\n\n\n"} {"package":"epitools","topic":"table.margins","snippet":"### Name: table.margins\n### Title: Marginal totals of a table\n### Aliases: table.margins\n### Keywords: manip\n\n### ** Examples\n\nx <- matrix(1:4, 2, 2)\ntable.margins(x)\n\n\n\n"} {"package":"runstats","topic":"RunningCor","snippet":"### Name: RunningCor\n### Title: Fast Running Correlation Computation\n### Aliases: RunningCor\n\n### ** Examples\n\nx <- sin(seq(0, 1, length.out = 1000) * 2 * pi * 6)\ny <- x[1:100]\nout1 <- RunningCor(x, y, circular = TRUE)\nout2 <- RunningCor(x, y, circular = FALSE)\nplot(out1, type = \"l\"); points(out2, col = \"red\")\n\n\n\n"} {"package":"runstats","topic":"RunningCov","snippet":"### Name: RunningCov\n### Title: Fast Running Covariance Computation\n### Aliases: RunningCov\n\n### ** Examples\n\nx <- sin(seq(0, 1, length.out = 1000) * 2 * pi * 6)\ny <- x[1:100]\nout1 <- RunningCov(x, y, circular = TRUE)\nout2 <- RunningCov(x, y, circular = FALSE)\nplot(out1, type = \"l\"); points(out2, col = \"red\")\n\n\n\n"} {"package":"runstats","topic":"RunningL2Norm","snippet":"### Name: RunningL2Norm\n### Title: Fast Running L2 Norm Computation\n### Aliases: RunningL2Norm\n\n### ** Examples\n\n## Ex.1.\nx <- sin(seq(0, 1, length.out = 1000) * 2 * pi * 6)\ny1 <- x[1:100] + rnorm(100)\ny2 <- rnorm(100)\nout1 <- RunningL2Norm(x, y1)\nout2 <- RunningL2Norm(x, y2)\nplot(out1, type = \"l\"); points(out2, col = \"blue\")\n## Ex.2.\nx <- sin(seq(0, 1, length.out = 1000) * 2 * pi * 6)\ny <- x[1:100] + rnorm(100)\nout1 <- RunningL2Norm(x, y, circular = TRUE)\nout2 <- RunningL2Norm(x, y, circular = FALSE)\nplot(out1, type = \"l\"); points(out2, col = \"red\")\n\n\n"} {"package":"runstats","topic":"RunningMean","snippet":"### Name: RunningMean\n### Title: Fast Running Mean Computation\n### Aliases: RunningMean\n\n### ** Examples\n\nx <- rnorm(10)\nRunningMean(x, 3, circular = FALSE)\nRunningMean(x, 3, circular = TRUE)\n\n\n\n"} {"package":"runstats","topic":"RunningSd","snippet":"### Name: RunningSd\n### Title: Fast Running Standard Deviation Computation\n### Aliases: RunningSd\n\n### ** Examples\n\nx <- rnorm(10)\nRunningSd(x, 3, circular = FALSE)\nRunningSd(x, 3, circular = TRUE)\n\n\n\n"} {"package":"runstats","topic":"RunningVar","snippet":"### Name: RunningVar\n### Title: Fast Running Variance Computation\n### Aliases: RunningVar\n\n### ** Examples\n\nx <- rnorm(10)\nRunningVar(x, W = 3, circular = FALSE)\nRunningVar(x, W = 3, circular = TRUE)\n\n\n\n"} {"package":"runstats","topic":"runstats.demo","snippet":"### Name: runstats.demo\n### Title: Demo visualization of package functions\n### Aliases: runstats.demo\n\n### ** 
Examples\n\n## Not run: \n##D runstats.demo(func.name = \"RunningMean\")\n##D runstats.demo(func.name = \"RunningSd\")\n##D runstats.demo(func.name = \"RunningVar\")\n##D runstats.demo(func.name = \"RunningCov\")\n##D runstats.demo(func.name = \"RunningCor\")\n##D runstats.demo(func.name = \"RunningL2Norm\")\n## End(Not run)\n\n\n\n"} {"package":"LowWAFOMNX","topic":"LowWAFOMNX-package","snippet":"### Name: LowWAFOMNX-package\n### Title: Low WAFOM Niederreiter-Xing Sequence\n### Aliases: LowWAFOMNX-package LowWAFOMNX\n\n### ** Examples\n\nsrange <- lowWAFOMNX.dimMinMax()\nmrange <- lowWAFOMNX.dimF2MinMax(srange[1])\npoints <- lowWAFOMNX.points(dimR=srange[1], dimF2=mrange[1])\npoints <- lowWAFOMNX.points(dimR=srange[1], dimF2=mrange[1], digitalShift=TRUE)\n\n\n"} {"package":"label.switching","topic":"aic","snippet":"### Name: aic\n### Title: Artificial Identifiability Constraints\n### Aliases: aic\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. The 300 generated MCMC samples are stored \n#to array mcmc.pars. \ndata(\"mcmc_output\")\nmcmc.pars<-data_list$\"mcmc.pars\"\n\n# mcmc parameters are stored to array \code{mcmc.pars}\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances of the two components\n# mcmc.pars[,,3]: simulated weights of the two components\n# We will apply AIC by ordering the means\n# which corresponds to value \code{constraint=1}\nrun<-aic(mcmc = mcmc.pars,constraint=1)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances of the components\n# reordered.mcmc[,,3]: reordered weights \n\n\n"} {"package":"label.switching","topic":"dataBased","snippet":"### Name: dataBased\n### Title: Data-based labelling\n### Aliases: dataBased\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. The 300 generated MCMC samples are stored \n#to array mcmc.pars. \ndata(\"mcmc_output\")\nz<-data_list$\"z\"\nK<-data_list$\"K\"\nx<-data_list$\"x\"\nmcmc.pars<-data_list$\"mcmc.pars\"\n# mcmc parameters are stored to array \code{mcmc.pars}\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances of the two components\n# mcmc.pars[,,3]: simulated weights of the two components\n# Apply dataBased relabelling\nrun<-dataBased(x = x, K = K, z = z)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances of the components\n# reordered.mcmc[,,3]: reordered weights \n\n\n"} {"package":"label.switching","topic":"ecr","snippet":"### Name: ecr\n### Title: ECR algorithm (default version)\n### Aliases: ecr\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n#\tapplied to a normal mixture of \code{K=2} components. The\n# \tnumber of observations is equal to \code{n=5}. The number\n#\tof MCMC samples is equal to \code{m=300}. 
The 300 \n#\tsimulated allocations are stored to array \code{z}. The \n#\tcomplete MAP estimate corresponds to iteration \code{mapindex}.\ndata(\"mcmc_output\")\nz<-data_list$\"z\"\nK<-data_list$\"K\"\nmapindex<-data_list$\"mapindex\"\n\n# mcmc parameters are stored to array \code{mcmc.pars}\nmcmc.pars<-data_list$\"mcmc.pars\"\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances \n# mcmc.pars[,,3]: simulated weights\nrun<-ecr(zpivot = z[mapindex,],z = z, K = K)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances\n# reordered.mcmc[,,3]: reordered weights\n\n\n"} {"package":"label.switching","topic":"ecr.iterative.1","snippet":"### Name: ecr.iterative.1\n### Title: ECR algorithm (iterative version 1)\n### Aliases: ecr.iterative.1\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. The 300 simulated allocations are stored to\n# array \code{z}. \ndata(\"mcmc_output\")\n# mcmc parameters are stored to array \code{mcmc.pars}\nmcmc.pars<-data_list$\"mcmc.pars\"\nz<-data_list$\"z\"\nK<-data_list$\"K\"\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances \n# mcmc.pars[,,3]: simulated weights\n# the relabelling algorithm will run with the default initialization\n# (no opt_init is specified)\nrun<-ecr.iterative.1(z = z, K = K)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances\n# reordered.mcmc[,,3]: reordered weights\n\n\n"} {"package":"label.switching","topic":"ecr.iterative.2","snippet":"### Name: ecr.iterative.2\n### Title: ECR algorithm (iterative version 2)\n### Aliases: ecr.iterative.2\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. The 300 simulated allocations are stored to\n# array \code{z}. The matrix of allocation probabilities is stored to\n# array \code{p}. 
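\n# Added aside (not part of the original example): unlike ecr(), this\n# iterative version needs no pivot allocation; it takes the m x n x K\n# array of allocation probabilities \code{p} together with the simulated\n# allocations \code{z}.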
\ndata(\"mcmc_output\")\nz<-data_list$\"z\"\nK<-data_list$\"K\"\np<-data_list$\"p\"\n# mcmc parameters are stored to array \\code{mcmc.pars}\nmcmc.pars<-data_list$\"mcmc.pars\"\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances \n# mcmc.pars[,,3]: simulated weights\n# the relabelling algorithm will run with the default initialization\n# (no opt_init is specified)\nrun<-ecr.iterative.2(z = z, K = 2, p = p)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two mixture components\n# reordered.mcmc[,,2]: reordered variances of the two components\n# reordered.mcmc[,,3]: reordered weights of the two components\n\n\n"} {"package":"label.switching","topic":"label.switching","snippet":"### Name: label.switching\n### Title: Main calling function\n### Aliases: label.switching\n\n### ** Examples\n\n# We will apply the following methods:\n# ECR, ECR-ITERATIVE-1, PRA, AIC and DATA-BASED.\n# default ECR will use two different pivots.\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \\code{K=2} components. The number of\n# observations is equal to \\code{n=5}. The number of MCMC samples is\n# equal to \\code{m=300}. simulated allocations are stored to array \\code{z}. \ndata(\"mcmc_output\")\nmcmc.pars<-data_list$\"mcmc.pars\"\n# mcmc parameters are stored to array \\code{mcmc.pars}\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances \n# mcmc.pars[,,3]: simulated weights \n# We will use two pivots for default ECR algorithm:\n# the first one corresponds to iteration \\code{mapindex} (complete MAP)\n# the second one corresponds to iteration \\code{mapindex.non} (observed MAP)\n\nz<-data_list$\"z\"\nK<-data_list$\"K\"\nx<-data_list$\"x\"\nmapindex<-data_list$\"mapindex\"\nmapindex.non<-data_list$\"mapindex.non\"\n# The PRA method will use as pivot the iteration that corresponds to\n# the observed MAP estimate (mapindex). 
\n\n#Apply (a subset of the available) methods by typing:\n\nls<-label.switching(method=c(\"ECR\",\"ECR-ITERATIVE-1\",\"PRA\", \"AIC\",\"DATA-BASED\"),\nzpivot=z[c(mapindex,mapindex.non),],z = z,K = K, data = x,\nprapivot = mcmc.pars[mapindex,,],mcmc = mcmc.pars)\n\n#plot the raw and reordered means of the K=2 normal mixture components for each method\npar(mfrow = c(2,4))\n#raw MCMC output for the means (with label switching)\nmatplot(mcmc.pars[,,1],type=\"l\",\nxlab=\"iteration\",main=\"Raw MCMC output\",ylab = \"means\")\n# Reordered outputs\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"ECR-1\")$output[,,1],type=\"l\",\nxlab=\"iteration\",main=\"ECR (1st pivot)\",ylab = \"means\")\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"ECR-2\")$output[,,1],type=\"l\",\nxlab=\"iteration\",main=\"ECR (2nd pivot)\",ylab = \"means\")\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"ECR-ITERATIVE-1\")$output[,,1],\ntype=\"l\",xlab=\"iteration\",main=\"ECR-iterative-1\",ylab = \"means\")\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"PRA\")$output[,,1],type=\"l\",\nxlab=\"iteration\",main=\"PRA\",ylab = \"means\")\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"AIC\")$output[,,1],type=\"l\",\nxlab=\"iteration\",main=\"AIC\",ylab = \"means\")\nmatplot(permute.mcmc(mcmc.pars,ls$permutations$\"DATA-BASED\")$output[,,1],type=\"l\",\nxlab=\"iteration\",main=\"DATA-BASED\",ylab = \"means\")\n\n#######################################################\n# if the useR wants to apply the STEPHENS and SJW algorithm as well:\n# The STEPHENS method requires the classification probabilities\np<-data_list$\"p\"\n\n# The SJW method needs to define the complete log-likelihood of the\n# model. For the univariate normal mixture, this is done as follows:\n\ncomplete.normal.loglikelihood<-function(x,z,pars){\n\t#x: denotes the n data points\n\t#z: denotes an allocation vector (size=n)\n\t#pars: K\\times 3 vector of means,variance, weights\n\t# pars[k,1]: corresponds to the mean of component k\n\t# pars[k,2]: corresponds to the variance of component k\n\t# pars[k,3]: corresponds to the weight of component k\n\tg <- dim(pars)[1]\n\tn <- length(x)\n\tlogl<- rep(0, n)\n \tlogpi <- log(pars[,3])\n\tmean <- pars[,1]\n\tsigma <- sqrt(pars[,2])\n\tlogl<-logpi[z] + dnorm(x,mean = mean[z],sd = sigma[z],log = T)\n\treturn(sum(logl))\n}\n\n# and then run (after removing all #):\n#ls<-label.switching(method=c(\"ECR\",\"ECR-ITERATIVE-1\",\"ECR-ITERATIVE-2\",\n#\"PRA\",\"STEPHENS\",\"SJW\",\"AIC\",\"DATA-BASED\"),\n#zpivot=z[c(mapindex,mapindex.non),],z = z,\n#K = K,prapivot = mcmc.pars[mapindex,,],p=p,\n#complete = complete.normal.loglikelihood,mcmc.pars,\n#data = x)\n\n\n\n"} {"package":"label.switching","topic":"permute.mcmc","snippet":"### Name: permute.mcmc\n### Title: Reorder MCMC samples\n### Aliases: permute.mcmc\n\n### ** Examples\n\n#load MCMC simulated data\ndata(\"mcmc_output\")\nmcmc.pars<-data_list$\"mcmc.pars\"\nz<-data_list$\"z\"\nK<-data_list$\"K\"\n\n#apply \\code{ecr.iterative.1} algorithm\nrun<-ecr.iterative.1(z = z, K = 2)\n#reorder the MCMC output according to this method:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances of the components\n# reordered.mcmc[,,3]: reordered weights of the two components\n\n\n"} {"package":"label.switching","topic":"pra","snippet":"### Name: pra\n### Title: PRA algorithm\n### Aliases: pra\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the 
random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. The 300 generated MCMC samples are stored \n#to array mcmc.pars. \ndata(\"mcmc_output\")\nmcmc.pars<-data_list$\"mcmc.pars\"\nmapindex<-data_list$\"mapindex\"\n\n# mcmc parameters are stored to array \code{mcmc.pars}\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances of the two components\n# mcmc.pars[,,3]: simulated weights of the two components\n# We will apply PRA using as pivot the complete MAP estimate\n# which corresponds to \code{mcmc.pars[mapindex,,]}\nrun<-pra(mcmc = mcmc.pars, pivot = mcmc.pars[mapindex,,])\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances of the components\n# reordered.mcmc[,,3]: reordered weights \n\n\n"} {"package":"label.switching","topic":"sjw","snippet":"### Name: sjw\n### Title: Probabilistic relabelling algorithm\n### Aliases: sjw\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number of\n# observations is equal to \code{n=5}. The number of MCMC samples is\n# equal to \code{m=300}. \ndata(\"mcmc_output\")\nmcmc.pars<-data_list$\"mcmc.pars\"\nz<-data_list$\"z\"\nK<-data_list$\"K\"\nx<-data_list$\"x\"\n\n# mcmc parameters are stored to array \code{mcmc.pars}\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances\n# mcmc.pars[,,3]: simulated weights \n# The number of different parameters for the univariate\n# normal mixture is equal to J = 3: means, variances \n# and weights. The generated allocation variables are \n# stored to \code{z}. The observed data is stored to \code{x}. \n# The complete data log-likelihood is defined as follows:\ncomplete.normal.loglikelihood<-function(x,z,pars){\n#\tx: data (size = n)\n#\tz: allocation vector (size = n)\n#\tpars: K\times J vector of normal mixture parameters:\n#\t\tpars[k,1] = mean of the k-normal component\n#\t\tpars[k,2] = variance of the k-normal component\n#\t\tpars[k,3] = weight of the k-normal component\n#\t\t\tk = 1,...,K\n\tg <- dim(pars)[1] #K (number of mixture components)\n\tn <- length(x)\t#this denotes the sample size\n\tlogl<- rep(0, n)\t\n \tlogpi <- log(pars[,3])\n\tmean <- pars[,1]\n\tsigma <- sqrt(pars[,2])\n\tlogl<-logpi[z] + dnorm(x,mean = mean[z],sd = sigma[z],log = TRUE)\n\treturn(sum(logl))\n}\n\n#run the algorithm:\nrun<-sjw(mcmc = mcmc.pars,z = z, \ncomplete = complete.normal.loglikelihood,x = x, init=0,threshold = 1e-4)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the two components\n# reordered.mcmc[,,2]: reordered variances \n# reordered.mcmc[,,3]: reordered weights\n\n\n"} {"package":"label.switching","topic":"stephens","snippet":"### Name: stephens\n### Title: Stephens' algorithm\n### Aliases: stephens\n\n### ** Examples\n\n#load a toy example: MCMC output consists of the random beta model\n# applied to a normal mixture of \code{K=2} components. The number \n# of observations is equal to \code{n=5}. The number of MCMC samples\n# is equal to \code{m=300}. The matrix of allocation probabilities \n# is stored to matrix \code{p}. 
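\n# Added note (not part of the original example): stephens() works on the\n# m x n x K array of classification probabilities alone; the parameter\n# draws are only needed afterwards, when permute.mcmc() applies the\n# resulting permutations.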
\ndata(\"mcmc_output\")\n# mcmc parameters are stored to array \\code{mcmc.pars}\nmcmc.pars<-data_list$\"mcmc.pars\"\n# mcmc.pars[,,1]: simulated means of the two components\n# mcmc.pars[,,2]: simulated variances \n# mcmc.pars[,,3]: simulated weights \n# the computed allocation matrix is p\np<-data_list$\"p\"\nrun<-stephens(p)\n# apply the permutations returned by typing:\nreordered.mcmc<-permute.mcmc(mcmc.pars,run$permutations)\n# reordered.mcmc[,,1]: reordered means of the components\n# reordered.mcmc[,,2]: reordered variances\n# reordered.mcmc[,,3]: reordered weights\n\n\n"} {"package":"HSEtest","topic":"HSE","snippet":"### Name: HSE\n### Title: Homogeneity of Stratum Effects Test\n### Aliases: HSE\n\n### ** Examples\n\ntable1 <- matrix(c(30,4,3,20),ncol=2)\ntable2 <- matrix(c(25,2,3,20),ncol=2)\ntable <- rbind(table1,table2)\nHSE(table)\n\n\n"} {"package":"na.tools","topic":"all_na","snippet":"### Name: all_na\n### Title: Tests for missing values\n### Aliases: all_na all_na.default any_na is_na which_na\n\n### ** Examples\n\n\n all_na( c( NA, NA, 1 ) ) # FALSE\n all_na( c( NA, NA, NA ) ) # TRUE\n \n df <- data.frame( char = rep(NA_character_, 3), nums=1:3)\n all_na(df) # FALSE\n \n df <- data.frame( char = rep(NA_character_, 3), nums=rep(NA_real_,3))\n all_na(df) # TRUE\n \n any_na( 1:10 ) # FALSE\n any_na( c( 1, NA, 3 ) ) # TRUE\n\n\n x <- c( 1, NA, NA, 4:6 )\n which_na(x)\n \n names(x) <- letters[1:6]\n which_na(x)\n \n\n\n"} {"package":"na.tools","topic":"coerce_safe","snippet":"### Name: coerce_safe\n### Title: coerce_safe\n### Aliases: coerce_safe\n\n### ** Examples\n\n\n## Not run: \n##D # Error\n##D coerce_safe(1.01, \"integer\") # 1.01 != 1\n##D coerce_safe( c(\"1\",\"2\",\"a\"), \"integer\" )\n## End(Not run)\n\n\n\n"} {"package":"na.tools","topic":"impute-commutative","snippet":"### Name: impute-commutative\n### Title: Imputation by Cummutative Functions Impute using replacement\n### values calculated from a univariate, cummuative function.\n### Aliases: impute-commutative na.max na.min na.mean na.median na.quantile\n### na.mode na.most_freq\n\n### ** Examples\n\n na.median( c(1,2,NA_real_,3) )\n \n na.quantile( c(1,2,NA_real_,3), prob=0.4 )\n\n na.mode( c(1,1,NA,4) )\n na.mode( c(1,1,4,4,NA) ) \n\n\n\n"} {"package":"na.tools","topic":"impute-constant","snippet":"### Name: impute-constant\n### Title: Impute by Constant Value Replaces 'NA's by a constant\n### Aliases: impute-constant na.constant na.inf na.neginf na.true na.false\n### na.zero\n\n### ** Examples\n\n na.constant( c(1,NA,2), -1 )\n \n na.inf( c( 1, 2, NA, 4) )\n na.neginf( c( 1, 2, NA, 4) ) \n \n na.true( c(TRUE, NA_logical, FALSE) ) # T T F\n na.false( c(TRUE, NA_logical, FALSE) ) # T F F\n\n \n \n na.zero( c(1,NA,3) ) # 1 0 3 \n\n \n \n\n\n"} {"package":"na.tools","topic":"n_na","snippet":"### Name: n_na\n### Title: Counts how many values are NA\n### Aliases: n_na na.howmany na.n pct_na na.pct\n\n### ** Examples\n\n x <- c( 1, NA, NA, 4:5 )\n n_na(x)\n pct_na(x)\n\n\n\n"} {"package":"na.tools","topic":"na.bootstrap","snippet":"### Name: na.bootstrap\n### Title: na.bootstrap\n### Aliases: na.bootstrap na.resample\n\n### ** Examples\n\n x <- c(1,NA,3)\n na.bootstrap(x)\n \n\n\n"} {"package":"na.tools","topic":"na.replace","snippet":"### Name: na.replace\n### Title: Replace Missing Values\n### Aliases: na.replace na.explicit\n\n### ** Examples\n\n\n # Integers and numerics\n na.replace( c(1,NA,3,NA), 2 ) # 1 2 3 2 \n na.replace( c(1,NA,3,NA), 1:4 ) # 1 2 3 4\n\n # This produces an error because it would change the 
type\n ## Not run: \n##D na.replace( c(1,NA,3,NA), letters[1:4] ) # \"1\" \"b\" \"3\" \"d\"\n##D \n## End(Not run)\n \n # Characters \n lets <- letters[1:5]\n lets[ c(2,4) ] <- NA\n na.replace(lets) # replace with NA_explicit_\n\n # Factors \n fct <- as.factor( c( NA, letters[2:4], NA) )\n fct\n na.replace(fct, \"z\") # z b c d z -- level z added\n na.replace(fct, letters[1:5] )\n na.replace(fct)\n \n ## Not run: \n##D na.replace( rep(NA,3), rep(NA,3) )\n##D \n## End(Not run)\n \n\n\n"} {"package":"na.tools","topic":"na.unreplace","snippet":"### Name: na.unreplace\n### Title: na.unreplace\n### Aliases: na.unreplace na.unreplace.default na.unreplace.character\n### na.unreplace.factor na.implicit\n\n### ** Examples\n\n\n na.unreplace( c(1,2,3,4), 3 )\n na.unreplace( c(\"A\", \"(NA)\", \"B\", \"C\") )\n na.unreplace( c(\"A\", NA_explicit_, \"B\", \"C\") )\n \n df <- data.frame( char=c('A', 'NA', 'C', NA_explicit_), num=1:4 ) \n na.unreplace(df)\n \n \n\n\n"} {"package":"ExtremeBounds","topic":"eba","snippet":"### Name: eba\n### Title: Extreme Bounds Analysis\n### Aliases: eba summary.eba coefficients.eba\n### Keywords: models regression nonlinear robust multivariate\n\n### ** Examples\n\n# perform Extreme Bounds Analysis\n\neba.results <- eba(formula = mpg ~ wt | hp + gear | cyl + disp + drat + qsec + vs + am + carb,\n data = mtcars[1:10, ], exclusive = ~ cyl + disp + hp | am + gear)\n\n# The same result can be achieved by running:\n# eba.results <- eba(data = mtcars[1:10, ], y = \"mpg\", free = \"wt\",\n# doubtful = c(\"cyl\", \"disp\", \"hp\", \"drat\", \"qsec\", \n# \"vs\", \"am\", \"gear\", \"carb\"),\n# focus = c(\"hp\", \"gear\"), \n# exclusive = list(c(\"cyl\", \"disp\", \"hp\"), \n# c(\"am\", \"gear\")))\n\n# print out results\nprint(eba.results)\n\n# create histograms\nhist(eba.results, variables = c(\"hp\",\"gear\"),\n main = c(\"hp\" = \"Gross horsepower\", \"gear\" = \"Number of forward gears\"))\n\n\n"} {"package":"ExtremeBounds","topic":"hist.eba","snippet":"### Name: hist.eba\n### Title: Histograms for Extreme Bounds Analysis\n### Aliases: hist.eba\n### Keywords: models regression nonlinear robust multivariate\n\n### ** Examples\n\n# perform Extreme Bounds Analysis\n\neba.results <- eba(formula = mpg ~ wt | hp + gear | cyl + disp + drat + qsec + vs + am + carb,\n data = mtcars[1:10, ], k = 0:2)\n\n# The same result can be achieved by running:\n# eba.results <- eba(data = mtcars[1:10, ], y = \"mpg\", free = \"wt\",\n# doubtful = c(\"cyl\",\"disp\",\"hp\",\"drat\",\"qsec\",\"vs\",\"am\",\"gear\",\"carb\"),\n# focus = c(\"hp\",\"gear\"), k = 0:2)\n\n# create histograms, keeping the default settings\nhist(eba.results)\n\n# re-create histograms with customized settings\nhist(eba.results, variables = c(\"hp\",\"gear\"),\n main = c(\"hp\" = \"Gross horsepower\", \"gear\" = \"Number of forward gears\"),\n mu.visible=FALSE, normal.show=TRUE, normal.lwd=1)\n\n\n"} {"package":"ExtremeBounds","topic":"print.eba","snippet":"### Name: print.eba\n### Title: Print Extreme Bounds Analysis Results\n### Aliases: print.eba\n### Keywords: models regression nonlinear robust multivariate\n\n### ** Examples\n\n# perform Extreme Bounds Analysis\n\neba.results <- eba(formula = mpg ~ wt | hp + gear | cyl + disp + drat + qsec + vs + am + carb,\n data = mtcars[1:10, ], k = 0:2)\n\n# The same result can be achieved by running:\n# eba.results <- eba(data = mtcars[1:10, ], y = \"mpg\", free = \"wt\",\n# doubtful = c(\"cyl\",\"disp\",\"hp\",\"drat\",\"qsec\",\"vs\",\"am\",\"gear\",\"carb\"),\n# focus = 
c(\"hp\",\"gear\"), k = 0:2)\n\n# print out results, rounded to 2 decimal places\nprint(eba.results, digits = 2)\n\n\n"} {"package":"hmm.discnp","topic":"anova.hmm.discnp","snippet":"### Name: anova.hmm.discnp\n### Title: Anova for hmm.discnp models\n### Aliases: anova.hmm.discnp\n### Keywords: models methods\n\n### ** Examples\n\nxxx <- with(SydColDisc,split(y,f=list(locn,depth)))\nfit1 <- hmm(xxx,K=1,itmax=10)\nfit2 <- hmm(xxx,K=2,itmax=10)\nanova(fit1,fit2)\n\n\n"} {"package":"hmm.discnp","topic":"ccprSim","snippet":"### Name: ccprSim\n### Title: Simulated monocyte counts and psychosis symptoms.\n### Aliases: ccprSim\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: \n##D # Takes too long.\n##D fit <- hmm(ccprSim,K=2,indep=FALSE,itmax=5,verbose=TRUE)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"cnvrtRho","snippet":"### Name: cnvrtRho\n### Title: Convert Rho between forms.\n### Aliases: cnvrtRho\n### Keywords: utilities\n\n### ** Examples\n\nYval <- LETTERS[1:10]\nTpm <- matrix(c(0.75,0.25,0.25,0.75),ncol=2,byrow=TRUE)\nRho <- cbind(c(rep(1,5),rep(0,5)),c(rep(0,5),rep(1,5)))/5\nrownames(Rho) <- Yval\nnewRho <- cnvrtRho(Rho)\noldRho <- cnvrtRho(newRho)\n\n\n"} {"package":"hmm.discnp","topic":"fitted.hmm.discnp","snippet":"### Name: fitted.hmm.discnp\n### Title: Fitted values of a discrete non-parametric hidden Markov model.\n### Aliases: fitted.hmm.discnp\n### Keywords: models\n\n### ** Examples\n\nP <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\nR <- matrix(c(0.5,0,0.1,0.1,0.3,\n 0.1,0.1,0,0.3,0.5),5,2)\nset.seed(42)\nlll <- sample(250:350,20,TRUE)\ny <- rhmm(ylengths=lll,nsim=1,drop=TRUE,tpm=P,Rho=R)\nfit <- hmm(y,K=2,verb=TRUE,keep.y=TRUE,itmax=10)\nfv <- fitted(fit)\n\n\n"} {"package":"hmm.discnp","topic":"hmm","snippet":"### Name: hmm\n### Title: Fit a hidden Markov model to discrete data.\n### Aliases: hmm\n### Keywords: models\n\n### ** Examples\n\n# TO DO: Create one or more bivariate examples.\n#\n# The value of itmax in the following examples is so much\n# too small as to be risible. 
This is just to speed up the\n# R CMD check process.\n# 1.\nYval <- LETTERS[1:10]\nTpm <- matrix(c(0.75,0.25,0.25,0.75),ncol=2,byrow=TRUE)\nRho <- cbind(c(rep(1,5),rep(0,5)),c(rep(0,5),rep(1,5)))/5\nrownames(Rho) <- Yval\nset.seed(42)\nxxx <- rhmm(ylengths=rep(1000,5),nsim=1,tpm=Tpm,Rho=Rho,yval=Yval,drop=TRUE)\nfit <- hmm(xxx,par0=list(tpm=Tpm,Rho=Rho),itmax=10)\nprint(fit$Rho) # A data frame\nprint(cnvrtRho(fit$Rho)) # A matrix of probabilities\n # whose columns sum to 1.\n\n# 2.\n# See the help for logLikHmm() for how to generate y.num.\n## Not run: \n##D fit.num <- hmm(y.num,K=2,verb=TRUE,itmax=10)\n##D fit.num.mix <- hmm(y.num,K=2,verb=TRUE,mixture=TRUE,itmax=10)\n##D print(fit.num[c(\"tpm\",\"Rho\")])\n## End(Not run)\n# Note that states 1 and 2 get swapped.\n\n# 3.\nxxx <- with(SydColDisc,split(y,f=list(locn,depth)))\nYval <- c(\"lo\",\"mlo\",\"m\",\"mhi\",\"hi\")\n# Two states: above and below the thermocline.\nfitSydCol <- hmm(xxx,yval=Yval,K=2,verb=TRUE,itmax=10)\n\n# 4.\nX <- split(SydColDisc[,c(\"ma.com\",\"nh.com\",\"bo.com\")],\n f=with(SydColDisc,list(locn,depth)))\nX <- lapply(X,function(x){\n as.matrix(as.data.frame(lapply(x,as.numeric)))-1})\nfit.wap <- hmm(xxx,yval=Yval,K=2,X=X,verb=TRUE,itmax=10)\n# wap <--> with auxiliary predictors.\n\n# 5.\n## Not run: \n##D # Takes too long.\n##D fitlm <- hmm(xxx,yval=Yval,K=2,method=\"LM\",verb=TRUE)\n##D fitem <- hmm(xxx,yval=Yval,K=2,verb=TRUE)\n##D # Algorithm terminates due to a decrease in the log likelihood\n##D # at EM step 64.\n##D newfitlm <- hmm(xxx,yval=Yval,par0=fitem,method=\"LM\",verb=TRUE)\n##D # The log likelihood improves from -1900.988 to -1820.314\n## End(Not run)\n\n# 6.\nfitLesCount <- hmm(lesionCount,K=2,itmax=10) # Two states: relapse and remission.\n\n\n"} {"package":"hmm.discnp","topic":"linLandFlows","snippet":"### Name: hydroDat\n### Title: Canadian hydrological data sets.\n### Aliases: linLandFlows ftLiardFlows portMannFlows portMannSedLoads\n### portMannSedCon\n### Keywords: datasets\n\n### ** Examples\n\nfit <- hmm(linLandFlows$deciles,K=4,itmax=10)\n\n\n"} {"package":"hmm.discnp","topic":"logLikHmm","snippet":"### Name: logLikHmm\n### Title: Log likelihood of a hidden Markov model\n### Aliases: logLikHmm\n### Keywords: models\n\n### ** Examples\n\n# TO DO: One or more bivariate examples.\nP <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\nR <- matrix(c(0.5,0,0.1,0.1,0.3,\n 0.1,0.1,0,0.3,0.5),5,2)\nset.seed(42)\nlll <- sample(250:350,20,TRUE)\nset.seed(909)\ny.num <- rhmm(ylengths=lll,nsim=1,tpm=P,Rho=R,drop=TRUE)\nset.seed(909)\ny.let <- rhmm(ylengths=lll,nsim=1,tpm=P,Rho=R,yval=letters[1:5],drop=TRUE)\nrow.names(R) <- 1:5\nll1 <- logLikHmm(y.num,tpm=P,Rho=R)\nrow.names(R) <- letters[1:5]\nll2 <- logLikHmm(y.let,tpm=P,Rho=R)\nll3 <- logLikHmm(y.let,tpm=P,Rho=R,ispd=c(0.5,0.5))\nfit <- hmm(y.num,K=2,itmax=10)\nll4 <- logLikHmm(y.num,fit) # Use the fitted rather than the \"true\" parameters.\n\n\n"} {"package":"hmm.discnp","topic":"misstify","snippet":"### Name: misstify\n### Title: Insert missing values.\n### Aliases: misstify\n### Keywords: datagen\n\n### ** Examples\n\nP <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\nR <- matrix(c(0.5,0,0.1,0.1,0.3,\n 0.1,0.1,0,0.3,0.5),5,2)\nset.seed(42)\nlll <- sample(250:350,20,TRUE)\ny1 <- rhmm(ylengths=lll,nsim=1,tpm=P,Rho=R)\ny1m <- misstify(y1,nafrac=0.5,fep=list(TRUE))\ny2 <- rhmm(ylengths=lll,nsim=5,tpm=P,Rho=R)\nset.seed(127)\ny2m <- misstify(y2,nafrac=0.5,fep=list(TRUE))\nnafracCalc(y2m) # A list all of whose entries are close to 0.5.\nset.seed(127)\ny2ma <- 
lapply(y2,misstify,nafrac=0.5,fep=list(TRUE))\n## Not run: \n##D nafracCalc(y2ma) # Throws an error.\n## End(Not run)\nsapply(y2ma,nafracCalc) # Effectively the same as nafracCalc(y2m).\n\n\n"} {"package":"hmm.discnp","topic":"mps","snippet":"### Name: mps\n### Title: Most probable states.\n### Aliases: mps\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D P <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\n##D rownames(P) <- 1:2\n##D R <- matrix(c(0.5,0,0.1,0.1,0.3,\n##D 0.1,0.1,0,0.3,0.5),5,2)\n##D set.seed(42)\n##D lll <- sample(250:350,20,TRUE)\n##D set.seed(909)\n##D y.num <- rhmm(ylengths=lll,nsim=1,tpm=P,Rho=R,drop=TRUE)\n##D fit.num <- hmm(y.num,K=2,verb=TRUE)\n##D s.1 <- mps(y.num,fit.num)\n##D s.2 <- mps(y.num,tpm=P,ispd=c(0.25,0.75),Rho=R)\n##D # The order of the states has got swapped; \n##D # note that ifelse(s.1[[1]]==\"1\",\"2\",\"1\") is much\n##D # more similar to s.2[[1]] than is s.1[[1]].\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"nafracCalc","snippet":"### Name: nafracCalc\n### Title: Calculate fractions of missing values.\n### Aliases: nafracCalc\n### Keywords: utilities\n\n### ** Examples\n\nxxx <- with(SydColDisc,split(y,f=list(locn,depth)))\nnafracCalc(xxx) # 0.7185199\n\n\n"} {"package":"hmm.discnp","topic":"pr","snippet":"### Name: pr\n### Title: Probability of state sequences.\n### Aliases: pr\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D P <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\n##D R <- matrix(c(0.5,0,0.1,0.1,0.3,\n##D 0.1,0.1,0,0.3,0.5),5,2)\n##D set.seed(42)\n##D lll <- sample(250:350,20,TRUE)\n##D set.seed(909)\n##D y.num <- rhmm(ylengths=lll,nsim=1,tpm=P,Rho=R,drop=TRUE)\n##D fit.num <- hmm(y.num,K=2,keep.y=TRUE,verb=TRUE)\n##D # Using fitted parameters.\n##D s.vit.1 <- viterbi(y.num,fit.num)\n##D pr.vit.1 <- pr(s.vit.1,model=fit.num)\n##D # Using true parameters from which y.num was generated.\n##D s.vit.2 <- viterbi(y.num,tpm=P,Rho=R)\n##D pr.vit.2 <- pr(s.vit.2,y.num,tpm=P,Rho=R)\n##D set.seed(202)\n##D y.mult <- rhmm(fit.num,nsim=4)\n##D s.vit.3 <- viterbi(y.mult,tpm=fit.num$tpm,Rho=fit.num$Rho)\n##D pr.vit.3 <- pr(s.vit.3,y.mult,tpm=fit.num$tpm,Rho=fit.num$Rho)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"predict.hmm.discnp","snippet":"### Name: predict.hmm.discnp\n### Title: Predicted values of a discrete non-parametric hidden Markov\n### model.\n### Aliases: predict.hmm.discnp\n### Keywords: models\n\n### ** Examples\n\nP <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\nR <- matrix(c(0.5,0,0.1,0.1,0.3,\n 0.1,0.1,0,0.3,0.5),5,2)\nset.seed(42)\nll1 <- sample(250:350,20,TRUE)\ny1 <- rhmm(ylengths=ll1,nsim=1,tpm=P,Rho=R,drop=TRUE)\nfit <- hmm(y1,K=2,verb=TRUE,keep.y=TRUE,itmax=10)\nfv <- fitted(fit)\nset.seed(176)\nll2 <- sample(250:350,20,TRUE)\ny2 <- rhmm(ylengths=ll2,nsim=1,tpm=P,Rho=R,drop=TRUE)\npv <- predict(fit,y=y2)\nyval <- letters[1:5]\nset.seed(171)\ny3 <- rhmm(ylengths=ll2,yval=yval,nsim=1,tpm=P,Rho=R,drop=TRUE)\nfit3 <- hmm(y3,K=2,verb=TRUE,keep.y=TRUE,itmax=10)\npv3 <- predict(fit3) # Same as fitted(fit3).\n\n\n"} {"package":"hmm.discnp","topic":"rhmm","snippet":"### Name: rhmm\n### Title: Simulate discrete data from a non-parametric hidden Markov\n### model.\n### Aliases: rhmm rhmm.default rhmm.hmm.discnp\n### Keywords: datagen\n\n### ** Examples\n\n# To do: one or more bivariate examples.\n## Not run: \n##D y <- list(linLandFlows$deciles,ftLiardFlows$deciles)\n##D fit <- hmm(y,K=3)\n##D simX <- rhmm(fit)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"scovmat","snippet":"### 
Name: scovmat\n### Title: Simulation based covariance matrix.\n### Aliases: scovmat\n### Keywords: utility\n\n### ** Examples\n\n## Not run: \n##D y <- list(linLandFlows$deciles,ftLiardFlows$deciles)\n##D fit <- hmm(y,K=3)\n##D ccc <- scovmat(fit,nsim=100)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"sp","snippet":"### Name: sp\n### Title: Calculate the conditional state probabilities.\n### Aliases: sp\n### Keywords: models\n\n### ** Examples\n\nP <- matrix(c(0.7,0.3,0.1,0.9),2,2,byrow=TRUE)\nR <- matrix(c(0.5,0,0.1,0.1,0.3,\n 0.1,0.1,0,0.3,0.5),5,2)\nset.seed(42)\ny <- rhmm(ylengths=rep(300,20),nsim=1,tpm=P,Rho=R,drop=TRUE)\nfit <- hmm(y,K=2,verb=TRUE,keep.y=TRUE,itmax=10)\ncpe1 <- sp(model=fit) # Using the estimated parameters.\ncpe2 <- sp(y,tpm=P,Rho=R,warn=FALSE) # Using the ``true'' parameters.\n# The foregoing would issue a warning that Rho had no row names\n# were it not for the fact that \"warn\" has been set to FALSE.\n\n\n"} {"package":"hmm.discnp","topic":"squantCI","snippet":"### Name: squantCI\n### Title: Simulation-quantile based confidence intervals.\n### Aliases: squantCI\n### Keywords: utility\n\n### ** Examples\n\n## Not run: \n##D y <- list(linLandFlows$deciles,ftLiardFlows$deciles)\n##D fit <- hmm(y,K=3)\n##D CIs <- squantCI(fit,nsim=100)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"update.hmm.discnp","snippet":"### Name: update.hmm.discnp\n### Title: Update a fitted 'hmm.discnp' model.\n### Aliases: update.hmm.discnp\n### Keywords: methods models\n\n### ** Examples\n\nset.seed(294)\nfit <- hmm(WoodPeweeSong,K=2,rand.start=list(tpm=TRUE,Rho=TRUE),itmax=10)\nxxx <- rhmm(fit,nsim=1)\nsfit <- update(fit,data=xxx,itmax=10)\nyyy <- with(SydColDisc,split(y,f=list(locn,depth)))\nf1 <- hmm(yyy,K=1)\nf2 <- update(f1,data=yyy,Kplus1=TRUE) # Big improvement, but ...\n## Not run: \n##D g2 <- hmm(yyy,K=2) # Substantially better than f2. 
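\n##D # Added aside (not in the original example): the two fits can be\n##D # compared through their attained log likelihoods, e.g.:\n##D c(f2$log.like, g2$log.like)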
\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"viterbi","snippet":"### Name: viterbi\n### Title: Most probable state sequence.\n### Aliases: viterbi\n### Keywords: models\n\n### ** Examples\n\n# See the help for logLikHmm() for how to generate y.num and y.let.\n## Not run: \n##D fit.num <- hmm(y.num,K=2,verb=TRUE,keep.y=TRUE)\n##D v.1 <- viterbi(model=fit.num)\n##D rownames(R) <- 1:5 # Avoids a (harmless) warning.\n##D v.2 <- viterbi(y.num,tpm=P,Rho=R)\n##D # P and R as in the help for logLikHmm() and for sp().\n##D \n##D # Note that the order of the states has gotten swapped; 3-v.1[[1]]\n##D # is identical to v.2[[1]]; for other k = 2, ..., 20, 3-v.1[[k]]\n##D # is much more similar to v.2[[k]] than is v.1[[k]].\n##D \n##D fit.let <- hmm(y.let,K=2,verb=TRUE,keep.y=TRUE)\n##D v.3 <- viterbi(model=fit.let)\n##D rownames(R) <- letters[1:5]\n##D v.4 <- viterbi(y.let,tpm=P,Rho=R)\n## End(Not run)\n\n\n"} {"package":"hmm.discnp","topic":"weissData","snippet":"### Name: weissData\n### Title: Data from \"An Introduction to Discrete-Valued Time Series\"\n### Aliases: weissData Bovine Cryptosporidiosis Downloads EricssonB_Jul2\n### FattyLiver FattyLiver2 goldparticle380 Hanta InfantEEGsleepstates IPs\n### LegionnairesDisease OffshoreRigcountsAlaska PriceStability Strikes\n### WoodPeweeSong\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: \n##D fit1 <- hmm(WoodPeweeSong,K=2,verbose=TRUE)\n##D # EM converges in 6 steps --- suspicious.\n##D set.seed(321)\n##D fit2 <- hmm(WoodPeweeSong,K=2,verbose=TRUE,rand.start=list(tpm=TRUE,Rho=TRUE))\n##D # 52 steps --- note the huge difference between fit1$log.like and fit2$log.like!\n##D set.seed(321)\n##D fit3 <- hmm(WoodPeweeSong,K=2,verbose=TRUE,method=\"bf\",\n##D rand.start=list(tpm=TRUE,Rho=TRUE))\n##D # log likelihood essentially the same as for fit2\n## End(Not run)\n\n\n"} {"package":"GAabbreviate","topic":"GAabbreviate","snippet":"### Name: GAabbreviate\n### Title: Abbreviating items (from questionnaire or other) measures using\n### Genetic Algorithms (GAs)\n### Aliases: GAabbreviate\n### Keywords: optimize multivariate survey\n\n### ** Examples\n\n### Example using randomly generated data\nnsubject = 100\nnitems = 15\nset.seed(123)\nitems = matrix(sample(1:5, nsubject*nitems, replace = TRUE), \n nrow = nsubject, ncol = nitems)\nscales = cbind(rowSums(items[,1:10]), rowSums(items[,11:15]))\n\nGAA = GAabbreviate(items, scales, itemCost = 0.01, maxItems = 5, \n popSize = 50, maxiter = 300, run = 100)\nplot(GAA)\nsummary(GAA)\n# more info can be retrieved using\nGAA$best\nGAA$measure\n\n\n"} {"package":"extremevalues","topic":"evGui","snippet":"### Name: evGui\n### Title: GUI to explore options and results of the \"extremevalues\"\n### package\n### Aliases: evGui\n### Keywords: outliers\n\n### ** Examples\n\n## Not run: \n##D y <- rnorm(100)\n##D evGui(y)\n##D \n## End(Not run)\n\n\n"} {"package":"extremevalues","topic":"fitNormal","snippet":"### Name: fitFunctions\n### Title: Fit model distributions\n### Aliases: fitNormal fitLognormal fitExponential fitPareto fitWeibull\n### Keywords: internal\n\n### ** Examples\n\ny = 10^rnorm(50);\nL <- getOutliers(y,rho=0.5);\n\n\n"} {"package":"extremevalues","topic":"getNormalLimit","snippet":"### Name: getLimit\n### Title: Determine outlier limit\n### Aliases: getNormalLimit getLognormalLimit getExponentialLimit\n### getParetoLimit getWeibullLimit\n### Keywords: internal\n\n### ** Examples\n\ny <- sort(exp(rnorm(100)));\np <- seq(1,100)/100;\nII <- seq(10,90)\nL <- 
getExponentialLimit(y[II],p[II],100,1.0);\n\n\n"} {"package":"extremevalues","topic":"getOutliers","snippet":"### Name: getOutliers\n### Title: Detect outliers\n### Aliases: getOutliers getOutliersI getOutliersII\n\n### ** Examples\n\ny <- rlnorm(100)\ny <- c(0.1*min(y),y,10*max(y))\nK <- getOutliers(y,method=\"I\",distribution=\"lognormal\")\nL <- getOutliers(y,method=\"II\",distribution=\"lognormal\")\npar(mfrow=c(1,2))\noutlierPlot(y,K,mode=\"qq\")\noutlierPlot(y,L,mode=\"residual\")\n\n\n"} {"package":"extremevalues","topic":"qqLognormalLimit","snippet":"### Name: getQQLimit\n### Title: Determine outlier limit\n### Aliases: qqLognormalLimit qqExponentialLimit qqParetoLimit\n### qqWeibullLimit qqNormalLimit\n### Keywords: internal\n\n### ** Examples\n\ny <- sort(exp(rnorm(100)));\np <- seq(1,100)/1000;\nL <- qqExponentialLimit(y,p,seq(10,90),0.05);\n\n\n"} {"package":"extremevalues","topic":"invErf","snippet":"### Name: invErf\n### Title: Inverse error function\n### Aliases: invErf\n\n### ** Examples\n\nx <-seq(-0.99,0.99,0.01);\nplot(x,invErf(x),'l');\n\n\n"} {"package":"extremevalues","topic":"outlierPlot","snippet":"### Name: outlierPlot\n### Title: Plot results of outlierdetection\n### Aliases: outlierPlot qqFitPlot plotMethodII\n\n### ** Examples\n\ny <- rlnorm(100)\ny <- c(0.1*min(y),y,10*max(y))\nK <- getOutliers(y,method=\"I\",distribution=\"lognormal\")\nL <- getOutliers(y,method=\"II\",distribution=\"lognormal\")\npar(mfrow=c(1,2))\noutlierPlot(y,K,mode=\"qq\")\noutlierPlot(y,L,mode=\"residual\")\n\n\n"} {"package":"extremevalues","topic":"dpareto","snippet":"### Name: pareto\n### Title: Pareto distribution\n### Aliases: dpareto qpareto rpareto\n\n### ** Examples\n\nq <- qpareto(0.5);\n\n\n"} {"package":"tglkmeans","topic":"TGL_kmeans","snippet":"### Name: TGL_kmeans\n### Title: kmeans++ with return value similar to R kmeans\n### Aliases: TGL_kmeans\n\n### ** Examples\n\n## Don't show: \ntglkmeans.set_parallel(1)\n## End(Don't show)\n\n# create 5 clusters normally distributed around 1:5\nd <- simulate_data(n = 100, sd = 0.3, nclust = 5, dims = 2, add_true_clust = FALSE)\nhead(d)\n\n# cluster\nkm <- TGL_kmeans(d, k = 5, \"euclid\", verbose = TRUE)\nnames(km)\nkm$centers\nhead(km$cluster)\nkm$size\n\n\n"} {"package":"tglkmeans","topic":"TGL_kmeans_tidy","snippet":"### Name: TGL_kmeans_tidy\n### Title: TGL kmeans with 'tidy' output\n### Aliases: TGL_kmeans_tidy\n\n### ** Examples\n\n## Don't show: \ntglkmeans.set_parallel(1)\n## End(Don't show)\n\n# create 5 clusters normally distributed around 1:5\nd <- simulate_data(n = 100, sd = 0.3, nclust = 5, dims = 2, add_true_clust = FALSE)\nhead(d)\n\n# cluster\nkm <- TGL_kmeans_tidy(d, k = 5, \"euclid\", verbose = TRUE)\nkm\n\n\n"} {"package":"tglkmeans","topic":"simulate_data","snippet":"### Name: simulate_data\n### Title: Simulate normal data for kmeans tests\n### Aliases: simulate_data\n\n### ** Examples\n\nsimulate_data(n = 100, sd = 0.3, nclust = 5, dims = 2)\n\n# add 20% missing data\nsimulate_data(n = 100, sd = 0.3, nclust = 5, dims = 2, frac_na = 0.2)\n\n\n"} {"package":"tglkmeans","topic":"tglkmeans.set_parallel","snippet":"### Name: tglkmeans.set_parallel\n### Title: Set parallel threads\n### Aliases: tglkmeans.set_parallel\n\n### ** Examples\n\n## No test: \ntglkmeans.set_parallel(8)\n## End(No test)\n\n\n"} {"package":"dynamAedes","topic":"dynamAedes.m","snippet":"### Name: dynamAedes.m\n### Title: Life cycle simulation of _Aedes_ mosquitoes\n### Aliases: dynamAedes.m\n\n### ** Examples\n\n## No test: \n## Run dynamAedes at 
local scale for 5 days\n# Make a toy temperature time series\nw <- matrix(seq(20,25,length.out=5),ncol=5)*1000\n# Run the model\n\tdynamAedes.m(\n\tspecies=\"koreicus\", \n\tscale=\"ws\",\n\tintro.eggs=10, \n\tjhwv=2, \n\ttemps.matrix=w, \n\tstartd=\"2021-06-21\", \n\tendd=\"2021-06-25\",\n\tlat=42,\n\tlong=8,\n\tn.clusters=1, \n\titer=1,\n\tcompressed.output=TRUE)\n## End(No test)\n\n\n"} {"package":"BeQut","topic":"dataLong","snippet":"### Name: dataLong\n### Title: dataLong\n### Aliases: dataLong\n\n### ** Examples\n\ndata(dataLong)\n\n\n"} {"package":"BeQut","topic":"deviance","snippet":"### Name: deviance\n### Title: 'deviance' returns the deviance based on the conditional\n### likelihood associated with the survival part.\n### Aliases: deviance\n\n### ** Examples\n\n\n## No test: \n#---- load data\ndata(dataLong)\n\n#---- Fit quantile regression joint model for the median\nqrjm_50 <- qrjm(formFixed = y ~ visit,\n formRandom = ~ visit,\n formGroup = ~ ID,\n formSurv = survival::Surv(time, event) ~ X1 + X2,\n survMod = \"weibull\",\n param = \"value\",\n timeVar= \"visit\",\n data = dataLong,\n save_va = TRUE,\n parallel = FALSE,\n tau = 0.5)\n\ndeviance(qrjm_50, M=200)\n## End(No test)\n\n\n\n"} {"package":"BeQut","topic":"lqm","snippet":"### Name: lqm\n### Title: 'lqm' fits linear quantile regression model\n### Aliases: lqm\n\n### ** Examples\n\n\n## No test: \n#---- Use data\ndata(wave)\n\n#---- Fit regression model for the first quartile\nlqm_025 <- lqm(formula = h110d~vent_vit_moy,\n data = wave,\n n.iter = 1000,\n n.burnin = 500,\n tau = 0.25)\n\n#---- Get the posterior mean of parameters\nlqm_025$mean\n\n#---- Visualize the trace for beta parameters\njagsUI::traceplot(lqm_025$out_jagsUI, parameters = \"beta\" )\n\n#---- Summary of output\nsummary(lqm_025)\n## End(No test)\n\n\n\n"} {"package":"BeQut","topic":"lqmm","snippet":"### Name: lqmm\n### Title: 'lqmm' fits linear quantile mixed model\n### Aliases: lqmm\n\n### ** Examples\n\n\n## No test: \n#---- Use dataLong dataset\ndata(dataLong)\n\n#---- Fit regression model for the third quartile\nlqmm_075 <- lqmm(formFixed = y ~ visit,\n formRandom = ~ visit,\n formGroup = ~ ID,\n data = dataLong,\n tau = 0.75,\n n.iter = 10000,\n n.burnin = 1000)\n\n#---- Get the posterior means\nlqmm_075$mean\n\n#---- Visualize the trace for beta parameters\njagsUI::traceplot(lqmm_075$out_jagsUI, parameters = \"beta\")\n\n#---- Summary of output\nsummary(lqmm_075)\n## End(No test)\n\n\n\n"} {"package":"BeQut","topic":"qrjm","snippet":"### Name: qrjm\n### Title: 'qrjm' fits quantile regression joint model\n### Aliases: qrjm\n\n### ** Examples\n\n\n## No test: \n#---- load data\ndata(dataLong)\n\n#---- Fit quantile regression joint model for the third quartile\nqrjm_75 <- qrjm(formFixed = y ~ visit,\n formRandom = ~ visit,\n formGroup = ~ ID,\n formSurv = Surv(time, event) ~ X1 + X2,\n survMod = \"weibull\",\n param = \"value\",\n timeVar= \"visit\",\n data = dataLong,\n tau = 0.75)\n\n#---- Visualize the trace for beta parameters\njagsUI::traceplot(qrjm_75$out_jagsUI, parameters = \"beta\")\n\n#---- Get the estimated coefficients: posterior means\nqrjm_75$mean\n\n#---- Summary of output\nsummary(qrjm_75)\n## End(No test)\n\n\n\n"} {"package":"BeQut","topic":"wave","snippet":"### Name: wave\n### Title: Data of wave\n### Aliases: wave\n\n### ** Examples\n\ndata(wave)\n\n\n"} {"package":"varycoef","topic":"GLS_chol","snippet":"### Name: GLS_chol\n### Title: GLS Estimate using Cholesky Factor\n### Aliases: GLS_chol GLS_chol.spam.chol.NgPeyton 
GLS_chol.matrix\n\n### ** Examples\n\n# generate data\nn <- 10\nX <- cbind(1, 20+1:n)\ny <- rnorm(n)\nA <- matrix(runif(n^2)*2-1, ncol=n)\nSigma <- t(A) %*% A\n# two possibilities\n## using standard Cholesky decomposition\nR_mat <- chol(Sigma); str(R_mat)\nmu_mat <- GLS_chol(R_mat, X, y)\n## using spam\nR_spam <- chol(spam::as.spam(Sigma)); str(R_spam)\nmu_spam <- GLS_chol(R_spam, X, y)\n# should be identical to the following\nmu <- solve(crossprod(X, solve(Sigma, X))) %*%\n crossprod(X, solve(Sigma, y))\n## check\nabs(mu - mu_mat)\nabs(mu - mu_spam)\n\n\n"} {"package":"varycoef","topic":"SVC_mle","snippet":"### Name: SVC_mle\n### Title: MLE of SVC model\n### Aliases: SVC_mle SVC_mle.default SVC_mle.formula\n\n### ** Examples\n\n## ---- toy example ----\n## We use the sampled, i.e., one dimensional SVCs\nstr(SVCdata)\n# sub-sample data to have feasible run time for example\nset.seed(123)\nid <- sample(length(SVCdata$locs), 50)\n\n## SVC_mle call with matrix arguments\nfit <- with(SVCdata, SVC_mle(\n y[id], X[id, ], locs[id], \n control = SVC_mle_control(profileLik = TRUE, cov.name = \"mat32\")))\n\n## SVC_mle call with formula\ndf <- with(SVCdata, data.frame(y = y[id], X = X[id, -1]))\nfit <- SVC_mle(\n y ~ X, data = df, locs = SVCdata$locs[id], \n control = SVC_mle_control(profileLik = TRUE, cov.name = \"mat32\")\n)\nclass(fit)\n\nsummary(fit)\n\n## No test: \n## ---- real data example ----\nrequire(sp)\n## get data set\ndata(\"meuse\", package = \"sp\")\n\n# construct data matrix and response, scale locations\ny <- log(meuse$cadmium)\nX <- model.matrix(~1+dist+lime+elev, data = meuse)\nlocs <- as.matrix(meuse[, 1:2])/1000\n\n\n## starting MLE\n# the next call takes a couple of seconds\nfit <- SVC_mle(\n y = y, X = X, locs = locs,\n # has 4 fixed effects, but only 3 random effects (SVC)\n # elev is missing in SVC\n W = X[, 1:3],\n control = SVC_mle_control(\n # inital values for 3 SVC\n # 7 = (3 * 2 covariance parameters + nugget)\n init = c(rep(c(0.4, 0.2), 3), 0.2),\n profileLik = TRUE\n )\n)\n\n## summary and residual output\nsummary(fit)\nplot(fit)\n\n## predict\n# new locations\nnewlocs <- expand.grid(\n x = seq(min(locs[, 1]), max(locs[, 1]), length.out = 30),\n y = seq(min(locs[, 2]), max(locs[, 2]), length.out = 30))\n# predict SVC for new locations\nSVC <- predict(fit, newlocs = as.matrix(newlocs))\n# visualization\nsp.SVC <- SVC\ncoordinates(sp.SVC) <- ~loc_1+loc_2\nspplot(sp.SVC, colorkey = TRUE)\n## End(No test)\n\n\n"} {"package":"varycoef","topic":"SVC_mle_control","snippet":"### Name: SVC_mle_control\n### Title: Set Parameters for 'SVC_mle'\n### Aliases: SVC_mle_control SVC_mle_control.default\n### SVC_mle_control.SVC_mle\n\n### ** Examples\n\ncontrol <- SVC_mle_control(init = rep(0.3, 10))\n# or\ncontrol <- SVC_mle_control()\ncontrol$init <- rep(0.3, 10)\n\n## No test: \n# Code for setting up parallel computing\nrequire(parallel)\n# exchange number of nodes (1) for detectCores()-1 or appropriate number\ncl <- makeCluster(1, setup_strategy = \"sequential\")\nclusterEvalQ(\n cl = cl,\n {\n library(spam)\n library(varycoef)\n })\n# use this list for parallel argument in SVC_mle_control\nparallel.control <- list(cl = cl, forward = TRUE, loginfo = TRUE)\n# SVC_mle goes here ...\n# DO NOT FORGET TO STOP THE CLUSTER!\nstopCluster(cl); rm(cl)\n## End(No test)\n\n\n"} {"package":"varycoef","topic":"SVC_selection_control","snippet":"### Name: SVC_selection_control\n### Title: SVC Selection Parameters\n### Aliases: SVC_selection_control\n\n### ** Examples\n\n# Initializing parameters 
and switching logLik to FALSE\nselection_control <- SVC_selection_control(\n CD.conv = list(N = 20L, delta = 1e-06, logLik = FALSE)\n)\n# or\nselection_control <- SVC_selection_control()\nselection_control$CD.conv$logLik <- FALSE\n\n\n\n"} {"package":"varycoef","topic":"check_cov_lower","snippet":"### Name: check_cov_lower\n### Title: Check Lower Bound of Covariance Parameters\n### Aliases: check_cov_lower\n\n### ** Examples\n\n# first one is true, all others are false\ncheck_cov_lower(c(0.1, 0, 0.2, 1, 0.2), q = 2)\ncheck_cov_lower(c(0 , 0, 0.2, 1, 0.2), q = 2)\ncheck_cov_lower(c(0.1, 0, 0.2, 1, 0 ), q = 2)\ncheck_cov_lower(c(0.1, 0, 0.2, -1, 0 ), q = 2)\n\n\n"} {"package":"varycoef","topic":"plot.SVC_mle","snippet":"### Name: plot.SVC_mle\n### Title: Plotting Residuals of 'SVC_mle' model\n### Aliases: plot.SVC_mle\n\n### ** Examples\n\n## ---- toy example ----\n## sample data\n# setting seed for reproducibility\nset.seed(123)\nm <- 7\n# number of observations\nn <- m*m\n# number of SVC\np <- 3\n# sample data\ny <- rnorm(n)\nX <- matrix(rnorm(n*p), ncol = p)\n# locations on a regular m-by-m-grid\nlocs <- expand.grid(seq(0, 1, length.out = m),\n seq(0, 1, length.out = m))\n\n## preparing for maximum likelihood estimation (MLE)\n# controls specific to MLE\ncontrol <- SVC_mle_control(\n # initial values of optimization\n init = rep(0.1, 2*p+1),\n # using profile likelihood\n profileLik = TRUE\n)\n\n# controls specific to optimization procedure, see help(optim)\nopt.control <- list(\n # number of iterations (set to one for demonstration sake)\n maxit = 1,\n # tracing information\n trace = 6\n)\n\n## starting MLE\nfit <- SVC_mle(y = y, X = X, locs = locs,\n control = control,\n optim.control = opt.control)\n\n## output: convergence code equal to 1, since maxit was only 1\nsummary(fit)\n\n## plot residuals\n# only QQ-plot\nplot(fit, which = 2)\n\n# two plots next to each other\noldpar <- par(mfrow = c(1, 2))\nplot(fit)\npar(oldpar)\n\n\n\n"} {"package":"varycoef","topic":"predict.SVC_mle","snippet":"### Name: predict.SVC_mle\n### Title: Prediction of SVCs (and response variable)\n### Aliases: predict.SVC_mle\n\n### ** Examples\n\n## ---- toy example ----\n## We use the sampled, i.e., one dimensional SVCs\nstr(SVCdata)\n# sub-sample data to have feasible run time for example\nset.seed(123)\nid <- sample(length(SVCdata$locs), 50)\n\n## SVC_mle call with matrix arguments\nfit_mat <- with(SVCdata, SVC_mle(\n y[id], X[id, ], locs[id], \n control = SVC_mle_control(profileLik = TRUE, cov.name = \"mat32\")))\n\n## SVC_mle call with formula\ndf <- with(SVCdata, data.frame(y = y[id], X = X[id, -1]))\nfit_form <- SVC_mle(\n y ~ X, data = df, locs = SVCdata$locs[id], \n control = SVC_mle_control(profileLik = TRUE, cov.name = \"mat32\")\n)\n\n## prediction\n\n# predicting SVCs\npredict(fit_mat, newlocs = 1:2)\npredict(fit_form, newlocs = 1:2)\n\n# predicting SVCs and response providing new covariates\npredict(\n fit_mat, \n newX = matrix(c(1, 1, 3, 4), ncol = 2), \n newW = matrix(c(1, 1, 3, 4), ncol = 2), \n newlocs = 1:2\n)\npredict(fit_form, newdata = data.frame(X = 3:4), newlocs = 1:2)\n\n\n\n"} {"package":"varycoef","topic":"sample_SVCdata","snippet":"### Name: sample_SVCdata\n### Title: Sample Function for GP-based SVC Model for Given Locations\n### Aliases: sample_SVCdata\n\n### ** Examples\n\nset.seed(123)\n# SVC parameters\n(df.pars <- data.frame(\n var = c(2, 1),\n scale = c(3, 1),\n mean = c(1, 2)))\n# nugget standard deviation\ntau <- 0.5\n\n# sample locations\ns <- sort(runif(500, min = 0, max 
= 10))\nSVCdata <- sample_SVCdata(\n df.pars = df.pars, nugget.sd = tau, locs = s, cov.name = \"mat32\"\n)\n\n\n"} {"package":"varycoef","topic":"varycoef","snippet":"### Name: varycoef\n### Title: varycoef: Modeling Spatially Varying Coefficients\n### Aliases: varycoef\n\n### ** Examples\n\nvignette(\"manual\", package = \"varycoef\")\nmethods(class = \"SVC_mle\")\n\n\n\n"} {"package":"distrom","topic":"dmr","snippet":"### Name: dmr\n### Title: Distributed Multinomial Regression\n### Aliases: dmr distrom predict.dmr coef.dmr\n\n### ** Examples\n\n\nlibrary(MASS)\ndata(fgl)\n\n## make your cluster \n## FORK is faster but memory heavy, and doesn't work on Windows.\ncl <- makeCluster(2,type=ifelse(.Platform$OS.type==\"unix\",\"FORK\",\"PSOCK\")) \nprint(cl)\n\n## fit in parallel\nfits <- dmr(cl, fgl[,1:9], fgl$type, verb=1)\n\n## it's good practice to stop the cluster once you're done\nstopCluster(cl)\n\n## Individual Poisson model fits and AICc selection\npar(mfrow=c(3,2))\nfor(j in 1:6){\n\tplot(fits[[j]])\n\tmtext(names(fits)[j],font=2,line=2) }\n\n## AICc model selection\nB <- coef(fits)\n\n## Fitted probability by true response\npar(mfrow=c(1,1))\nP <- predict(B, fgl[,1:9], type=\"response\")\nboxplot(P[cbind(1:214,fgl$type)]~fgl$type, \n\tylab=\"fitted prob of true class\")\n\n\n\n\n"} {"package":"distrom","topic":"dmrcoef-class","snippet":"### Name: dmrcoef-class\n### Title: Class '\"dmrcoef\"'\n### Aliases: dmrcoef-class predict,dmrcoef-method\n### Keywords: classes\n\n### ** Examples\nshowClass(\"dmrcoef\")\n\n"} {"package":"glinvci","topic":"clone_model","snippet":"### Name: clone_model\n### Title: Clone a GLInv model\n### Aliases: clone_model clone_model.glinv_gauss clone_model.glinv\n\n### ** Examples\n\nrepar = get_restricted_ou(H=NULL, theta=c(0,0), Sig='diag', lossmiss=NULL)\nmod1 = glinv(tree = ape::rtree(10),\n x0 = c(0,0),\n X = NULL,\n repar = repar)\nmod2 = mod1\nmod3 = clone_model(mod1)\ntraits = matrix(rnorm(20), 2, 10)\nset_tips(mod1, traits)\nprint(has_tipvals(mod1)) # TRUE\nprint(has_tipvals(mod2)) # TRUE\nprint(has_tipvals(mod3)) # FALSE\n\n\n"} {"package":"glinvci","topic":"get_restricted_ou","snippet":"### Name: get_restricted_ou\n### Title: Convenience function for constructing restricted/reparameterised\n### OU parameterisation function.\n### Aliases: get_restricted_ou\n\n### ** Examples\n\n### --- STEP 1: Make an example tree and trait data\nntips = 200\nk = 2 # No. 
of trait dimensions\ntr = ape::rtree(ntips) \nX = matrix(rnorm(k*ntips), k, ntips)\nx0 = rnorm(k)\n\n### --- STEP 2: Make a model which has unrestricted H, fixed theta and diagonal Sigma_x.\nrepar = get_restricted_ou(H=NULL, theta=c(3,1), Sig='diag', lossmiss=NULL)\nmod = glinv(tr, x0, X,\n pardims =repar$nparams(k),\n parfns =repar$par,\n parjacs =repar$jac,\n parhess =repar$hess)\n# Actually, to save typing, the following short-cut call is the same as the above:\n# mod = glinv(tr, x0, X, repar=repar)\n\n### --- STEP 3: Use the model as usual, say, we compute the likelihood at a specified parameter.\nH = matrix(c(1,0,0,-1), k)\ntheta = c(3,1)\nsig = matrix(c(0.25,0,0,0.25), k)\nsig_x = t(chol(sig))\nLIK = lik(mod)(c(H, c(0.5,0.5)))\n\n### --- STEP 4: Confirm the restricted model does indeed match the unrestricted.\nmod_unrestricted = glinv(tr, x0, X,\n pardims=nparams_ou(k),\n parfns=oupar,\n parjacs=oujac,\n parhess=ouhess)\nLIK_unrestricted = lik(mod_unrestricted)(c(H,theta,sig_x[lower.tri(sig_x, diag=TRUE)]))\nprint(LIK == LIK_unrestricted)\n# [1] TRUE\n\n### --- STEP 5: Confirm that this is indeed the same as making everything manually\nmod_manual = glinv(tr, x0, X,\n pardims = nparams_ou_fixedtheta_diagSig(k),\n parfns = ou_fixedtheta_diagSig(oupar, theta=c(3,1)),\n parjacs = dou_fixedtheta_diagSig(oujac, theta=c(3,1)),\n parhess = hou_fixedtheta_diagSig(ouhess, theta=c(3,1)))\nLIK_manual = lik(mod_manual)(c(H=H, sig_x=c(0.5,0.5)))\nprint(LIK == LIK_manual)\n# [1] TRUE\n\n\n\n"} {"package":"glinvci","topic":"glinv","snippet":"### Name: glinv\n### Title: Construct a GLInv model with respect to user-specified\n### parametrisation\n### Aliases: glinv print.glinv lik.glinv grad.glinv hess.glinv plot.glinv\n\n### ** Examples\n\n## No test: \n### --- STEP 1: Making an example tree and trait data\nntips = 200\nk = 2 # No. of trait dimensions\ntr = ape::rtree(ntips) \nX = matrix(rnorm(k*ntips), k, ntips)\nx0 = rnorm(k)\n\n### --- STEP 2: Making a model object. 
We use OU as an example.\n### Assume H is a positive definite diagonal matrix.\nmod = glinv(tr, x0, X,\n parfns = list(ou_logdiagH(ou_haltlost(oupar))),\n pardims = list(nparams_ou_diagH(k)),\n parjacs = list(dou_logdiagH(dou_haltlost(oujac))),\n parhess = list(hou_logdiagH(hou_haltlost(ouhess))))\n\n### --- STEP 3: Try getting the likelihood, gradient etc.\nH = matrix(c(1,0,0,-1), k)\ntheta = c(0,0)\nsig = matrix(c(0.5,0,0,0.5), k)\nsig_x = t(chol(sig))\n# glinvci ALWAYS assumes diagonals of sig_x are in log scale.\ndiag(sig_x) = log(diag(sig_x))\npar_init = c(H=diag(H),theta=theta,sig_x=sig_x[lower.tri(sig_x,diag=TRUE)])\nprint(par_init)\nprint(lik(mod)(par_init))\nprint(grad(mod)(par_init))\nprint(hess(mod)(par_init))\n\n### --- STEP 4: Fitting a model\nfitted = fit(mod, par_init)\nprint(fitted)\n\n### --- STEP 5: Estimating variance-covariance of the MLE\nv_estimate = varest(mod, fitted)\n\n### --- STEP 6: Get marginal confidence intervals\nprint(marginal_ci(v_estimate, lvl=0.95)) \n## End(No test)\n\n\n"} {"package":"glinvci","topic":"glinv_gauss","snippet":"### Name: glinv_gauss\n### Title: Construct an object representing a GLInv model with respect to\n### the underlying Gaussian process parameters.\n### Aliases: glinv_gauss lik.glinv_gauss grad.glinv_gauss hess.glinv_gauss\n### print.glinv_gauss\n\n### ** Examples\n\ntr = ape::rtree(3)\nmodel = glinv_gauss(tr, x0=c(0,0), X=matrix(rnorm(6),2,3))\npar = unlist(\n list(\n list('Phi' = c(1,0,0,1), # Parameters for node #1, a tip\n 'w' = c(-1,1),\n 'V' = c(1,0,1)), # Lower triangular part of a 2D identity matrix\n list('Phi' = c(2,0,0,2), # For node #2, a tip\n 'w' = c(-2,2),\n 'V' = c(2,0,2)),\n list('Phi' = c(3,0,0,3), # For node #3, a tip\n 'w' = c(-3,3),\n 'V' = c(3,0,3)),\n list('Phi' = c(4,0,0,4), # For node #5. 
Node #4 skipped as it is the root\n 'w' = c(-4,4),\n 'V' = c(4,0,4))\n ))\nprint(par)\nlik(model, par)\ngrad(model, par)\nhess(model, par)\n\n\n"} {"package":"glinvci","topic":"set_tips","snippet":"### Name: set_tips\n### Title: Set trait values at the tip for a 'glinv_gauss' model.\n### Aliases: set_tips set_tips.glinv_gauss set_tips.glinv\n\n### ** Examples\n\ntr = ape::rtree(10)\nmodel = glinv_gauss(tr, x0=c(0,0)) # The `X` argument is implicitly NULL\nmodel2 = model # This is not copied!\ntraits = matrix(rnorm(20), 2, 10)\nset_tips(model, traits)\n\n\n"} {"package":"lorad","topic":"lorad_estimate","snippet":"### Name: lorad_estimate\n### Title: Calculates the LoRaD estimate of the marginal likelihood\n### Aliases: lorad_estimate\n\n### ** Examples\n\nnormals <- rnorm(1000000,0,10)\nprob_normals <- dnorm(normals,0,10,log=TRUE) \nproportions <- rbeta(1000000,1,2)\nprob_proportions <- dbeta(proportions,1,2,log=TRUE)\nlengths <- rgamma(1000000, 10, 1)\nprob_lengths <- dgamma(lengths,10,1,log=TRUE)\nparamsdf <- data.frame(\n normals,prob_normals,\n proportions, prob_proportions,\n lengths, prob_lengths)\ncolumnkey <- c(\n \"normals\"=\"unconstrained\", \n \"prob_normals\"=\"posterior\", \n \"proportions\"=\"proportion\", \n \"prob_proportions\"=\"posterior\", \n \"lengths\"=\"positive\", \n \"prob_lengths\"=\"posterior\")\nresults <- lorad_estimate(paramsdf, columnkey, 0.5, 'random', 0.1)\nlorad_summary(results)\n\n\n\n"} {"package":"lorad","topic":"lorad_summary","snippet":"### Name: lorad_summary\n### Title: Summarize output from 'lorad_estimate()'\n### Aliases: lorad_summary\n\n### ** Examples\n\nnormals <- rnorm(1000000,0,10)\nprob_normals <- dnorm(normals,0,10,log=TRUE) \nparamsdf <- data.frame(normals,prob_normals)\ncolumnkey <- c(\"normals\"=\"unconstrained\", \"prob_normals\"=\"posterior\")\nresults <- lorad_estimate(paramsdf, columnkey, 0.5, 'left', 0.1)\nlorad_summary(results)\n\n\n"} {"package":"JM","topic":"dns","snippet":"### Name: DerivSplines\n### Title: Derivatives and Integrals of B-splines and Natural Cubic splines\n### Aliases: dns dbs ins ibs\n### Keywords: regression\n\n### ** Examples\n\nx <- rnorm(10)\ndns(x, df = 4)\nins(x, df = 4)\n\n\n"} {"package":"JM","topic":"aids","snippet":"### Name: aids\n### Title: Didanosine versus Zalcitabine in HIV Patients\n### Aliases: aids aids.id\n### Keywords: datasets\n\n### ** Examples\n\nsummary(aids.id)\n\n\n"} {"package":"JM","topic":"anova.jointModel","snippet":"### Name: anova\n### Title: Anova Method for Fitted Joint Models\n### Aliases: anova.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit without treatment effect\n##D fitLME.null <- lme(sqrt(CD4) ~ obstime, \n##D random = ~ 1 | patient, data = aids)\n##D # cox model fit without treatment effect\n##D fitCOX.null <- coxph(Surv(Time, death) ~ 1, \n##D data = aids.id, x = TRUE)\n##D # joint model fit without treatment effect\n##D fitJOINT.null <- jointModel(fitLME.null, fitCOX.null, \n##D timeVar = \"obstime\", method = \"weibull-PH-aGH\")\n##D \n##D # linear mixed model fit with treatment effect\n##D fitLME.alt <- lme(sqrt(CD4) ~ obstime * drug - drug, \n##D random = ~ 1 | patient, data = aids)\n##D # cox model fit with treatment effect\n##D fitCOX.alt <- coxph(Surv(Time, death) ~ drug, \n##D data = aids.id, x = TRUE)\n##D # joint model fit with treatment effect\n##D fitJOINT.alt <- jointModel(fitLME.alt, fitCOX.alt, timeVar = \"obstime\", \n##D method = \"weibull-PH-aGH\")\n##D \n##D # likelihood ratio test for treatment 
effect\n##D anova(fitJOINT.null, fitJOINT.alt)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"aucJM","snippet":"### Name: aucJM\n### Title: Time-Dependent AUCs for Joint Models\n### Aliases: aucJM aucJM.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # we construct the composite event indicator (transplantation or death)\n##D pbc2$status2 <- as.numeric(pbc2$status != \"alive\")\n##D pbc2.id$status2 <- as.numeric(pbc2.id$status != \"alive\")\n##D \n##D # we fit the joint model using splines for the subject-specific \n##D # longitudinal trajectories and a spline-approximated baseline\n##D # risk function\n##D lmeFit <- lme(log(serBilir) ~ ns(year, 3),\n##D random = list(id = pdDiag(form = ~ ns(year, 3))), data = pbc2)\n##D survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D jointFit <- jointModel(lmeFit, survFit, timeVar = \"year\", \n##D method = \"piecewise-PH-aGH\")\n##D \n##D # AUC using data up to year 5 with horizon at year 8\n##D aucJM(jointFit, pbc2, Tstart = 5, Thoriz = 8)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"coef.jointModel","snippet":"### Name: coef\n### Title: Estimated Coefficients for Joint Models\n### Aliases: coef.jointModel fixef.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(sqrt(CD4) ~ obstime * drug - drug, \n##D random = ~ 1 | patient, data = aids)\n##D # cox model fit\n##D fitCOX <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n##D \n##D # joint model fit\n##D fitJOINT <- jointModel(fitLME, fitCOX, \n##D timeVar = \"obstime\")\n##D \n##D # fixed effects for the longitudinal process\n##D fixef(fitJOINT)\n##D \n##D # fixed effects + random effects estimates for the longitudinal \n##D # process\n##D coef(fitJOINT)\n##D \n##D # fixed effects for the event process\n##D fixef(fitJOINT, process = \"Event\")\n##D coef(fitJOINT, process = \"Event\")\n## End(Not run)\n\n\n"} {"package":"JM","topic":"crLong","snippet":"### Name: crLong\n### Title: Transform Competing Risks Data in Long Format\n### Aliases: crLong\n### Keywords: methods\n\n### ** Examples\n\nhead(crLong(pbc2.id, \"status\", \"alive\"))\n\n\n"} {"package":"JM","topic":"dynCJM","snippet":"### Name: dynCJM\n### Title: A Dynamic Discrimination Index for Joint Models\n### Aliases: dynCJM dynCJM.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # we construct the composite event indicator (transplantation or death)\n##D pbc2$status2 <- as.numeric(pbc2$status != \"alive\")\n##D pbc2.id$status2 <- as.numeric(pbc2.id$status != \"alive\")\n##D \n##D # we fit the joint model using splines for the subject-specific \n##D # longitudinal trajectories and a spline-approximated baseline\n##D # risk function\n##D lmeFit <- lme(log(serBilir) ~ ns(year, 3),\n##D random = list(id = pdDiag(form = ~ ns(year, 3))), data = pbc2)\n##D survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D jointFit <- jointModel(lmeFit, survFit, timeVar = \"year\", \n##D method = \"piecewise-PH-aGH\")\n##D \n##D # dynamic discrimination index up to year 10 using a two-year interval \n##D dynCJM(jointFit, pbc2, Dt = 2, t.max = 10)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"fitted.jointModel","snippet":"### Name: fitted\n### Title: Fitted Values for Joint Models\n### Aliases: fitted.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(log(serBilir) ~ drug * year, \n##D random = ~ 1 | id, data = 
pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, \n##D data = pbc2.id, x = TRUE)\n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D \n##D # fitted for the longitudinal process\n##D head(cbind(\n##D \"Marg\" = fitted(fitJOINT), \n##D \"Subj\" = fitted(fitJOINT, type = \"Subject\")\n##D ))\n##D \n##D # fitted for the event process - survival function\n##D head(cbind(\n##D \"Marg\" = fitted(fitJOINT, process = \"Ev\"), \n##D \"Subj\" = fitted(fitJOINT, process = \"Ev\", type = \"Subject\")\n##D ))\n##D \n##D # fitted for the event process - cumulative hazard function\n##D head(cbind(\n##D \"Marg\" = fitted(fitJOINT, process = \"Ev\", \n##D scale = \"cumulative-Hazard\"), \n##D \"Subj\" = fitted(fitJOINT, process = \"Ev\", type = \"Subject\", \n##D scale = \"cumulative-Hazard\")\n##D ))\n## End(Not run)\n\n\n"} {"package":"JM","topic":"jointModel","snippet":"### Name: jointModel\n### Title: Joint Models for Longitudinal and Survival Data\n### Aliases: jointModel\n### Keywords: multivariate regression\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit (random intercepts)\n##D fitLME <- lme(log(serBilir) ~ drug * year, random = ~ 1 | id, data = pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D fitJOINT\n##D summary(fitJOINT)\n##D \n##D # linear mixed model fit (random intercepts + random slopes)\n##D fitLME <- lme(log(serBilir) ~ drug * year, random = ~ year | id, data = pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D fitJOINT\n##D summary(fitJOINT)\n##D \n##D # we also include an interaction term of log(serBilir) with drug\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\",\n##D interFact = list(value = ~ drug, data = pbc2.id))\n##D fitJOINT\n##D summary(fitJOINT)\n##D \n##D \n##D # a joint model in which the risk for an event depends both on the true value of\n##D # the marker and the true value of the slope of the longitudinal trajectory\n##D lmeFit <- lme(sqrt(CD4) ~ obstime * drug, random = ~ obstime | patient, data = aids)\n##D coxFit <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n##D \n##D # to fit this model we need to specify the 'derivForm' argument, which is a list\n##D # with first component the derivative of the fixed-effects formula of 'lmeFit' with\n##D # respect to 'obstime', second component the indicator of which fixed-effects \n##D # coefficients correspond to the previously defined formula, third component the \n##D # derivative of the random-effects formula of 'lmeFit' with respect to 'obstime', \n##D # and fourth component the indicator of which random-effects correspond to the \n##D # previously defined formula\n##D dForm <- list(fixed = ~ 1 + drug, indFixed = c(2, 4), random = ~ 1, indRandom = 2)\n##D jointModel(lmeFit, coxFit, timeVar = \"obstime\", method = \"spline-PH-aGH\",\n##D parameterization = \"both\", derivForm = dForm)\n##D \n##D \n##D # Competing Risks joint model\n##D # we first expand the PBC dataset in the competing risks long format\n##D # with two competing risks being death and transplantation\n##D pbc2.idCR <- 
crLong(pbc2.id, \"status\", \"alive\")\n##D \n##D # we fit the linear mixed model as before\n##D lmeFit.pbc <- lme(log(serBilir) ~ drug * ns(year, 3), \n##D random = list(id = pdDiag(form = ~ ns(year, 3))), data = pbc2)\n##D \n##D # however, for the survival model we need to use the data in the long\n##D # format, and include the competing risks indicator as a stratification\n##D # factor. We also take interactions of the baseline covariates with the\n##D # stratification factor in order to allow the effect of these covariates\n##D # to be different for each competing risk\n##D coxCRFit.pbc <- coxph(Surv(years, status2) ~ (drug + sex)*strata + strata(strata), \n##D data = pbc2.idCR, x = TRUE)\n##D \n##D # the corresponding joint model is fitted simply by including the above\n##D # two submodels as main arguments, setting argument CompRisk to TRUE, \n##D # and choosing as method = \"spline-PH-aGH\". Similarly as above, we also \n##D # include strata as an interaction factor to allow serum bilirubin to \n##D # have a different effect for each of the two competing risks\n##D jmCRFit.pbc <- jointModel(lmeFit.pbc, coxCRFit.pbc, timeVar = \"year\", \n##D method = \"spline-PH-aGH\", \n##D interFact = list(value = ~ strata, data = pbc2.idCR), \n##D CompRisk = TRUE)\n##D summary(jmCRFit.pbc)\n##D \n##D # linear mixed model fit\n##D fitLME <- lme(sqrt(CD4) ~ obstime * drug - drug, \n##D random = ~ 1 | patient, data = aids)\n##D # cox model fit\n##D fitCOX <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n##D # joint model fit with a spline-approximated baseline hazard function\n##D fitJOINT <- jointModel(fitLME, fitCOX, \n##D timeVar = \"obstime\", method = \"spline-PH-aGH\")\n##D fitJOINT\n##D summary(fitJOINT)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"pbc2","snippet":"### Name: pbc2\n### Title: Mayo Clinic Primary Biliary Cirrhosis Data\n### Aliases: pbc2 pbc2.id\n### Keywords: datasets\n\n### ** Examples\n\nsummary(pbc2.id)\n\n\n"} {"package":"JM","topic":"piecewiseExp.ph","snippet":"### Name: piecewiseExp.ph\n### Title: Proportional Hazards Models with Piecewise Constant Baseline\n### Hazard Function\n### Aliases: piecewiseExp.ph\n### Keywords: multivariate regression\n\n### ** Examples\n\ncoxFit <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\npiecewiseExp.ph(coxFit)\n\n\n"} {"package":"JM","topic":"plot.rocJM","snippet":"### Name: plot.rocJM\n### Title: Plot Method for rocJM Objects\n### Aliases: plot.rocJM\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D fitLME <- lme(sqrt(CD4) ~ obstime + obstime:(drug + AZT + prevOI + gender), \n##D random = ~ obstime | patient, data = aids)\n##D fitSURV <- coxph(Surv(Time, death) ~ drug + AZT + prevOI + gender, \n##D data = aids.id, x = TRUE)\n##D fit.aids <- jointModel(fitLME, fitSURV, timeVar = \"obstime\", \n##D method = \"piecewise-PH-aGH\")\n##D \n##D ND <- aids[aids$patient == \"7\", ]\n##D roc <- rocJM(fit.aids, dt = c(2, 4, 8), ND, idVar = \"patient\")\n##D plot(roc, lwd = 2, legend = TRUE)\n##D plot(roc, type = \"AUC\")\n## End(Not run)\n\n\n"} {"package":"JM","topic":"plot.survfitJM","snippet":"### Name: plot.survfitJM\n### Title: Plot Method for survfitJM Objects\n### Aliases: plot.survfitJM\n### Keywords: methods\n\n### ** Examples\n\n# linear mixed model fit\nfitLME <- lme(sqrt(CD4) ~ obstime + obstime:drug, \n random = ~ 1 | patient, data = aids)\n# cox model fit\nfitCOX <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n\n# joint model fit\nfitJOINT <- jointModel(fitLME, fitCOX, \n 
timeVar = \"obstime\", method = \"weibull-PH-aGH\")\n\n# sample of the patients who are still alive\nND <- aids[aids$patient == \"141\", ]\nss <- survfitJM(fitJOINT, newdata = ND, idVar = \"patient\", M = 50)\nplot(ss)\nplot(ss, include.y = TRUE, add.last.time.axis.tick = TRUE, legend = TRUE)\n\n\n"} {"package":"JM","topic":"plot.jointModel","snippet":"### Name: plot\n### Title: Plot Diagnostics for Joint Models\n### Aliases: plot.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(log(serBilir) ~ drug * year, random = ~ 1 | id, data = pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D \n##D plot(fitJOINT, 3, add.KM = TRUE, col = \"red\", lwd = 2)\n##D \n##D par(mfrow = c(2, 2))\n##D plot(fitJOINT)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"prederrJM","snippet":"### Name: prederrJM\n### Title: Prediction Errors for Joint Models\n### Aliases: prederrJM prederrJM.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # we construct the composite event indicator (transplantation or death)\n##D pbc2$status2 <- as.numeric(pbc2$status != \"alive\")\n##D pbc2.id$status2 <- as.numeric(pbc2.id$status != \"alive\")\n##D \n##D # we fit the joint model using splines for the subject-specific \n##D # longitudinal trajectories and a spline-approximated baseline\n##D # risk function\n##D lmeFit <- lme(log(serBilir) ~ ns(year, 3),\n##D random = list(id = pdDiag(form = ~ ns(year, 3))), data = pbc2)\n##D survFit <- coxph(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D jointFit <- jointModel(lmeFit, survFit, timeVar = \"year\", \n##D method = \"piecewise-PH-aGH\")\n##D \n##D # prediction error at year 10 using longitudinal data up to year 5 \n##D prederrJM(jointFit, pbc2, Tstart = 5, Thoriz = 10)\n##D prederrJM(jointFit, pbc2, Tstart = 5, Thoriz = 6.5, interval = TRUE)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"predict.jointModel","snippet":"### Name: predict\n### Title: Predictions for Joint Models\n### Aliases: predict.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(log(serBilir) ~ drug * year, \n##D random = ~ year | id, data = pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, \n##D data = pbc2.id, x = TRUE)\n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D \n##D DF <- with(pbc2, expand.grid(drug = levels(drug),\n##D year = seq(min(year), max(year), len = 100)))\n##D Ps <- predict(fitJOINT, DF, interval = \"confidence\", return = TRUE)\n##D require(lattice)\n##D xyplot(pred + low + upp ~ year | drug, data = Ps,\n##D type = \"l\", col = c(2,1,1), lty = c(1,2,2), lwd = 2,\n##D ylab = \"Average log serum Bilirubin\")\n##D \n##D \n##D # Subject-specific predictions\n##D ND <- pbc2[pbc2$id == 2, ]\n##D Ps.ss <- predict(fitJOINT, ND, type = \"Subject\",\n##D interval = \"confidence\", return = TRUE)\n##D require(lattice)\n##D xyplot(pred + low + upp ~ year | id, data = Ps.ss,\n##D type = \"l\", col = c(2,1,1), lty = c(1,2,2), lwd = 2,\n##D ylab = \"Average log serum Bilirubin\")\n## End(Not run)\n\n\n"} {"package":"JM","topic":"prothro","snippet":"### Name: prothro\n### Title: Prednisone versus Placebo in Liver Cirrhosis 
Patients\n### Aliases: prothro prothros\n### Keywords: datasets\n\n### ** Examples\n\nsummary(prothros)\n\n\n"} {"package":"JM","topic":"ranef.jointModel","snippet":"### Name: ranef\n### Title: Random Effects Estimates for Joint Models\n### Aliases: ranef.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(log(serBilir) ~ drug * year, random = ~ 1 | id, data = pbc2)\n##D # survival regression fit\n##D fitSURV <- survreg(Surv(years, status2) ~ drug, data = pbc2.id, x = TRUE)\n##D \n##D # joint model fit, under the (default) Weibull model\n##D fitJOINT <- jointModel(fitLME, fitSURV, timeVar = \"year\")\n##D ranef(fitJOINT)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"residuals.jointModel","snippet":"### Name: residuals\n### Title: Residuals for Joint Models\n### Aliases: residuals.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D # linear mixed model fit\n##D fitLME <- lme(sqrt(CD4) ~ obstime * drug - drug, \n##D random = ~ 1 | patient, data = aids)\n##D # cox model fit\n##D fitCOX <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n##D \n##D # joint model fit, under the additive log cumulative hazard model\n##D fitJOINT <- jointModel(fitLME, fitCOX, \n##D timeVar = \"obstime\")\n##D \n##D # residuals for the longitudinal outcome\n##D head(cbind(\n##D \"Marginal\" = residuals(fitJOINT),\n##D \"std-Marginal\" = residuals(fitJOINT, type = \"stand-Marginal\"),\n##D \"Subject\" = residuals(fitJOINT, type = \"Subject\"),\n##D \"std-Subject\" = residuals(fitJOINT, type = \"stand-Subject\")\n##D ))\n##D \n##D # residuals for the survival outcome\n##D head(cbind(\n##D \"Martingale\" = residuals(fitJOINT, process = \"Event\", type = \"Martingale\"),\n##D \"CoxSnell\" = residuals(fitJOINT, process = \"Event\", type = \"CoxSnell\")\n##D ))\n## End(Not run)\n\n\n"} {"package":"JM","topic":"rocJM","snippet":"### Name: rocJM\n### Title: Predictive Accuracy Measures for Longitudinal Markers under a\n### Joint Modelling Framework\n### Aliases: rocJM rocJM.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D fitLME <- lme(sqrt(CD4) ~ obstime * (drug + AZT + prevOI + gender), \n##D random = ~ obstime | patient, data = aids)\n##D fitSURV <- coxph(Surv(Time, death) ~ drug + AZT + prevOI + gender, \n##D data = aids.id, x = TRUE)\n##D fit.aids <- jointModel(fitLME, fitSURV, timeVar = \"obstime\", \n##D method = \"piecewise-PH-aGH\")\n##D \n##D # the following will take some time to execute...\n##D ND <- aids[aids$patient == \"7\", ]\n##D roc <- rocJM(fit.aids, dt = c(2, 4, 8), ND, idVar = \"patient\")\n##D roc\n## End(Not run)\n\n\n"} {"package":"JM","topic":"simulateJM","snippet":"### Name: simulate\n### Title: Simulate from Joint Models.\n### Aliases: simulateJM simulate.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D prothro$t0 <- as.numeric(prothro$time == 0)\n##D lmeFit <- lme(pro ~ treat * (time + t0), random = ~ time | id, data = prothro)\n##D survFit <- coxph(Surv(Time, death) ~ treat, data = prothros, x = TRUE)\n##D jointFit <- jointModel(lmeFit, survFit, timeVar = \"time\", \n##D method = \"weibull-PH-aGH\")\n##D \n##D newData <- simulate(jointFit, nsim = 1, times = seq(0, 11, len = 15))\n##D newData\n## End(Not run)\n\n\n"} {"package":"JM","topic":"summary.weibull.frailty","snippet":"### Name: summary.weibull.frailty\n### Title: Summary Method for weibull.frailty Objects\n### Aliases: summary.weibull.frailty\n### Keywords: methods\n\n### ** 
Examples\n\nfit <- weibull.frailty(Surv(time, status) ~ age + sex, kidney)\nsummary(fit)\nsummary(fit, TRUE)\n\n\n"} {"package":"JM","topic":"survfitJM","snippet":"### Name: survfitJM\n### Title: Prediction in Joint Models\n### Aliases: survfitJM survfitJM.jointModel\n### Keywords: methods\n\n### ** Examples\n\n# linear mixed model fit\nfitLME <- lme(sqrt(CD4) ~ obstime + obstime:drug, \n random = ~ 1 | patient, data = aids)\n# cox model fit\nfitCOX <- coxph(Surv(Time, death) ~ drug, data = aids.id, x = TRUE)\n\n# joint model fit\nfitJOINT <- jointModel(fitLME, fitCOX, \n timeVar = \"obstime\", method = \"weibull-PH-aGH\")\n\n# sample of the patients who are still alive\nND <- aids[aids$patient == \"141\", ]\nss <- survfitJM(fitJOINT, newdata = ND, idVar = \"patient\", M = 50)\nss\n\n\n"} {"package":"JM","topic":"wald.strata","snippet":"### Name: wald.strata\n### Title: Wald Test for Stratification Factors\n### Aliases: wald.strata\n### Keywords: multivariate regression\n\n### ** Examples\n\n## Not run: \n##D fitLME <- lme(log(serBilir) ~ drug * year - drug, random = ~ year | id, \n##D data = pbc2)\n##D fitSURV <- coxph(Surv(years, status2) ~ drug + strata(hepatomegaly), \n##D data = pbc2.id, x = TRUE)\n##D fit.pbc <- jointModel(fitLME, fitSURV, timeVar = \"year\", method = \"spline-PH-aGH\")\n##D wald.strata(fit.pbc)\n## End(Not run)\n\n\n"} {"package":"JM","topic":"weibull.frailty","snippet":"### Name: weibull.frailty\n### Title: Weibull Model with Gamma Frailties\n### Aliases: weibull.frailty\n### Keywords: multivariate regression\n\n### ** Examples\n\nweibull.frailty(Surv(time, status) ~ age + sex, kidney)\n\n\n"} {"package":"JM","topic":"xtable.jointModel","snippet":"### Name: xtable\n### Title: xtable Method from Joint Models.\n### Aliases: xtable.jointModel\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D require(xtable)\n##D prothro$t0 <- as.numeric(prothro$time == 0)\n##D lmeFit <- lme(pro ~ treat * (time + t0), random = ~ time | id, data = prothro)\n##D survFit <- coxph(Surv(Time, death) ~ treat, data = prothros, x = TRUE)\n##D jointFit <- jointModel(lmeFit, survFit, timeVar = \"time\", \n##D method = \"weibull-PH-aGH\")\n##D \n##D xtable(jointFit, math.style.negative = TRUE)\n## End(Not run)\n\n\n"} {"package":"hashr","topic":"hash","snippet":"### Name: hash\n### Title: Hash R objects to 32bit integers\n### Aliases: hash hash.default hash.character hash.list\n\n### ** Examples\n\n\n# hash some complicated R object (not a list).\nm <- lm(height ~ weight, data=women)\nhash(m)\n\n# hash a character vector element by element:\nx <- c(\"Call any vegetable\"\n , \"and the chances are good\"\n , \"that the vegetable will respond to you\")\nhash(x)\n\n# hash a character vector as one object:\nhash(x, recursive=FALSE)\n\n# hash a list recursively\nL <- strsplit(x,\" \")\nhash(L)\n\n# recursive really means recursive, so nested lists are recursed over:\nL <- list(\n x = 10\n , y = list(\n foo = \"bob\"\n , bar = lm(Sepal.Width ~ Sepal.Length, data=iris)\n )\n)\n\nhash(L)\nhash(L,recursive=FALSE)\n\n\n\n\n\n\n"} {"package":"splash","topic":"calc_daily_evap","snippet":"### Name: calc_daily_evap\n### Title: Calculate daily evaporation fluxes\n### Aliases: calc_daily_evap\n\n### ** Examples\n\nevap <- splash::calc_daily_evap(lat = 37.7,\n n = 172,\n elv = 142,\n y = 2000,\n sf = 1,\n tc = 23.0,\n sw = 0.9)\ncat(sprintf(\"Evaporation values:\\n\"))\ncat(sprintf(\" s: %0.6f Pa/K\\n\", evap$s_pa.k))\ncat(sprintf(\" Lv: %0.6f MJ/kg\\n\", (1e-6) * evap$lv_j.kg))\ncat(sprintf(\" Patm: 
%0.6f bar\\n\", (1e-5) * evap$patm_pa))\ncat(sprintf(\" pw: %0.6f kg/m^3\\n\", evap$pw_kg.m3))\ncat(sprintf(\" gamma: %0.6f Pa/K\\n\", evap$gam_pa.k))\ncat(sprintf(\" Econ: %0.6f mm^3/J\\n\", (1e9) * evap$econ_m3.j))\ncat(sprintf(\" Cn: %0.6f mm\\n\", evap$cond_mm))\ncat(sprintf(\" rx: %0.6f\\n\", evap$rx))\ncat(sprintf(\" hi: %0.6f degrees\\n\", evap$hi_deg))\ncat(sprintf(\" EET: %0.6f mm\\n\", evap$eet_mm))\ncat(sprintf(\" PET: %0.6f mm\\n\", evap$pet_mm))\ncat(sprintf(\" AET: %0.6f mm\\n\", evap$aet_mm))\n\n\n"} {"package":"splash","topic":"calc_daily_solar","snippet":"### Name: calc_daily_solar\n### Title: Calculate daily solar radiation fluxes\n### Aliases: calc_daily_solar\n\n### ** Examples\n\nsolar <- splash::calc_daily_solar(lat = 37.7,\n n = 172,\n elv = 142,\n y = 2000,\n sf = 1,\n tc = 23.0)\ncat(sprintf(\"Solar values:\\n\"))\ncat(sprintf(\" kn: %d\\n\", solar$kN))\ncat(sprintf(\" nu: %0.6f degrees\\n\", solar$nu_deg))\ncat(sprintf(\" lambda: %0.6f degrees\\n\", solar$lambda_deg))\ncat(sprintf(\" rho: %0.6f\\n\", solar$rho))\ncat(sprintf(\" dr: %0.6f\\n\", solar$dr))\ncat(sprintf(\" delta: %0.6f degrees\\n\", solar$delta_deg))\ncat(sprintf(\" ru: %0.6f\\n\", solar$ru))\ncat(sprintf(\" rv: %0.6f\\n\", solar$rv))\ncat(sprintf(\" rw: %0.6f\\n\", solar$rw))\ncat(sprintf(\" hs: %0.6f degrees\\n\", solar$hs_deg))\ncat(sprintf(\" hn: %0.6f degrees\\n\", solar$hn_deg))\ncat(sprintf(\" tau_o: %0.6f\\n\", solar$tau_o))\ncat(sprintf(\" tau: %0.6f\\n\", solar$tau))\ncat(sprintf(\" Qn: %0.6f mol/m^2\\n\", solar$ppfd_mol.m2))\ncat(sprintf(\" Rnl: %0.6f w/m^2\\n\", solar$rnl_w.m2))\ncat(sprintf(\" Ho: %0.6f MJ/m^2\\n\", (1.0e-6) * solar$ra_j.m2))\ncat(sprintf(\" Hn: %0.6f MJ/m^2\\n\", (1.0e-6) * solar$rn_j.m2))\ncat(sprintf(\" Hnn: %0.6f MJ/m^2\\n\", (1.0e-6) * solar$rnn_j.m2))\n\n\n"} {"package":"splash","topic":"run_one_day","snippet":"### Name: run_one_day\n### Title: Runs SPLASH at a single location for one day\n### Aliases: run_one_day\n\n### ** Examples\n\nsoil <- run_one_day(lat = 37.7,\n elv = 142,\n n = 172,\n y = 2000,\n wn = 75,\n sf = 1,\n tc = 23,\n pn = 5)\ncat(sprintf(\"Soil moisture (run one day):\\n\"))\ncat(sprintf(\" Ho: %0.6f J/m2\\n\", soil$ho))\ncat(sprintf(\" Hn: %0.6f J/m2\\n\", soil$hn))\ncat(sprintf(\" PPFD: %0.6f mol/m2\\n\", soil$ppfd))\ncat(sprintf(\" EET: %0.6f mm/d\\n\", soil$eet))\ncat(sprintf(\" PET: %0.6f mm/d\\n\", soil$pet))\ncat(sprintf(\" AET: %0.6f mm/d\\n\", soil$aet))\ncat(sprintf(\" Cn: %0.6f mm/d\\n\", soil$cond))\ncat(sprintf(\" Wn: %0.6f mm\\n\", soil$wn))\ncat(sprintf(\" RO: %0.6f mm\\n\", soil$ro))\n\n\n"} {"package":"splash","topic":"spin_up","snippet":"### Name: spin_up\n### Title: Calculate daily totals\n### Aliases: spin_up\n\n### ** Examples\n\ndaily_totals <- matrix(data = rep(0, 366), nrow = 366, ncol = 1)\ndaily_totals <- as.data.frame(daily_totals)\nnames(daily_totals) <- c(\"wn\")\nmy_file <- system.file(\"extdata/example_data.csv\", package = \"splash\")\nmy_data <- splash::read_csv(my_file, 2000)\nmy_data$lat_deg <- 37.7\nmy_data$elv_m <- 142\ndaily_totals <- splash::spin_up(my_data, daily_totals)\ncat(sprintf(\"Spin-Up:\\n\"))\nfor (i in seq(from = 1, to = my_data$num_lines, by = 1)) {\n if (i == 1) cat(sprintf(\"Day\\tWn (mm)\\n\"))\n cat(sprintf(\"%d\\t%0.6f\\n\", i, daily_totals$wn[i]))\n}\n\n\n"} {"package":"mixgb","topic":"createNA","snippet":"### Name: createNA\n### Title: Create missing values for a dataset\n### Aliases: createNA\n\n### ** Examples\n\n# Create 30% MCAR data across all variables in a dataset\nwithNA.df <- 
createNA(data = iris, p = 0.3)\n\n# Create 30% MCAR data in a specified variable in a dataset\nwithNA.df <- createNA(data = iris, var.names = c(\"Sepal.Length\"), p = 0.3)\n\n# Create MCAR data in several specified variables in a dataset\nwithNA.df <- createNA(\n data = iris,\n var.names = c(\"Sepal.Length\", \"Petal.Width\", \"Species\"),\n p = c(0.3, 0.2, 0.1)\n)\n\n\n"} {"package":"mixgb","topic":"data_clean","snippet":"### Name: data_clean\n### Title: Data cleaning\n### Aliases: data_clean\n\n### ** Examples\n\nrawdata <- nhanes3\n\nrawdata[4, 4] <- NaN\nrawdata[5, 5] <- Inf\nrawdata[6, 6] <- -Inf\n\ncleandata <- data_clean(rawdata = rawdata)\n\n\n"} {"package":"mixgb","topic":"impute_new","snippet":"### Name: impute_new\n### Title: Impute new data with a saved 'mixgb' imputer object\n### Aliases: impute_new\n\n### ** Examples\n\nset.seed(2022)\nn <- nrow(nhanes3)\nidx <- sample(1:n, size = round(0.7 * n), replace = FALSE)\ntrain.data <- nhanes3[idx, ]\ntest.data <- nhanes3[-idx, ]\n\nparams <- list(max_depth = 3, subsample = 0.7, nthread = 2)\nmixgb.obj <- mixgb(data = train.data, m = 2, xgb.params = params, nrounds = 10, save.models = TRUE)\n\n# obtain m imputed datasets for train.data\ntrain.imputed <- mixgb.obj$imputed.data\ntrain.imputed\n\n# use the saved imputer to impute new data\ntest.imputed <- impute_new(object = mixgb.obj, newdata = test.data)\ntest.imputed\n\n\n"} {"package":"mixgb","topic":"mixgb","snippet":"### Name: mixgb\n### Title: Multiple imputation through XGBoost\n### Aliases: mixgb\n\n### ** Examples\n\n# obtain m multiply imputed datasets without saving models\nparams <- list(max_depth = 3, subsample = 0.7, nthread = 2)\nmixgb.data <- mixgb(data = nhanes3, m = 2, xgb.params = params, nrounds = 10)\n\n# obtain m multiply imputed datasets and save models for imputing new data later on\nmixgb.obj <- mixgb(data = nhanes3, m = 2, xgb.params = params, nrounds = 10, save.models = TRUE)\n\n\n"} {"package":"mixgb","topic":"mixgb_cv","snippet":"### Name: mixgb_cv\n### Title: Use cross-validation to find the optimal 'nrounds'\n### Aliases: mixgb_cv\n\n### ** Examples\n\nparams <- list(max_depth = 3, subsample = 0.7, nthread = 2)\ncv.results <- mixgb_cv(data = nhanes3, xgb.params = params)\ncv.results$best.nrounds\n\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = cv.results$best.nrounds)\n\n\n"} {"package":"mixgb","topic":"plot_1num1fac","snippet":"### Name: plot_1num1fac\n### Title: Box plots with points for one numeric variable vs one factor (or\n### integer) variable.\n### Aliases: plot_1num1fac\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variables \"BMPHEAD\" versus \"HSSEX\"\nplot_1num1fac(\n imputation.list = imputed.data, var.num = \"BMPHEAD\", var.fac = \"HSSEX\",\n original.data = nhanes3\n)\n\n\n"} {"package":"mixgb","topic":"plot_1num2fac","snippet":"### Name: plot_1num2fac\n### Title: Box plots with overlaying data points for a numeric variable vs\n### a factor conditional on another factor\n### Aliases: plot_1num2fac\n\n### ** Examples\n\n# create some extra missing values in factor variables \"HSSEX\" and \"DMARETHN\"\nnhanes3_NA <- createNA(nhanes3, var.names = c(\"HSSEX\", \"DMARETHN\"), p = 0.1)\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3_NA, m = 3, xgb.params = 
params, nrounds = 30)\n\n# plot the multiply imputed values for variables \"BMPRECUM\" versus \"HSSEX\" conditional on \"DMARETHN\"\nplot_1num2fac(\n imputation.list = imputed.data, var.fac = \"HSSEX\", var.num = \"BMPRECUM\",\n con.fac = \"DMARETHN\", original.data = nhanes3_NA\n)\n\n\n"} {"package":"mixgb","topic":"plot_2fac","snippet":"### Name: plot_2fac\n### Title: Bar plots for two imputed factor variables\n### Aliases: plot_2fac\n\n### ** Examples\n\n# create some extra missing values in factor variables \"HSSEX\" and \"DMARETHN\"\nnhanes3_NA <- createNA(nhanes3, var.names = c(\"HSSEX\", \"DMARETHN\"), p = 0.1)\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3_NA, m = 3, xgb.params = params, nrounds = 30)\n\n\n# plot the multiply imputed values for variables \"HSSEX\" versus \"DMARETHN\"\nplot_2fac(\n imputation.list = imputed.data, var.fac1 = \"DMARETHN\", var.fac2 = \"HSSEX\",\n original.data = nhanes3_NA\n)\n\n\n"} {"package":"mixgb","topic":"plot_2num","snippet":"### Name: plot_2num\n### Title: Scatter plots for two imputed numeric variables\n### Aliases: plot_2num\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variables \"BMPRECUM\" versus \"BMPHEAD\"\nplot_2num(\n imputation.list = imputed.data, var.x = \"BMPHEAD\", var.y = \"BMPRECUM\",\n original.data = nhanes3\n)\n\n\n"} {"package":"mixgb","topic":"plot_2num1fac","snippet":"### Name: plot_2num1fac\n### Title: Scatter plots for two imputed numeric variables conditional on a\n### factor\n### Aliases: plot_2num1fac\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variables \"BMPRECUM\" versus \"BMPHEAD\" conditional on \"HSSEX\"\nplot_2num1fac(\n imputation.list = imputed.data, var.x = \"BMPHEAD\", var.y = \"BMPRECUM\",\n con.fac = \"HSSEX\", original.data = nhanes3\n)\n\n\n"} {"package":"mixgb","topic":"plot_bar","snippet":"### Name: plot_bar\n### Title: Bar plots for multiply imputed values for a single factor\n### variable\n### Aliases: plot_bar\n\n### ** Examples\n\n# create some extra missing values in a factor variable \"HSSEX\" (originally fully observed)\nnhanes3_NA <- createNA(nhanes3, var.names = \"HSSEX\", p = 0.1)\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3_NA, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variable \"HSSEX\"\nplot_bar(\n imputation.list = imputed.data, var.name = \"HSSEX\",\n original.data = nhanes3_NA\n)\n\n\n"} {"package":"mixgb","topic":"plot_box","snippet":"### Name: plot_box\n### Title: Boxplots with data points for multiply imputed values for a\n### single numeric variable\n### Aliases: plot_box\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variable \"BMPHEAD\"\nplot_box(\n imputation.list = imputed.data, var.name = \"BMPHEAD\",\n original.data = nhanes3\n)\n\n\n"} {"package":"mixgb","topic":"plot_hist","snippet":"### Name: plot_hist\n### 
Title: Histogram with density plots for multiply imputed values for a\n### single numeric variable\n### Aliases: plot_hist\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 0.8, nthread = 2)\nimputed.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 30)\n\n# plot the multiply imputed values for variable \"BMPHEAD\"\nplot_hist(\n imputation.list = imputed.data, var.name = \"BMPHEAD\",\n original.data = nhanes3\n)\n\n\n"} {"package":"mixgb","topic":"show_var","snippet":"### Name: show_var\n### Title: Show multiply imputed values for a single variable\n### Aliases: show_var\n\n### ** Examples\n\n# obtain m multiply imputed datasets\nparams <- list(max_depth = 3, subsample = 1, nthread = 2)\nmixgb.data <- mixgb(data = nhanes3, m = 3, xgb.params = params, nrounds = 20)\n\nimputed.BMPHEAD <- show_var(\n imputation.list = mixgb.data, var.name = \"BMPHEAD\",\n original.data = nhanes3\n)\nimputed.BMPHEAD\n\n\n"} {"package":"imcExperiment","topic":"cellIntensity","snippet":"### Name: cellIntensity\n### Title: finds the intensities getter.\n### Aliases: cellIntensity cellIntensity,imcExperiment-method\n### cellIntensity<- cellIntensity<-,imcExperiment,matrix-method\n\n### ** Examples\n\ndata(imcdata)\ndim(cellIntensity(imcdata))\ndata(imcdata);dim(cellIntensity(imcdata))\nhead(t(cellIntensity(imcdata)))\ndata(imcdata)\nx<-asinh(counts(imcdata))\ncellIntensity(imcdata)<-x\n\n\n"} {"package":"imcExperiment","topic":"getCoordinates<-","snippet":"### Name: getCoordinates<-\n### Title: Sets the coordinate positions of each cell (matrix), columns are\n### X,Y positions.\n### Aliases: getCoordinates<-\n\n### ** Examples\n\ndata(imcdata)\nx<-getCoordinates(imcdata)\ngetCoordinates(imcdata)<-as.matrix(x)\n\n\n"} {"package":"imcExperiment","topic":"getCoordinates","snippet":"### Name: getCoordinates\n### Title: finds the spatial coords, getter.\n### Aliases: getCoordinates getCoordinates,imcExperiment-method\n### getCoordinates<-,imcExperiment,matrix-method\n\n### ** Examples\n\ndata(imcdata)\ngetCoordinates(imcdata)\ndata(imcdata)\ngetCoordinates(imcdata)\ndata(imcdata)\nx<-getCoordinates(imcdata)\ngetCoordinates(imcdata)<-as.matrix(x)\n\n\n"} {"package":"imcExperiment","topic":"getDistance<-","snippet":"### Name: getDistance<-\n### Title: re-assigns the distance matrix (rows are cells)\n### Aliases: getDistance<-\n\n### ** Examples\n\ndata(imcdata)\nnewD<-matrix(1,nrow=ncol(imcdata),ncol=1)\ngetDistance(imcdata)<-newD\n\n\n"} {"package":"imcExperiment","topic":"getMorphology<-","snippet":"### Name: getMorphology<-\n### Title: re-assigns morphological features to be stored (matrix); rows\n### are cells and columns are Area, etc.\n### Aliases: getMorphology<-\n\n### ** Examples\n\ndata(imcdata)\nx<-matrix(1,nrow=ncol(imcdata),ncol=4)\ngetMorphology(imcdata)<-x\n\n\n"} {"package":"imcExperiment","topic":"getNeighborhood","snippet":"### Name: getNeighborhood\n### Title: finds the neighborhood information.\n### Aliases: getNeighborhood getNeighborhood,imcExperiment-method\n### getNeighborhood<- getNeighborhood<-,imcExperiment,matrix-method\n\n### ** Examples\n\ndata(imcdata)\ngetNeighborhood(imcdata)\ndata(imcdata)\nx<-matrix(1,nrow=ncol(imcdata),ncol=2)\ngetNeighborhood(imcdata)<-x\ndata(imcdata)\nx<-matrix(1,nrow=ncol(imcdata),ncol=2)\ngetNeighborhood(imcdata)<-x\n\n\n"} {"package":"imcExperiment","topic":"getNetwork<-","snippet":"### Name: getNetwork<-\n### Title: re-assigns the network assignment (matrix)\n### Aliases: getNetwork<-\n\n### ** 
Examples\n\ndata(imcdata)\nx<-data.frame(ID=seq_len(ncol(imcdata)))\ngetNetwork(imcdata)<-x\n\n\n"} {"package":"imcExperiment","topic":"imcExperiment-class","snippet":"### Name: imcExperiment-class\n### Title: a summarized experiment of IMC runs, dimensions of the spatial\n### and intensity data are regulated.\n### Aliases: imcExperiment-class .imcExperiment\n### getNetwork,imcExperiment-method getNetwork imcExperiment-method\n### getNetwork<-,imcExperiment,data.frame-method\n### getDistance,imcExperiment-method getDistance\n### getDistance<-,imcExperiment,matrix-method\n### getMorphology,imcExperiment-method getMorphology\n### getMorphology<-,imcExperiment,matrix-method spatial\n### getLabel,imcExperiment-method getLabel\n\n### ** Examples\n\nx<-imcExperiment(cellIntensity=matrix(1,nrow=10,ncol=10),\ncoordinates=matrix(1,nrow=10,ncol=2),\nneighborHood=matrix(1,nrow=10,ncol=10),\nnetwork=data.frame(matrix(1,nrow=10,ncol=10)),\ndistance=matrix(1,nrow=10,ncol=10),\nmorphology=matrix(1,nrow=10,ncol=10),\nuniqueLabel=paste0(\"A\",seq_len(10)),\npanel=letters[1:10],\nROIID=data.frame(ROIID=rep(\"A\",10)))\ndata(imcdata)\ngetNetwork(imcdata)\ndata(imcdata)\ngetNetwork(imcdata)\ndata(imcdata)\nx<-data.frame(ID=seq_len(ncol(imcdata)))\ngetNetwork(imcdata)<-x\ndata(imcdata)\ngetDistance(imcdata)\ndata(imcdata)\ngetDistance(imcdata)\ndata(imcdata)\nnewD<-matrix(1,nrow=ncol(imcdata),ncol=1)\ngetDistance(imcdata)<-newD\ndata(imcdata)\ngetMorphology(imcdata)\ndata(imcdata)\ngetMorphology(imcdata)\ndata(imcdata)\nx<-matrix(1,nrow=ncol(imcdata),ncol=4)\ngetMorphology(imcdata)<-x\ndata(imcdata)\ngetLabel(imcdata)\ndata(imcdata)\ngetLabel(imcdata)\n\n\n"} {"package":"imcExperiment","topic":"imcExperiment","snippet":"### Name: imcExperiment\n### Title: Initializes an imcExperiment and performs some rudimentary\n### checks. Many of the arguments CAN be NULL; determination of which is\n### required is done at run-time. 
An imcExperiment must contain at least\n### the expressions and spatial/coordinate assays.\n### Aliases: imcExperiment\n\n### ** Examples\n\nx<-imcExperiment(cellIntensity=matrix(1,nrow=10,ncol=10),\ncoordinates=matrix(1,nrow=10,ncol=2),\nneighborHood=matrix(1,nrow=10,ncol=10),\nnetwork=data.frame(matrix(1,nrow=10,ncol=10)),\ndistance=matrix(1,nrow=10,ncol=10),\nmorphology=matrix(1,nrow=10,ncol=10),\nuniqueLabel=paste0(\"A\",seq_len(10)),\npanel=letters[1:10],\nROIID=data.frame(ROIID=rep(\"A\",10)))\n\n\n\n"} {"package":"imcExperiment","topic":"imcExperimentToHyperFrame","snippet":"### Name: imcExperimentToHyperFrame\n### Title: map to point pattern from imcExperiment class.\n### Aliases: imcExperimentToHyperFrame\n\n### ** Examples\n\ndata(imcdata)\nH<-imcExperimentToHyperFrame(imcExperiment=imcdata,phenotypeToUse = 1)\n\n\n"} {"package":"imcExperiment","topic":"percentilenormalize","snippet":"### Name: percentilenormalize\n### Title: given a matrix of intensity counts, perform min/max norm.\n### Aliases: percentilenormalize\n\n### ** Examples\n\n data(data)\n dim(data)\n expr<-data[,3:36]\n normExp<-percentilenormalize(data=expr,percentile=0.99)\n normExp<-as.matrix(normExp)\n\n\n"} {"package":"imcExperiment","topic":"selectCases","snippet":"### Name: selectCases\n### Title: subsets the imcExperiment to a case along with all slots for a\n### selected multiple ROIs.\n### Aliases: selectCases selectCases,imcExperiment-method\n\n### ** Examples\n\ndata(imcdata)\nmyCases<-selectCases(imcdata,c(\"30-BM16-202_7Pre_s1_p1_r4_a4_ac\",\"B17_350_14post_s1_p1_r5_a5_ac\"))\nmyCases\ntable(colData(myCases)$ROIID)\ndata(imcdata)\nmyCases<-selectCases(imcdata,c(\"30-BM16-202_7Pre_s1_p1_r4_a4_ac\",\"B17_350_14post_s1_p1_r5_a5_ac\"))\nmyCases\ntable(colData(myCases)$ROIID)\n\n\n"} {"package":"imcExperiment","topic":"subsetCase","snippet":"### Name: subsetCase\n### Title: subsets the imcExperiment to a case along with all slots for a\n### single ROI, used for distance analysis\n### Aliases: subsetCase subsetCase,imcExperiment-method\n\n### ** Examples\n\ndata(imcdata)\nmyCase<-subsetCase(imcdata,\"30-BM16-202_7Pre_s1_p1_r4_a4_ac\")\nmyCase\ndata(imcdata)\nmyCase<-subsetCase(imcdata,\"30-BM16-202_7Pre_s1_p1_r4_a4_ac\")\nmyCase\n\n\n"} {"package":"highMLR","topic":"hnscc","snippet":"### Name: hnscc\n### Title: High dimensional head and neck cancer survival and gene\n### expression data\n### Aliases: hnscc\n### Keywords: datasets\n\n### ** Examples\n\ndata(hnscc)\n\n\n"} {"package":"highMLR","topic":"mlclassCox","snippet":"### Name: mlclassCox\n### Title: Applications of machine learning in survival analysis by\n### prognostic classification of genes by CoxPH model.\n### Aliases: mlclassCox\n\n### ** Examples\n\n## Not run: \n##D data(srdata)\n##D mlclassCox(m=50,n=59,idSurv=\"OS\",idEvent=\"event\",Time=\"Visit\",s_ID=\"ID\",per=20,fold=3,data=srdata)\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"mlclassKap","snippet":"### Name: mlclassKap\n### Title: Applications of machine learning in survival analysis by\n### prognostic classification of genes by Kaplan-Meier estimator.\n### Aliases: mlclassKap\n\n### ** Examples\n\n ## Not run: \n##D ##\n##D mlclassKap(m=50,n=59,idSurv=\"OS\",idEvent=\"event\",Time=\"Visit\",s_ID=\"ID\",per=20,fold=3,data=srdata)\n##D ##\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"mlhighCox","snippet":"### Name: mlhighCox\n### Title: mlhighCox\n### Aliases: mlhighCox\n\n### ** Examples\n\n## Not run: \n##D data(hnscc)\n##D mlhighCox(cols=c(6:15), idSurv=\"OS\", 
idEvent=\"Death\", per=20, fold = 3, data=hnscc)\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"mlhighFrail","snippet":"### Name: mlhighFrail\n### Title: mlhighFrail\n### Aliases: mlhighFrail\n\n### ** Examples\n\n## Not run: \n##D data(hnscc)\n##D mlhighFrail(cols=c(10:20), idSurv=\"OS\", idEvent=\"Death\", idFrail=\"ID\", dist=\"gaussian\",\n##D per=20, fold = 3, data=hnscc)\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"mlhighHet","snippet":"### Name: mlhighHet\n### Title: mlhighHet\n### Aliases: mlhighHet\n\n### ** Examples\n\n## Not run: \n##D data(hnscc)\n##D mlhighHet(cols=c(27:32), idSurv=\"OS\", idEvent=\"Death\", idFrail=\"ID\", num=2, fold = 3, data=hnscc)\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"mlhighKap","snippet":"### Name: mlhighKap\n### Title: mlhighKap\n### Aliases: mlhighKap\n\n### ** Examples\n\n## Not run: \n##D data(hnscc)\n##D mlhighKap(cols=c(6:15), idSurv=\"OS\", idEvent=\"Death\", per=20, fold = 3, data=hnscc)\n## End(Not run)\n\n\n"} {"package":"highMLR","topic":"srdata","snippet":"### Name: srdata\n### Title: High dimensional protein gene expression data\n### Aliases: srdata\n### Keywords: datasets\n\n### ** Examples\n\ndata(srdata)\n\n\n"} {"package":"rivernet","topic":"rivernet-package","snippet":"### Name: rivernet-package\n### Title: Read, Analyse and Plot River Networks\n### Aliases: rivernet-package rivernet\n### Keywords: package\n\n### ** Examples\n\ncoord <- data.frame(Reach_ID=c(1,1,2,2,2,2,2,3,3,4,4),\n X=c(5,5,5,7,8,9,10,5,0,0,2),\n Y=c(0,2,2,4,7,6, 8,2,6,6,7),\n Z=c(0,1,1,2,3,4, 5,1,2,2,3))\nattrib.reach <- data.frame(Reach_ID=c(1,2,3,4),\n State =c(0,0.2,0.8,0.8),\n Flow =c(4,2,2,2))\nattrib.node <- data.frame(X=c(5,5,0,10,2),\n Y=c(0,2,6, 8,7),\n Height=c(0,0,1,0,0))\nwrite.table(coord ,\"rivernet_example_coord.csv\",sep=\";\",col.names=TRUE,row.names=FALSE)\nwrite.table(attrib.reach,\"rivernet_example_reach.csv\",sep=\";\",col.names=TRUE,row.names=FALSE)\nwrite.table(attrib.node ,\"rivernet_example_node.csv\" ,sep=\";\",col.names=TRUE,row.names=FALSE)\nnet <- rivernet.read(\"rivernet_example_coord.csv\",\n \"rivernet_example_reach.csv\",\n \"rivernet_example_node.csv\",\n sep=\";\")\nplot(net,col=ifelse(net$attrib.reach$State<0.5,\"red\",\"blue\"),lwd=2,pch=19,cex.nodes=1.5,\n col.nodes=ifelse(is.na(net$attrib.node$Height),\n \"black\",\n ifelse(net$attrib.node$Height<0.1,\"blue\",\"red\")))\nnet <- splitreach(net,2,0.4)\nplot(net,col=ifelse(net$attrib.reach$State<0.5,\"red\",\"blue\"),lwd=2,pch=19,cex.nodes=1.5,\n col.nodes=ifelse(is.na(net$attrib.node$Height),\n \"black\",\n ifelse(net$attrib.node$Height<0.1,\"blue\",\"red\")))\nfile.remove(\"rivernet_example_coord.csv\")\nfile.remove(\"rivernet_example_reach.csv\")\nfile.remove(\"rivernet_example_node.csv\")\n\n\n"} {"package":"hybridModels","topic":"findContactChain","snippet":"### Name: findContactChain\n### Title: Finding elements in contact chains of a dynamic network.\n### Aliases: findContactChain\n\n### ** Examples\n\n# Loading data\ndata(networkSample) # help(\"networkSample\"), for more info.\n \n# contact chain function\nselected.nodes <- c(37501, 36811, 36812)\ncontact.chain <- findContactChain(Data = networkSample, from = 'originID',\n to = 'destinationID', Time = 'Day', selected.nodes,\n type = 'chain', numberOfcores = 2)\n\n\n"} {"package":"hybridModels","topic":"hybridModel","snippet":"### Name: hybridModel\n### Title: Hybrid model simulation.\n### Aliases: hybridModel\n\n### ** Examples\n\n# Migration model\n# Parameters and initial conditions for 
an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * I / (S + I)', 'gamma * I')\nstate.var <- c('S', 'I')\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow = 2, ncol = 2, byrow = TRUE)\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check the number of cores available (num.cores)\nsim.results <- hybridModel(network = networkSample, var.names = var.names,\n model.parms = model.parms, state.var = state.var,\n prop.func = prop.func, init.cond = init.cond,\n state.change.matrix = state.change.matrix,\n sim.number = 2, num.cores = 2)\n\n# default plot layout (plot.types: 'pop.mean', 'subpop', or 'subpop.mean')\nplot(sim.results, plot.type = 'subpop.mean')\n\n# changing plot layout with ggplot2 (example)\n# uncomment the lines below to test the new layout example\n#library(ggplot2)\n#plot(sim.results, plot.type = 'subpop') + ggtitle('New Layout') + \n# theme_bw() + theme(axis.title = element_text(size = 14, face = \"italic\"))\n\n# Influence model\n# Parameters and initial conditions for an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * (I + i) / (S + I + s + i)', 'gamma * I')\nstate.var <- c('S', 'I')\ninfl.var <- c(S = \"s\", I = \"i\") # mapping influence\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow = 2, ncol = 2, byrow = TRUE)\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check num of cores available (num.cores)\n# Uncomment to run\n# sim.results <- hybridModel(network = networkSample, var.names = var.names,\n# model.parms = model.parms, state.var = state.var,\n# infl.var = infl.var, prop.func = prop.func,\n# init.cond = init.cond,\n# state.change.matrix = state.change.matrix,\n# sim.number = 2, num.cores = 2)\n\n# default plot layout (plot.types: 'pop.mean', 'subpop', or 'subpop.mean')\n# plot(sim.results, plot.type = 'subpop.mean')\n\n\n\n"} {"package":"hybridModels","topic":"plot","snippet":"### Name: plot\n### Title: Summary plots for hybrid Models\n### Aliases: plot plot.HM\n\n### ** Examples\n\n# Parameters and initial conditions for an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * I / (S + I)', 'gamma * I')\nstate.var <- c('S', 'I')\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow 
\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check the number of cores available (num.cores)\nsim.results <- hybridModel(network = networkSample, var.names = var.names,\n model.parms = model.parms, state.var = state.var,\n prop.func = prop.func, init.cond = init.cond,\n state.change.matrix = state.change.matrix,\n sim.number = 2, num.cores = 2)\n\n# default plot layout (plot.types: 'pop.mean', 'subpop', or 'subpop.mean')\nplot(sim.results, plot.type = 'subpop.mean')\n\n# changing plot layout with ggplot2 (example)\n# uncomment the lines below to test the new layout example\n#library(ggplot2)\n#plot(sim.results, plot.type = 'subpop') + ggtitle('New Layout') + \n# theme_bw() + theme(axis.title = element_text(size = 14, face = \"italic\"))\n\n# Influence model\n# Parameters and initial conditions for an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * (I + i) / (S + I + s + i)', 'gamma * I')\nstate.var <- c('S', 'I')\ninfl.var <- c(S = \"s\", I = \"i\") # mapping influence variables\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow = 2, ncol = 2, byrow = TRUE)\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check the number of cores available (num.cores)\n# Uncomment to run\n# sim.results <- hybridModel(network = networkSample, var.names = var.names,\n# model.parms = model.parms, state.var = state.var,\n# infl.var = infl.var, prop.func = prop.func,\n# init.cond = init.cond,\n# state.change.matrix = state.change.matrix,\n# sim.number = 2, num.cores = 2)\n\n# default plot layout (plot.types: 'pop.mean', 'subpop', or 'subpop.mean')\n# plot(sim.results, plot.type = 'subpop.mean')\n\n\n\n"} {"package":"hybridModels","topic":"plot","snippet":"### Name: plot\n### Title: Summary plots for hybrid Models\n### Aliases: plot plot.HM\n\n### ** Examples\n\n# Parameters and initial conditions for an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * I / (S + I)', 'gamma * I')\nstate.var <- c('S', 'I')\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow = 2, ncol = 2, byrow = TRUE)\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check the number of cores available (num.cores)\nsim.results <- hybridModel(network = networkSample, var.names = var.names,\n model.parms = model.parms, state.var = state.var,\n prop.func = prop.func, init.cond = init.cond,\n state.change.matrix = state.change.matrix,\n sim.number = 2, num.cores = 2)\n\n# default plot layout (plot.types: 'pop.mean', 'subpop', or 'subpop.mean')\nplot(sim.results, plot.type = 'subpop.mean')\n\n# changing plot layout with ggplot2 (example)\n# uncomment the lines below to test the new layout example\n#library(ggplot2)\n#plot(sim.results, plot.type = 'subpop') + ggtitle('New Layout') + \n# theme_bw() + theme(axis.title = element_text(size = 14, face = \"italic\"))\n\n\n\n"} {"package":"hybridModels","topic":"summary","snippet":"### Name: summary\n### Title: summary for hybrid models\n### Aliases: summary summary.HM\n\n### ** Examples\n\n# Parameters and initial conditions for an SIS model\n# loading the data set \ndata(networkSample) # help(\"networkSample\"), for more info\nnetworkSample <- networkSample[which(networkSample$Day < \"2012-03-20\"),]\n\nvar.names <- list(from = 'originID', to = 'destinationID', Time = 'Day',\n arc = 'num.animals')\n \nprop.func <- c('beta * S * I / (S + I)', 'gamma * I')\nstate.var <- c('S', 'I')\nstate.change.matrix <- matrix(c(-1, 1, # S\n 1, -1), # I\n nrow = 2, ncol = 2, byrow = TRUE)\n \nmodel.parms <- c(beta = 0.1, gamma = 0.01)\n\ninit.cond <- rep(100, length(unique(c(networkSample$originID,\n networkSample$destinationID))))\nnames(init.cond) <- paste('S', unique(c(networkSample$originID,\n networkSample$destinationID)), sep = '')\ninit.cond <- c(init.cond, c(I36811 = 10, I36812 = 10)) # adding infection\n \n# running simulations, check the number of cores available (num.cores)\nsim.results <- hybridModel(network = networkSample, var.names = var.names,\n model.parms = model.parms, state.var = state.var,\n prop.func = prop.func, init.cond = init.cond,\n state.change.matrix = state.change.matrix,\n sim.number = 4, num.cores = 2)\n\nsummary(sim.results, stateVars = c('S', 'I'), nodes = c(36812, 36813))\n\n\n\n"} {"package":"scDIFtest","topic":"scDIFtest","snippet":"### Name: scDIFtest\n### Title: A score-based item-wise DIF test\n### Aliases: scDIFtest\n\n### ** Examples\n\n## No test: \nlibrary(mirt)\nlibrary(scDIFtest)\n### data and model\ndat <- expand.table(LSAT7)\nnObs <- dim(dat)[1]\nmod <- mirt(dat, 1, itemtype = \"2PL\", constr = list(c(2, 1)))\n\n### DIF along a metric variable\n### the default test statistic is the Double Maximum (dm)\nmetric <- rnorm(nObs) \nDIF_metric <- scDIFtest(mod, DIF_covariate = metric)\nDIF_metric\nplot(DIF_metric, 1)\n\n### DIF along an ordered categorical variable\n### the default test statistic is the Maximum Lagrange Multiplier Test \n### for Ordered Groups (maxlmo)\nordered <- ordered(sample(1:5, size = nObs, replace = TRUE))\nDIF_ordered <- scDIFtest(mod, DIF_covariate = ordered)\nsummary(DIF_ordered)\n\n### Note that the Generalized Empirical M-Fluctuation Process (gefp) based on all\n### the estimated parameters in the model is an element of the resulting\n### scDIFtest object. 
This means that one can use this gefp to test the\n### general hypothesis of measurement invariance with respect to the\n### chosen covariate.\n strucchange::sctest(DIF_metric$gefp)\n strucchange::sctest(DIF_ordered$gefp)\n## End(No test)\n\n\n\n\n"} {"package":"polished","topic":"bundle_app","snippet":"### Name: bundle_app\n### Title: Create a tar archive\n### Aliases: bundle_app\n\n### ** Examples\n\n\n## Not run: \n##D bundle_app(\n##D system.file(\"examples/polished_example_01\", package = \"polished\")\n##D )\n## End(Not run)\n\n\n\n\n"} {"package":"polished","topic":"deploy_app","snippet":"### Name: deploy_app\n### Title: Deploy a Shiny app to Polished Hosting\n### Aliases: deploy_app\n\n### ** Examples\n\n\n## Not run: \n##D deploy_app(\n##D app_name = \"polished_example_01\",\n##D app_dir = system.file(\"examples/polished_example_01\", package = \"polished\"),\n##D api_key = \"\"\n##D )\n## End(Not run)\n\n\n\n\n"} {"package":"polished","topic":"firebase_dependencies","snippet":"### Name: firebase_dependencies\n### Title: Load the Firebase JavaScript dependencies into the UI\n### Aliases: firebase_dependencies\n\n### ** Examples\n\n\nfirebase_dependencies()\n\n\n\n"} {"package":"polished","topic":"firebase_init","snippet":"### Name: firebase_init\n### Title: Initialize Firebase\n### Aliases: firebase_init\n\n### ** Examples\n\n\n\n## Not run: \n##D my_config <- list(\n##D apiKey = \"your Firebase API key\",\n##D authDomain = \"your Firebase auth domain\",\n##D projectId = \"your Firebase Project ID\"\n##D )\n##D \n##D firebase_init(my_config)\n## End(Not run)\n\n\n"} {"package":"polished","topic":"polished_config","snippet":"### Name: polished_config\n### Title: global configuration for 'polished' authentication\n### Aliases: polished_config global_sessions_config\n\n### ** Examples\n\n\n## Not run: \n##D # global.R\n##D \n##D polished_config(\n##D app_name = \"\",\n##D api_key = \"\",\n##D firebase_config = list(\n##D apiKey = \"\",\n##D authDomain = \"\"\n##D ),\n##D sign_in_providers = c(\n##D \"email\",\n##D \"google\",\n##D \"microsoft\"\n##D )\n##D )\n##D \n## End(Not run)\n\n\n\n"} {"package":"polished","topic":"secure_rmd","snippet":"### Name: secure_rmd\n### Title: Render and secure R Markdown document\n### Aliases: secure_rmd\n\n### ** Examples\n\n\n## Not run: \n##D \n##D secure_rmd(system.file(\"examples/rmds/flexdashboard.Rmd\", package = \"polished\"))\n##D secure_rmd(\n##D system.file(\"examples/rmds/flexdashboard.Rmd\", package = \"polished\"),\n##D polished_config_args = list(\n##D # any values in this list will override values in YAML header\n##D app_name = \"different_name\"\n##D ),\n##D sign_in_page_args = list(\n##D color = \"#FF5700\"\n##D )\n##D )\n##D secure_rmd(system.file(\"examples/rmds/flexdashboard_shiny.Rmd\", package = \"polished\"))\n##D secure_rmd(system.file(\"examples/rmds/html_document.Rmd\", package = \"polished\"))\n##D secure_rmd(system.file(\"examples/rmds/pdf_document.Rmd\", package = \"polished\"))\n##D io_file_path <- system.file(\n##D \"examples/rmds/ioslides/ioslides_presentation.Rmd\",\n##D package = \"polished\"\n##D )\n##D secure_rmd(io_file_path)\n## End(Not run)\n\n\n"} {"package":"polished","topic":"set_api_key","snippet":"### Name: set_api_key\n### Title: set Polished API key\n### Aliases: set_api_key get_api_key\n\n### ** Examples\n\n\nset_api_key(api_key = \"\")\n\n\n\n\n"} {"package":"WRTDStidal","topic":"aiccrq","snippet":"### Name: aiccrq\n### Title: Akaike's Information Criterion for weighted quantile regression\n### 
Aliases: aiccrq\n\n### ** Examples\n\n# get wts for a model centered on the first observation\nref_in <- tidobj[1, ]\nref_wts <- getwts(tidobj, ref_in)\n\n# get the model\nmod <- quantreg::crq(\n survival::Surv(res, not_cens, type = \"left\") ~ \n dec_time + flo + sin(2*pi*dec_time) + cos(2*pi*dec_time), \n weights = ref_wts,\n data = tidobj, \n method = \"Portnoy\"\n )\n\naiccrq(mod)\n\n\n"} {"package":"WRTDStidal","topic":"all_sims","snippet":"### Name: all_sims\n### Title: Simulate a response variable time series using all functions\n### Aliases: all_sims\n\n### ** Examples\n\n## Not run: \n##D ## example data\n##D data(daydat)\n##D \n##D ## simulate\n##D tmp <- all_sims(daydat)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"annual_agg","snippet":"### Name: annual_agg\n### Title: Create annual aggregations of WRTDS output\n### Aliases: annual_agg annual_agg.default\n\n### ** Examples\n\n## tidal object\nannual_agg(tidfit)\n\n## tidalmean object\nannual_agg(tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"chllab","snippet":"### Name: chllab\n### Title: Chlorophyll axis label\n### Aliases: chllab\n\n### ** Examples\n\n\n## default\nchllab()\n\n\n"} {"package":"WRTDStidal","topic":"createsrch","snippet":"### Name: createsrch\n### Title: Create a grid of half-window widths to evaluate\n### Aliases: createsrch\n\n### ** Examples\n\ncreatesrch()\ncreatesrch(1, 1, 1)\n\n\n"} {"package":"WRTDStidal","topic":"dec_time","snippet":"### Name: dec_time\n### Title: Create decimal time from time vector\n### Aliases: dec_time dec_time.Date\n\n### ** Examples\n\ndt <- Sys.Date()\ndts <- seq.Date(dt - 365, dt, by = 'day') \n\ndec_time(dts)\n\n\n"} {"package":"WRTDStidal","topic":"dynaplot","snippet":"### Name: dynaplot\n### Title: Plot model response to salinity or flow as a lineplot for all\n### months\n### Aliases: dynaplot dynaplot.tidal dynaplot.tidalmean\n\n### ** Examples\n\n\n# load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults, \n# defaults to the fiftieth quantile for all years\ndynaplot(tidfit)\n## Not run: \n##D # change the defaults\n##D dynaplot(tidfit, tau = 0.9, month = 2, years = seq(1980, 1990), \n##D col_vec = rainbow(7), alpha = 0.5, size = 3) \n##D \n##D # plot a tidalmean object\n##D data(tidfitmean)\n##D \n##D dynaplot(tidfitmean)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"fitmoplot","snippet":"### Name: fitmoplot\n### Title: Plot the fitted results for a tidal object by month\n### Aliases: fitmoplot fitmoplot.tidal fitmoplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults\nfitmoplot(tidfit)\n## Not run: \n##D \n##D # get the same plot but use default ggplot settings\n##D fitmoplot(tidfit, pretty = FALSE)\n##D \n##D # plot specific quantiles\n##D fitmoplot(tidfit, tau = c(0.1, 0.9))\n##D \n##D # plot the normalized predictions\n##D fitmoplot(tidfit, predicted = FALSE)\n##D \n##D # modify the plot as needed using ggplot scales, etc.\n##D \n##D library(ggplot2)\n##D \n##D fitmoplot(tidfit, pretty = FALSE, linetype = 'dashed') + \n##D theme_classic() + \n##D scale_y_continuous(\n##D 'Chlorophyll', \n##D limits = c(0, 5)\n##D ) +\n##D scale_colour_manual( \n##D 'Predictions', \n##D labels = c('lo', 'md', 'hi'), \n##D values = c('red', 'green', 'blue'), \n##D guide = guide_legend(reverse = TRUE)\n##D ) \n##D \n##D # plot a tidalmean object\n##D data(tidfitmean)\n##D \n##D fitmoplot(tidfitmean) \n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"fitplot","snippet":"### Name: 
fitplot\n### Title: Plot the fitted results for a tidal object\n### Aliases: fitplot fitplot.tidal fitplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults\nfitplot(tidfit)\n\n# get the same plot but use default ggplot settings\nfitplot(tidfit, pretty = FALSE)\n\n# plot in log space\nfitplot(tidfit, logspace = TRUE)\n\n# plot specific quantiles\nfitplot(tidfit, tau = c(0.1, 0.9))\n\n# plot the normalized predictions\nfitplot(tidfit, predicted = FALSE)\n\n# plot as monthly values\nfitplot(tidfit, annuals = FALSE) \n\n# format the x-axis if using annual aggregations\nlibrary(ggplot2)\n\nfitplot(tidfit, annual = TRUE) + \n scale_x_date(limits = as.Date(c('2000-01-01', '2012-01-01')))\n\n# modify the plot as needed using ggplot scales, etc.\n\nfitplot(tidfit, pretty = FALSE, linetype = 'dashed') + \n theme_classic() + \n scale_y_continuous(\n 'Chlorophyll', \n limits = c(0, 50)\n ) +\n scale_colour_manual( \n 'Predictions', \n labels = c('lo', 'md', 'hi'), \n values = c('red', 'green', 'blue'), \n guide = guide_legend(reverse = TRUE)\n ) \n \n# plot a tidalmean object\ndata(tidfitmean)\n\nfitplot(tidfitmean) \n\n\n"} {"package":"WRTDStidal","topic":"getwts","snippet":"### Name: getwts\n### Title: Get weights for regression\n### Aliases: getwts getwts.default\n\n### ** Examples\n\n##\ndata(tidobj)\n\n# get weights for first row\nfirst <- tidobj[1, ]\nwts <- getwts(tidobj, first)\n\nplot(wts, type = 'l')\n\n## Not run: \n##D \n##D # get count of observations with grzero weights\n##D sapply(1:nrow(tidobj), function(row) getwts(tidobj, tidobj[row, ], \n##D ngrzero = TRUE))\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"goodfit","snippet":"### Name: goodfit\n### Title: Quantile regression goodness of fit\n### Aliases: goodfit\n\n### ** Examples\n\n\nlibrary(quantreg)\n\n## random variables\nx <- runif(100, 0, 10)\ny <- x + rnorm(100)\n\n## quantile model\nmod <- rq(y ~ x, tau = 0.5)\nres <- resid(mod)\n\n## non-conditional quantile model\nmod_nl <- rq(y ~ 1, tau = 0.5)\nrsd_nl <- resid(mod_nl)\n\ngoodfit(res, rsd_nl, 0.5)
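\n\n# goodfit is based on the Koenker & Machado (1999) goodness of fit measure\n# for quantile regression; a minimal sketch of the idea, assuming the\n# tau-weighted check loss on the model and null residuals:\nrho <- function(r, tau) r * (tau - (r < 0))\n1 - sum(rho(res, 0.5)) / sum(rho(rsd_nl, 0.5))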
\n\n## r2 of mean model for comparison\nmod_lm <- lm(y ~ x)\n\nsummary(mod_lm)$r.squared\n\n\n"} {"package":"WRTDStidal","topic":"gradcols","snippet":"### Name: gradcols\n### Title: Get colors for plots\n### Aliases: gradcols\n\n### ** Examples\n\n\n## defaults\ngradcols()\n\n## another RColorBrewer palette\ngradcols('Pastel2')\n\n## a silly example\ngradcols(rainbow(7))\n\n\n"} {"package":"WRTDStidal","topic":"gridplot","snippet":"### Name: gridplot\n### Title: Plot variable response to salinity/flow as a gridded surface for\n### all months\n### Aliases: gridplot gridplot.tidal gridplot.tidalmean\n\n### ** Examples\n\n## Not run: \n##D ## load a fitted tidal object\n##D data(tidfit)\n##D \n##D ## defaults to the fiftieth quantile\n##D gridplot(tidfit)\n##D \n##D ## no facets, all months\n##D gridplot(tidfit, month = 'all')\n##D \n##D ## change the defaults\n##D gridplot(tidfit, tau = c(0.1), month = c(3, 6, 9, 12), \n##D col_vec = c('red', 'blue', 'green'), flo_fac = 1)\n##D \n##D ## plot a tidalmean object\n##D data(tidfitmean)\n##D \n##D gridplot(tidfitmean)\n##D \n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"kendallSeasonalTrendTest","snippet":"### Name: kendallSeasonalTrendTest\n### Title: Kendall seasonal trend test\n### Aliases: kendallSeasonalTrendTest kendallSeasonalTrendTest.default\n### kendallSeasonalTrendTest.data.frame kendallSeasonalTrendTest.formula\n### kendallSeasonalTrendTest.matrix\n\n### ** Examples\n\nkendallSeasonalTrendTest(res ~ month + year, tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"kendallTrendTest","snippet":"### Name: kendallTrendTest\n### Title: Kendall trend test\n### Aliases: kendallTrendTest kendallTrendTest.default\n### kendallTrendTest.formula\n\n### ** Examples\n\nkendallTrendTest(res ~ dec_time, tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"lnQ_sim","snippet":"### Name: lnQ_sim\n### Title: Simulate a discharge time series\n### Aliases: lnQ_sim\n\n### ** Examples\n\n\n## example data\ndata(daydat)\n\n## simulate\nlnQ_sim(daydat)\n\n\n\n"} {"package":"WRTDStidal","topic":"lnres_err","snippet":"### Name: lnres_err\n### Title: Simulate random errors from a time series\n### Aliases: lnres_err\n\n### ** Examples\n\n## Not run: \n##D ## example data\n##D data(daydat)\n##D \n##D ## get errors\n##D lnres_err(daydat)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"lnres_sim","snippet":"### Name: lnres_sim\n### Title: Simulate a water quality time series\n### Aliases: lnres_sim\n\n### ** Examples\n\n## Not run: \n##D ## example data\n##D data(daydat)\n##D \n##D ## get simulated discharge\n##D sims <- lnQ_sim(daydat)\n##D \n##D ## get error structure of wq time series\n##D sims <- lnres_err(sims)\n##D \n##D ## get simulated wq time series using results from previous\n##D lnres_sim(sims)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"modfit","snippet":"### Name: modfit\n### Title: Fit weighted regression and get predicted/normalized response\n### variable\n### Aliases: modfit modfit.default modfit.tidal modfit.tidalmean\n### modfit.data.frame\n\n### ** Examples\n\n## Not run: \n##D ## load data\n##D data(chldat)\n##D \n##D ## fit the model and get predicted/normalized data for response variable\n##D # default median fit\n##D # grids predicted across salinity range with ten values\n##D res <- modfit(chldat)\n##D \n##D # for mean models\n##D res <- modfit(chldat, resp_type = 'mean')\n##D \n##D ## fit different quantiles and smaller interpolation grid\n##D res <- modfit(chldat, tau = c(0.2, 0.8), flo_div = 5)\n##D \n##D ## fit with different window widths\n##D # half-window widths of one day, five years, and 0.3 salinity\n##D res <- modfit(chldat, wins = list(1, 5, 0.3))\n##D \n##D ## suppress console output\n##D res <- modfit(chldat, trace = FALSE)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"nobsplot","snippet":"### Name: nobsplot\n### Title: Plot number of observations in a WRTDS interpolation grid\n### Aliases: nobsplot nobsplot.default nobsplot.tidal nobsplot.tidalmean\n\n### ** Examples\n\n## Not run: \n##D ## load a fitted tidal object\n##D data(tidfit)\n##D \n##D ## default plot\n##D nobsplot(tidfit)\n##D \n##D ## no facets, all months\n##D nobsplot(tidfit)\n##D \n##D ## change the defaults\n##D nobsplot(tidfit, tau = c(0.1), month = c(3, 6, 9, 12), \n##D col_vec = c('red', 'blue', 'green'), flo_fac = 1)\n##D \n##D ## plot a tidalmean object\n##D data(tidfitmean)\n##D \n##D nobsplot(tidfitmean)\n##D \n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"obsplot","snippet":"### Name: obsplot\n### Title: Plot observed response variable and salinity/flow data\n### Aliases: obsplot obsplot.default obsplot.tidal obsplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n## plot using defaults\nobsplot(tidfit)\n \n## changing default\nobsplot(tidfit, alpha = 0.5, size = 4, col = 'blue', lines = FALSE)\n\n## plot a tidalmean 
object\ndata(tidfitmean)\n\nobsplot(tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"prdnrmplot","snippet":"### Name: prdnrmplot\n### Title: Plot combined predicted and normalized results from a tidal\n### object\n### Aliases: prdnrmplot prdnrmplot.tidal prdnrmplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n## plot using defaults\nprdnrmplot(tidfit)\n\n## get the same plot but use default ggplot settings\nprdnrmplot(tidfit, pretty = FALSE)\n\n## plot in log space\nprdnrmplot(tidfit, logspace = TRUE)\n\n## plot specific quantiles\nprdnrmplot(tidfit, tau = c(0.1, 0.9))\n\n## plot the normalized predictions\nprdnrmplot(tidfit, predicted = FALSE)\n\n## plot as monthly values\nprdnrmplot(tidfit, annuals = FALSE) \n\n## format the x-axis if using annual aggregations\nlibrary(ggplot2)\n\nprdnrmplot(tidfit, annual = TRUE) + \n scale_x_date(limits = as.Date(c('2000-01-01', '2012-01-01')))\n\n## modify the plot as needed using ggplot scales, etc.\nprdnrmplot(tidfit, pretty = FALSE, linetype = 'dashed') + \n theme_classic() + \n scale_y_continuous(\n 'Chlorophyll', \n limits = c(0, 50)\n ) +\n scale_colour_manual( \n '', \n labels = c('lo', 'md', 'hi'), \n values = c('red', 'green', 'blue'), \n guide = guide_legend(reverse = TRUE)\n ) \n \n ## plot a tidalmean object\n data(tidfitmean)\n \n prdnrmplot(tidfitmean)\n \n\n\n"} {"package":"WRTDStidal","topic":"resnorm","snippet":"### Name: resnorm\n### Title: Get salinity/flow normalized WRTDS predictions from\n### interpolation grids\n### Aliases: resnorm resnorm.tidal resnorm.tidalmean\n\n### ** Examples\n\n## Not run: \n##D ##\n##D \n##D # load a tidal object\n##D data(tidobj)\n##D \n##D # get flow-normalized values for each quantile\n##D res <- resnorm(tidobj)\n##D \n##D # load a tidalmean object\n##D data(tidobjmean)\n##D \n##D # get flow-normalized values\n##D res <- resnorm(tidobjmean)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"respred","snippet":"### Name: respred\n### Title: Get WRTDS predictions from interpolation grids\n### Aliases: respred respred.tidal respred.tidalmean\n\n### ** Examples\n\n##\n\n# load a tidal object\ndata(tidobj)\n\n# get fitted values for each quantile\nres <- respred(tidobj)\n\n# load a tidalmean object\ndata(tidobjmean)\n\n# get predicted values\nres <- respred(tidobjmean)\n\n\n\n"} {"package":"WRTDStidal","topic":"resscls","snippet":"### Name: resscls\n### Title: Get the scale parameters for predicted values\n### Aliases: resscls resscls.tidalmean\n\n### ** Examples\n\n##\n\n# load a tidalmean object\ndata(tidobjmean)\n\n# get predicted values\nres <- resscls(tidobjmean)\n\n\n\n"} {"package":"WRTDStidal","topic":"samp_sim","snippet":"### Name: samp_sim\n### Title: Sample a daily time series at a set frequency\n### Aliases: samp_sim\n\n### ** Examples\n\n## Not run: \n##D ## example data\n##D data(daydat)\n##D \n##D ## simulate\n##D tosamp <- all_sims(daydat)\n##D \n##D ## sample\n##D samp_sim(tosamp)\n##D \n##D ## sample and create test dataset\n##D # test dataset is 30% size of monthly subsample using block sampling with size = 4\n##D samp_sim(tosamp, missper = 0.3, blck = 4)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"seasplot","snippet":"### Name: seasplot\n### Title: Plot seasonal trends across all years\n### Aliases: seasplot seasplot.tidal seasplot.tidalmean\n\n### ** Examples\n\n# load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults\n# defaults to all quantiles for tidal object\nseasplot(tidfit)\n\n# tidalmean 
object\nseasplot(tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"seasyrplot","snippet":"### Name: seasyrplot\n### Title: Plot seasonal model response by years\n### Aliases: seasyrplot seasyrplot.tidal seasyrplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults\nseasyrplot(tidfit)\n\n# get the same plot but use default ggplot settings\nseasyrplot(tidfit, pretty = FALSE)\n\n# plot specific quantiles\nseasyrplot(tidfit, tau = c(0.9))\n\n# plot the normalized predictions\nseasyrplot(tidfit, predicted = FALSE)\n\n# modify the plot as needed using ggplot scales, etc.\n\nlibrary(ggplot2)\n\nseasyrplot(tidfit, pretty = FALSE, linetype = 'dashed') + \n theme_classic() + \n scale_y_continuous(\n 'Chlorophyll', \n limits = c(0, 5)\n )\n \n# plot a tidalmean object\ndata(tidfitmean)\n\nseasyrplot(tidfitmean) \n\n\n"} {"package":"WRTDStidal","topic":"sliceplot","snippet":"### Name: sliceplot\n### Title: Plot time slices within a tidal object\n### Aliases: sliceplot sliceplot.tidal sliceplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n# plot using defaults\nsliceplot(tidfit)\n\n# get different months - march and september\nsliceplot(tidfit, slices = c(3, 9))\n\n# normalized predictions, 10th percentile\nsliceplot(tidfit, tau = 0.1, predicted = FALSE)\n\n# normalized values all months, change line aesthetics, log-space, 90th \n# add title\nlibrary(ggplot2)\nsliceplot(tidfit, \n slices = 1:12, \n size = 1.5, \n tau = 0.9, \n alpha = 0.6, \n predicted = FALSE, \n logspace = TRUE\n) + \nggtitle('Normalized predictions for all months, 90th percentile')\n\n ## plot a tidalmean object\n data(tidfitmean)\n \n sliceplot(tidfitmean)\n\n\n"} {"package":"WRTDStidal","topic":"tidal","snippet":"### Name: tidal\n### Title: Create a tidal class object\n### Aliases: tidal\n\n### ** Examples\n\n## raw data\n\ndata(chldat)\n\n## format\nchldat <- tidal(chldat)\n\n\n\n"} {"package":"WRTDStidal","topic":"tidalmean","snippet":"### Name: tidalmean\n### Title: Create a tidalmean class object\n### Aliases: tidalmean\n\n### ** Examples\n\n## raw data\n\ndata(chldat)\n\n## format\nchldat <- tidalmean(chldat)\n\n\n\n"} {"package":"WRTDStidal","topic":"winsrch_constrOptim","snippet":"### Name: winsrch_constrOptim\n### Title: Find the optimal half-window width combination\n### Aliases: winsrch_constrOptim winsrch_constrOptim.default\n\n### ** Examples\n\n## Not run: \n##D # setup parallel backend\n##D library(doParallel)\n##D ncores <- detectCores() - 1 \n##D registerDoParallel(cores = ncores)\n##D \n##D # run search function - takes a while\n##D res <- winsrch_constrOptim(tidobjmean)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"winsrch_grid","snippet":"### Name: winsrch_grid\n### Title: Evaluate half-window width combinations\n### Aliases: winsrch_grid winsrch_grid.default\n\n### ** Examples\n\n## Not run: \n##D ##\n##D # setup parallel backend\n##D library(doParallel)\n##D ncores <- detectCores() - 2 \n##D registerDoParallel(cores = ncores)\n##D \n##D # run search function using default search grid - takes a while\n##D res <- winsrch_grid(tidobjmean)\n##D \n##D # view the error surface \n##D library(ggplot2)\n##D ggplot(res, aes(x = factor(mos), y = factor(yrs), fill = err)) +\n##D geom_tile() + \n##D facet_wrap(~ flo) + \n##D scale_x_discrete(expand = c(0, 0)) +\n##D scale_y_discrete(expand = c(0,0)) +\n##D scale_fill_gradientn(colours = gradcols()) \n##D \n##D # optimal combo\n##D res[which.min(res$err), ]\n##D \n##D ##\n##D # 
create a custom search grid, e.g. years only\n##D grid_in <- createsrch(mos = 1, yrs = seq(1, 10), flo = 1)\n##D \n##D res <- winsrch_grid(tidobjmean, grid_in)\n##D \n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"winsrch_optim","snippet":"### Name: winsrch_optim\n### Title: Find the optimal half-window width combination\n### Aliases: winsrch_optim winsrch_optim.default\n\n### ** Examples\n\n## Not run: \n##D # setup parallel backend\n##D library(doParallel)\n##D ncores <- detectCores() - 1 \n##D registerDoParallel(cores = ncores)\n##D \n##D # run search function - takes a while\n##D res <- winsrch_optim(tidobjmean)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"wrtds","snippet":"### Name: wrtds\n### Title: Get WRTDS prediction grid\n### Aliases: wrtds wrtds.tidal wrtds.tidalmean\n\n### ** Examples\n\n## Not run: \n##D ## load data\n##D data(chldat)\n##D \n##D ## as tidal object\n##D dat_in <- tidal(chldat)\n##D res <- wrtds(dat_in)\n##D \n##D ## as tidalmean object\n##D dat_in <- tidalmean(chldat)\n##D res <- wrtds(dat_in)\n##D \n##D ## multiple quantiles\n##D res <- wrtds(dat_in, tau = c(0.1, 0.5, 0.9))\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"wrtdscv","snippet":"### Name: wrtdscv\n### Title: Use k-fold cross-validation to evaluate WRTDS model fit\n### Aliases: wrtdscv wrtdscv.default\n\n### ** Examples\n\n## Not run: \n##D \n##D library(doParallel)\n##D ncores <- detectCores() - 1 \n##D registerDoParallel(cores = ncores)\n##D \n##D # half-window widths to evaluate\n##D # months, years, and salinity/flow\n##D wins <- list(0.5, 10, 0.5) \n##D \n##D # get ocv score for k = 10\n##D wrtdscv(tidobjmean, wins = wins)\n##D \n##D # get ocv score k = 2, tau = 0.2 \n##D wrtdscv(tidobj, wins = wins, tau = 0.2)\n## End(Not run)\n\n\n"} {"package":"WRTDStidal","topic":"wrtdsperf","snippet":"### Name: wrtdsperf\n### Title: Get WRTDS performance metrics\n### Aliases: wrtdsperf wrtdsperf.tidal wrtdsperf.tidalmean\n\n### ** Examples\n\n## load a fitted model object\ndata(tidfit)\n\n## get performance metrics\nwrtdsperf(tidfit)\n\n\n"} {"package":"WRTDStidal","topic":"wrtdsrsd","snippet":"### Name: wrtdsrsd\n### Title: Get WRTDS residuals\n### Aliases: wrtdsrsd wrtdsrsd.tidal wrtdsrsd.tidalmean\n\n### ** Examples\n\n## load a fitted model object\ndata(tidfit)\n\n## run the function\nres <- wrtdsrsd(tidfit)\nhead(res)\n\n\n"} {"package":"WRTDStidal","topic":"wrtdstrnd","snippet":"### Name: wrtdstrnd\n### Title: Get WRTDS trends\n### Aliases: wrtdstrnd wrtdstrnd.default wrtdstrnd.tidal\n### wrtdstrnd.tidalmean\n\n### ** Examples\n\n## load a fitted model object\ndata(tidfit)\ndata(tidfitmean)\n\n## get trends\n\n# setup month, year categories\nmobrks <- list(c(1, 2, 3), c(4, 5, 6), c(7, 8, 9), c(10, 11, 12))\nyrbrks <- c(1973, 1985, 1994, 2003, 2012)\nmolabs <- c('JFM', 'AMJ', 'JAS', 'OND')\nyrlabs <- c('1974-1985', '1986-1994', '1995-2003', '2004-2012')\n\nwrtdstrnd(tidfit, mobrks, yrbrks, molabs, yrlabs)\nwrtdstrnd(tidfitmean, mobrks, yrbrks, molabs, yrlabs)\n\n# get averages in each period\nwrtdstrnd(tidfit, mobrks, yrbrks, molabs, yrlabs, aves = TRUE)\n\n\n"} {"package":"WRTDStidal","topic":"wrtdstrnd_sk","snippet":"### Name: wrtdstrnd_sk\n### Title: Get WRTDS trends using seasonal Kendall tests\n### Aliases: wrtdstrnd_sk wrtdstrnd_sk.default wrtdstrnd_sk.tidal\n### wrtdstrnd_sk.tidalmean\n\n### ** Examples\n\n## load a fitted model object\ndata(tidfit)\ndata(tidfitmean)\n\n## get trends\n\n# setup month, year categories\nmobrks <- list(c(1, 2, 3), c(4, 5, 6), c(7, 
8, 9), c(10, 11, 12))\nyrbrks <- c(1973, 1985, 1994, 2003, 2012)\nmolabs <- c('JFM', 'AMJ', 'JAS', 'OND')\nyrlabs <- c('1974-1985', '1986-1994', '1995-2003', '2004-2012')\n\nwrtdstrnd_sk(tidfit, mobrks, yrbrks, molabs, yrlabs)\nwrtdstrnd_sk(tidfitmean, mobrks, yrbrks, molabs, yrlabs)\n\n\n\n"} {"package":"WRTDStidal","topic":"wtsplot","snippet":"### Name: wtsplot\n### Title: Plot the weights for an observation\n### Aliases: wtsplot wtsplot.default wtsplot.tidal wtsplot.tidalmean\n\n### ** Examples\n\n\n## load a fitted tidal object\ndata(tidfit)\n\n## plot using defaults, \nwtsplot(tidfit)\n\n## change the defaults\nwtsplot(tidfit, ref = '2000-01-01', wins = list(0.5, 15, Inf), \n dt_rng = c('1990-01-01', '2010-01-01'), \n pt_rng = c(3, 8), col_vec = c('lightgreen', 'lightblue', 'purple'),\n alpha = 0.7)\n\n\n"} {"package":"spdl","topic":"setup","snippet":"### Name: setup\n### Title: Convenience Wrappers for 'RcppSpdlog' Logging From 'spdlog'\n### Aliases: setup init log filesetup drop set_pattern set_level trace\n### debug info warn error critical fmt cat stopwatch elapsed\n\n### ** Examples\n\nspdl::setup(\"exampleDemo\", \"warn\")\n# and spdl::init(\"warn\") and spdl::log(\"warn\") are shortcuts\nspdl::info(\"Not seen as level 'info' below 'warn'\")\nspdl::warn(\"This warning message is seen\")\nspdl::set_level(\"info\")\nspdl::info(\"Now this informational message is seen too\")\nspdl::info(\"Calls use fmtlib::fmt {} as we can see {}\", \"under the hood\", 42L)\n\n\n"} {"package":"grove","topic":"DWT","snippet":"### Name: DWT\n### Title: Discrete wavelet transform\n### Aliases: DWT\n\n### ** Examples\n\ndata <- GenerateSyntheticAnova(st.dev = 5, n.replicates = 10)\nW <- DWT(data$noisy.Y)\n\n\n"} {"package":"grove","topic":"Denoise","snippet":"### Name: Denoise\n### Title: Bayesian wavelet denoising\n### Aliases: Denoise\n\n### ** Examples\n\ndata <- wavethresh::DJ.EX(n = 512, noisy = TRUE, rsnr = 5)$doppler\nW <- DWT(data)\nans <- Denoise(W)\n\n\n"} {"package":"grove","topic":"FAnova","snippet":"### Name: FAnova\n### Title: Bayesian functional ANOVA\n### Aliases: FAnova\n\n### ** Examples\n\n## Not run: \n##D data <- GenerateSyntheticAnova(st.dev = 5, n.replicates = 5)\n##D W <- DWT(data$noisy.Y)\n##D X <- data$X\n##D ans <- FAnova(W, X, ~ 1 + factorA + factorB)\n##D denoised.data <- InvDWT(ans, x = c(0, 0, 1, 0))\n##D PlotFun(denoised.data)\n## End(Not run)\n\n\n"} {"package":"grove","topic":"GenerateSyntheticAnova","snippet":"### Name: GenerateSyntheticAnova\n### Title: Generate synthetic functional ANOVA dataset\n### Aliases: GenerateSyntheticAnova\n\n### ** Examples\n\ndata <- GenerateSyntheticAnova(st.dev = 5, n.replicates = 10)\nix <- 1\nplot(data$clean.Y[ix, ], type = \"l\", col = \"red\", ylab = \"\")\nlines(data$noisy.Y[ix, ], col = \"blue\")\n\n\n"} {"package":"grove","topic":"InvDWT","snippet":"### Name: InvDWT\n### Title: Inverse discrete wavelet transform\n### Aliases: InvDWT\n\n### ** Examples\n\ndata <- wavethresh::DJ.EX(n = 512, noisy = TRUE, rsnr = 5)$doppler\nW <- DWT(data)\nans <- Denoise(W)\ndenoised.data <- InvDWT(ans)\nplot(data, type = \"l\")\nlines(denoised.data[1, ], col = \"red\")\n\n\n"} {"package":"grove","topic":"PlotFun","snippet":"### Name: PlotFun\n### Title: Function to plot the denoised signal\n### Aliases: PlotFun\n\n### ** Examples\n\ndata <- wavethresh::DJ.EX(n = 512, noisy = TRUE, rsnr = 5)$doppler\nW <- DWT(data)\nans <- Denoise(W)\ndenoised.data <- InvDWT(ans)\nPlotFun(denoised.data)\nPlotFun(denoised.data, band.type = \"both\")\n\n\n"} 
{"package":"grove","topic":"PlotStates","snippet":"### Name: PlotStates\n### Title: Function to plot the hidden states\n### Aliases: PlotStates\n\n### ** Examples\n\n## Not run: \n##D data <- GenerateSyntheticAnova(st.dev = 5, n.replicates = 5)\n##D W <- DWT(data$noisy.Y)\n##D X <- data$X\n##D ans <- FAnova(W, X, ~ 1 + factorA + factorB)\n##D PlotStates(ans)\n##D PlotStates(ans, block = "factorA")\n##D PlotStates(ans, block = "factorB")\n## End(Not run)\n\n\n"} {"package":"bst","topic":"bst","snippet":"### Name: bst\n### Title: Boosting for Classification and Regression\n### Aliases: bst print.bst predict.bst plot.bst coef.bst fpartial.bst\n### Keywords: classification\n\n### ** Examples\n\nx <- matrix(rnorm(100*5),ncol=5)\nc <- 2*x[,1]\np <- exp(c)/(exp(c)+exp(-c))\ny <- rbinom(100,1,p)\ny[y != 1] <- -1\nx <- as.data.frame(x)\ndat.m <- bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\npredict(dat.m)\ndat.m1 <- bst(x, y, ctrl = bst_control(twinboost=TRUE, \ncoefir=coef(dat.m), xselect.init = dat.m$xselect, mstop=50))\ndat.m2 <- rbst(x, y, ctrl = bst_control(mstop=50, s=0, trace=TRUE), \nrfamily = \"thinge\", learner = \"ls\")\npredict(dat.m2)\n\n\n"} {"package":"bst","topic":"bst.sel","snippet":"### Name: bst.sel\n### Title: Function to select number of predictors\n### Aliases: bst.sel\n### Keywords: models regression\n\n### ** Examples\n\n## Not run: \n##D x <- matrix(rnorm(100*100), nrow = 100, ncol = 100)\n##D y <- x[,1] * 2 + x[,2] * 2.5 + rnorm(100)\n##D sel <- bst.sel(x, y, q=10)\n##D library(\"hdi\")\n##D fit.multi <- hdi(x, y, method = \"multi.split\",\n##D model.selector =bst.sel,\n##D args.model.selector=list(type=\"firstq\", q=10))\n##D fit.multi\n##D fit.multi$pval[1:10] ## the first 10 p-values\n##D fit.multi <- hdi(x, y, method = \"multi.split\",\n##D model.selector =bst.sel,\n##D args.model.selector=list(type=\"cv\"))\n##D fit.multi\n##D fit.multi$pval[1:10] ## the first 10 p-values\n## End(Not run)\n\n\n"} {"package":"bst","topic":"cv.bst","snippet":"### Name: cv.bst\n### Title: Cross-Validation for Boosting\n### Aliases: cv.bst\n\n### ** Examples\n\n## Not run: \n##D x <- matrix(rnorm(100*5),ncol=5)\n##D c <- 2*x[,1]\n##D p <- exp(c)/(exp(c)+exp(-c))\n##D y <- rbinom(100,1,p)\n##D y[y != 1] <- -1\n##D x <- as.data.frame(x)\n##D cv.bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\", type=\"loss\")\n##D cv.bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\", type=\"error\")\n##D dat.m <- bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\n##D dat.m1 <- cv.bst(x, y, ctrl = bst_control(twinboost=TRUE, coefir=coef(dat.m), \n##D xselect.init = dat.m$xselect, mstop=50), family = \"hinge\", learner=\"ls\")\n## End(Not run)\n\n\n"} {"package":"bst","topic":"cv.rbst","snippet":"### Name: cv.rbst\n### Title: Cross-Validation for Nonconvex Loss Boosting\n### Aliases: cv.rbst\n\n### ** Examples\n\n## Not run: \n##D x <- matrix(rnorm(100*5),ncol=5)\n##D c <- 2*x[,1]\n##D p <- exp(c)/(exp(c)+exp(-c))\n##D y <- rbinom(100,1,p)\n##D y[y != 1] <- -1\n##D x <- as.data.frame(x)\n##D cv.rbst(x, y, ctrl = bst_control(mstop=50), rfamily = \"thinge\", learner = \"ls\", type=\"loss\")\n##D cv.rbst(x, y, ctrl = bst_control(mstop=50), rfamily = \"thinge\", learner = \"ls\", type=\"error\")\n##D dat.m <- rbst(x, y, ctrl = bst_control(mstop=50), rfamily = \"thinge\", learner = \"ls\")\n##D dat.m1 <- cv.rbst(x, y, ctrl = bst_control(twinboost=TRUE, coefir=coef(dat.m), \n##D xselect.init = 
dat.m$xselect, mstop=50), rfamily = \"thinge\", learner=\"ls\")\n## End(Not run)\n\n\n"} {"package":"bst","topic":"ex1data","snippet":"### Name: ex1data\n### Title: Generating Three-class Data with 50 Predictors\n### Aliases: ex1data\n### Keywords: classification\n\n### ** Examples\n\n## Not run: \n##D dat <- ex1data(100, p=5)\n##D mhingebst(x=dat$x, y=dat$y)\n## End(Not run)\n\n\n"} {"package":"bst","topic":"mada","snippet":"### Name: mada\n### Title: Multi-class AdaBoost\n### Aliases: mada\n### Keywords: classification\n\n### ** Examples\n\ndata(iris)\nmada(xtr=iris[,-5], ytr=iris[,5])\n\n\n"} {"package":"bst","topic":"mbst","snippet":"### Name: mbst\n### Title: Boosting for Multi-Classification\n### Aliases: mbst print.mbst predict.mbst fpartial.mbst\n### Keywords: classification\n\n### ** Examples\n\nx <- matrix(rnorm(100*5),ncol=5)\nc <- quantile(x[,1], prob=c(0.33, 0.67))\ny <- rep(1, 100)\ny[x[,1] > c[1] & x[,1] < c[2] ] <- 2\ny[x[,1] > c[2]] <- 3\nx <- as.data.frame(x)\ndat.m <- mbst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\npredict(dat.m)\ndat.m1 <- mbst(x, y, ctrl = bst_control(twinboost=TRUE, \nf.init=predict(dat.m), xselect.init = dat.m$xselect, mstop=50))\ndat.m2 <- rmbst(x, y, ctrl = bst_control(mstop=50, s=1, trace=TRUE), \nrfamily = \"thinge\", learner = \"ls\")\npredict(dat.m2)\n\n\n"} {"package":"bst","topic":"mhingebst","snippet":"### Name: mhingebst\n### Title: Boosting for Multi-class Classification\n### Aliases: mhingebst print.mhingebst predict.mhingebst fpartial.mhingebst\n### Keywords: classification\n\n### ** Examples\n\n## Not run: \n##D dat <- ex1data(100, p=5)\n##D res <- mhingebst(x=dat$x, y=dat$y)\n## End(Not run)\n\n\n"} {"package":"bst","topic":"mhingeova","snippet":"### Name: mhingeova\n### Title: Multi-class HingeBoost\n### Aliases: mhingeova print.mhingeova\n### Keywords: classification\n\n### ** Examples\n\n## Not run: \n##D dat1 <- read.table(\"http://archive.ics.uci.edu/ml/machine-learning-databases/\n##D thyroid-disease/ann-train.data\")\n##D dat2 <- read.table(\"http://archive.ics.uci.edu/ml/machine-learning-databases/\n##D thyroid-disease/ann-test.data\")\n##D res <- mhingeova(xtr=dat1[,-22], ytr=dat1[,22], xte=dat2[,-22], yte=dat2[,22], \n##D cost=c(2/3, 0.5, 0.5), nu=0.5, learner=\"ls\", m1=100, K=5, cv1=FALSE, \n##D twinboost=TRUE, m2= 200, cv2=FALSE)\n##D res <- mhingeova(xtr=dat1[,-22], ytr=dat1[,22], xte=dat2[,-22], yte=dat2[,22], \n##D cost=c(2/3, 0.5, 0.5), nu=0.5, learner=\"ls\", m1=100, K=5, cv1=FALSE, \n##D twinboost=TRUE, m2= 200, cv2=TRUE)\n## End(Not run)\n\n\n"} {"package":"bst","topic":"rbst","snippet":"### Name: rbst\n### Title: Robust Boosting for Robust Loss Functions\n### Aliases: rbst\n### Keywords: classification\n\n### ** Examples\n\nx <- matrix(rnorm(100*5),ncol=5)\nc <- 2*x[,1]\np <- exp(c)/(exp(c)+exp(-c))\ny <- rbinom(100,1,p)\ny[y != 1] <- -1\ny[1:10] <- -y[1:10]\nx <- as.data.frame(x)\ndat.m <- bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\npredict(dat.m)\ndat.m1 <- bst(x, y, ctrl = bst_control(twinboost=TRUE, \ncoefir=coef(dat.m), xselect.init = dat.m$xselect, mstop=50))\ndat.m2 <- rbst(x, y, ctrl = bst_control(mstop=50, s=0, trace=TRUE), \nrfamily = \"thinge\", learner = \"ls\")\npredict(dat.m2)\n\n\n"} {"package":"bst","topic":"rbstpath","snippet":"### Name: rbstpath\n### Title: Robust Boosting Path for Nonconvex Loss Functions\n### Aliases: rbstpath\n### Keywords: classification\n\n### ** Examples\n\nx <- matrix(rnorm(100*5),ncol=5)\nc <- 2*x[,1]\np <- 
exp(c)/(exp(c)+exp(-c))\ny <- rbinom(100,1,p)\ny[y != 1] <- -1\ny[1:10] <- -y[1:10]\nx <- as.data.frame(x)\ndat.m <- bst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\npredict(dat.m)\ndat.m1 <- bst(x, y, ctrl = bst_control(twinboost=TRUE, \ncoefir=coef(dat.m), xselect.init = dat.m$xselect, mstop=50))\ndat.m2 <- rbst(x, y, ctrl = bst_control(mstop=50, s=0, trace=TRUE), \nrfamily = \"thinge\", learner = \"ls\")\npredict(dat.m2)\nrmstop <- seq(10, 40, by=10)\ndat.m3 <- rbstpath(x, y, rmstop, ctrl=bst_control(s=0), rfamily = \"thinge\", learner = \"ls\")\n\n\n"} {"package":"bst","topic":"rmbst","snippet":"### Name: rmbst\n### Title: Robust Boosting for Multi-class Robust Loss Functions\n### Aliases: rmbst\n### Keywords: classification\n\n### ** Examples\n\nx <- matrix(rnorm(100*5),ncol=5)\nc <- quantile(x[,1], prob=c(0.33, 0.67))\ny <- rep(1, 100)\ny[x[,1] > c[1] & x[,1] < c[2] ] <- 2\ny[x[,1] > c[2]] <- 3\nx <- as.data.frame(x)\ndat.m <- mbst(x, y, ctrl = bst_control(mstop=50), family = \"hinge\", learner = \"ls\")\npredict(dat.m)\ndat.m1 <- mbst(x, y, ctrl = bst_control(twinboost=TRUE, \nf.init=predict(dat.m), xselect.init = dat.m$xselect, mstop=50))\ndat.m2 <- rmbst(x, y, ctrl = bst_control(mstop=50, s=1, trace=TRUE), \nrfamily = \"thinge\", learner = \"ls\")\npredict(dat.m2)\n\n\n"} {"package":"webdriver","topic":"key","snippet":"### Name: key\n### Title: Special keys, so that we can refer to them with an easier syntax\n### Aliases: key\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: \n##D el$sendKeys(\"xyz\")\n##D el$sendKeys(\"x\", \"y\", \"z\")\n##D el$sendKeys(\"username\", key$enter, \"password\", key$enter)\n##D \n##D ## Sending CTRL+A\n##D el$sendKeys(key$control, \"a\")\n##D \n##D ## Note that modifier keys (control, alt, shift, meta) are sticky,\n##D ## they remain in effect in the rest of the sendKeys() call. 
E.g.\n##D ## this sends CTRL+X and CTRL+S\n##D el$sendKeys(key$control, \"x\", \"s\")\n##D \n##D ## You will need multiple calls to release control and send CTRL+X S\n##D el$sendKeys(key$control, \"x\")\n##D el$sendKeys(\"s\")\n## End(Not run)\n\n\n\n"} {"package":"RcmdrPlugin.EBM","topic":"fncEBMPostTest","snippet":"### Name: fncEBMPostTest\n### Title: Computes post-test probability to have a disease based on the\n### pre-test probability and the likelihood ratio of the diagnostic test\n### Aliases: fncEBMPostTest\n### Keywords: post-test probability\n\n### ** Examples\n\nfncEBMPostTest(.12, 5.7)
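\n\n# the function applies Bayes' theorem on the odds scale; a minimal sketch of\n# the arithmetic behind the call above (pre-test probability 0.12, LR 5.7):\npre.odds <- 0.12 / (1 - 0.12) # pre-test odds\npost.odds <- pre.odds * 5.7 # multiply by the likelihood ratio\npost.odds / (1 + post.odds) # post-test probability, roughly 0.44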
\n\n\n"} {"package":"Rramas","topic":"as.tmatrix","snippet":"### Name: as.tmatrix\n### Title: Population Transition Matrix\n### Aliases: as.tmatrix plot.tmatrix print.tmatrix summary.tmatrix\n### plot.summary.tmatrix print.summary.tmatrix\n### Keywords: algebra models\n\n### ** Examples\n\n\n data(coryphanthaA)\n coryphanthaA\n coryphanthaA <- as.tmatrix(coryphanthaA)\n summary(coryphanthaA)\n plot(coryphanthaA) \n \n\n\n"} {"package":"Rramas","topic":"coryphanthaA","snippet":"### Name: coryphanthaA\n### Title: Transition Matrices of Three Coryphantha robbinsorum Populations\n### Aliases: coryphanthaA coryphanthaB coryphanthaC\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(coryphanthaA)\n\n\n\n"} {"package":"Rramas","topic":"decline","snippet":"### Name: decline\n### Title: Compute Declination and Explosion Probabilities\n### Aliases: decline explosion summary.rmas.risk plot.summary.rmas.risk\n### Keywords: models\n\n### ** Examples\n\n \n data(coryphanthaA)\n coryphanthaA <- as.tmatrix(coryphanthaA)\n #initial abundances:\n v0 <- c(100,0,0)\n \n # run 1000 simulations of 20 years with demographic stochasticity:\n simu20.ds <- projectn(v0=v0, mat=coryphanthaA, time = 20, estdem=TRUE, nrep=1000)\n \n # compute declination probabilities\n simu20.ds.dec <- decline(simu20.ds)\n \n summary(simu20.ds.dec)\n \n \n\n\n"} {"package":"Rramas","topic":"projectn","snippet":"### Name: projectn\n### Title: Demographic Projections\n### Aliases: projectn project1 estambi estdemo plot.rmas plot.summary.rmas\n### summary.rmas\n### Keywords: algebra models\n\n### ** Examples\n\n \n data(coryphanthaA)\n coryphanthaA <- as.tmatrix(coryphanthaA)\n \n # run a deterministic simulation of 20 years from an initial state of\n # 100 small juveniles:\n v0 <- c(100,0,0)\n simu20 <- projectn(v0=v0, mat=coryphanthaA, time = 20)\n plot(simu20, sum=FALSE)\n summary(simu20)\n \n # run 100 simulations of 20 years with demographic stochasticity:\n simu20.ds <- projectn(v0=v0, mat=coryphanthaA, time = 20, estdem=TRUE, nrep=100)\n plot(simu20.ds)\n summary(simu20.ds)\n \n # run 100 simulations of 20 years with demographic stochasticity but \n # assuming that the first row of the transition matrix represent both\n # fecundity and survival, each with a 50% contribution \n \n # first generate the stmat matrix:\n stmat <- (coryphanthaA >0)\n stmat <- stmat*c(0.5,0,0)\n stmat\n \n simu20.ds2 <- projectn(v0=v0, mat=coryphanthaA, time = 20, estdem=TRUE,\n stmat=stmat, nrep=100)\n plot(simu20.ds2)\n summary(simu20.ds2)\n \n \n # run 100 simulations of 20 years with both demographic and environmental\n # stochasticity:\n # first generate a sd matrix to describe environmental stochasticity:\n sdenv <- coryphanthaA/20 \n sdenv\n \n simu20.eds <- projectn(v0=v0, mat=coryphanthaA, matsd =sdenv, time = 20,\n estdem=TRUE,estamb=TRUE, nrep=100)\n plot(simu20.eds)\n summary(simu20.eds)\n \n # Example of management actions\n # each time step, 10 individuals will be added to the first stage, 10 individuals \n # will be added to the second stage, and 50 percent of the individuals in the \n # third stage will be extracted\n \n man <- c(10, 10, -0.5)\n p1 <- projectn(v0 = c(100, 100,100), mat= coryphanthaA, management=man)\n\t\n # summarize and plot population trajectory\n summary(p1) \n\t\n\t# summarizes and plots harvest history\n\tsummary(p1, harvest=TRUE) \n \n\n\n\n"} {"package":"representr","topic":"clust_proto_random","snippet":"### Name: clust_proto_random\n### Title: Prototype record from a cluster.\n### Aliases: clust_proto_random clust_proto_minimax maxmin_compare\n### within_category_compare random_compare\n\n### ** Examples\n\ndata(\"rl_reg1\")\n\nclusters <- split(rl_reg1, identity.rl_reg1)\nclust_proto_random(clusters[[1]])\n\n\nnot_clusters <- lapply(seq_along(clusters), function(x){\nif(nrow(clusters[[x]]) > 1)\n do.call(rbind, clusters[-x])\n})\nclust_proto_minimax(clusters[[1]], not_clusters[[1]], dist_binary)\n\n\n\n"} {"package":"representr","topic":"dist_binary","snippet":"### Name: dist_binary\n### Title: The distance between two records\n### Aliases: dist_binary dist_col_type_slow\n\n### ** Examples\n\ndata(\"rl_reg1\")\ndist_binary(rl_reg1[1,], rl_reg1[2,])\n\ntype <- c(\"string\", \"string\", \"numeric\", \"numeric\",\n \"numeric\", \"categorical\", \"ordinal\", \"numeric\", \"numeric\")\norder <- list(education = c(\"Less than a high school diploma\",\n \"High school graduates, no college\", \"Some college or associate degree\",\n \"Bachelor's degree only\", \"Advanced degree\"))\n\ndist_col_type_slow(rl_reg1[1,], rl_reg1[2,], col_type = type, order = order)\n\n\n"} {"package":"representr","topic":"emp_kl_div","snippet":"### Name: emp_kl_div\n### Title: Calculate the empirical KL divergence for a representative\n### dataset as compared to the true dataset\n### Aliases: emp_kl_div\n\n### ** Examples\n\n\ndata(\"rl_reg1\")\n\n## random prototyping\nrep_dat_random <- represent(rl_reg1, identity.rl_reg1, \"proto_random\", id = FALSE, parallel = FALSE)\n\n## empirical KL divergence\ncat_vars <- c(\"sex\")\nnum_vars <- c(\"income\", \"bp\")\nemp_kl_div(rl_reg1[unique(identity.rl_reg1), c(cat_vars, num_vars)],\n rep_dat_random[, c(cat_vars, num_vars)],\n cat_vars, num_vars)\n\n\n\n"} {"package":"representr","topic":"pp_weights","snippet":"### Name: pp_weights\n### Title: Get posterior weights for each record post record-linkage using\n### posterior prototyping.\n### Aliases: pp_weights\n\n### ** Examples\n\n\ndata(rl_reg1)\n\n# make a fake posterior distribution for the linkage\nm <- 10\nn <- nrow(rl_reg1)\npost_link <- matrix(sample(seq_len(n), n*m, replace = TRUE), nrow = m)\n\n# get the posterior prototyping weights\ncol_type <- c(\"string\", \"string\", \"numeric\", \"numeric\", \"numeric\", \"categorical\", \"ordinal\",\n \"numeric\", \"numeric\")\norders <- list(education = c(\"Less than a high school diploma\", \"High school graduates, no college\",\n \"Some college or associate degree\", \"Bachelor's degree only\", \"Advanced degree\"))\nweights <- c(.25, .25, .05, .05, .1, .15, .05, .05, .05)\n\n## No test: \npp_weight <- pp_weights(rl_reg1, post_link, \"proto_minimax\", distance = dist_col_type,\n col_type = col_type, weights = weights, orders = orders, scale = TRUE, parallel = FALSE)\n\n# threshold by posterior prototyping weights\nhead(rl_reg1[pp_weight > 0.5, ])\n## End(No test)\n\n\n\n"} {"package":"representr","topic":"represent","snippet":"### Name: represent\n### Title: Create a representative 
dataset post record-linkage.\n### Aliases: represent\n\n### ** Examples\n\n\ndata(\"rl_reg1\")\n\n## random prototyping\nrep_dat_random <- represent(rl_reg1, identity.rl_reg1, \"proto_random\", id = FALSE, parallel = FALSE)\nhead(rep_dat_random)\n\n## minimax prototyping\ncol_type <- c(\"string\", \"string\", \"numeric\", \"numeric\", \"numeric\", \"categorical\", \"ordinal\",\n \"numeric\", \"numeric\")\norders <- list(education = c(\"Less than a high school diploma\", \"High school graduates, no college\",\n \"Some college or associate degree\", \"Bachelor's degree only\", \"Advanced degree\"))\nweights <- c(.25, .25, .05, .05, .1, .15, .05, .05, .05)\nrep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, \"proto_minimax\", id = FALSE,\n distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,\n scale = TRUE, parallel = FALSE)\nhead(rep_dat_minimax)\n\n## Not run: \n##D ## with alternative tie breaker\n##D rep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, \"proto_minimax\", id = FALSE,\n##D distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,\n##D ties_fn = \"maxmin_compare\", scale = TRUE, parallel = FALSE)\n##D head(rep_dat_minimax)\n##D \n##D rep_dat_minimax <- represent(rl_reg1, identity.rl_reg1, \"proto_minimax\", id = FALSE,\n##D distance = dist_col_type, col_type = col_type, weights = weights, orders = orders,\n##D ties_fn = \"within_category_compare_cpp\", scale = TRUE, parallel = FALSE)\n##D head(rep_dat_minimax)\n##D \n##D ## composite prototyping\n##D rep_dat_composite <- represent(rl_reg1, identity.rl_reg1, \"composite\",\n##D col_type = col_type, parallel = FALSE)\n##D head(rep_dat_composite)\n## End(Not run)\n\n\n\n"} {"package":"UPG","topic":"UPG.Diag","snippet":"### Name: UPG.Diag\n### Title: MCMC Diagnostics for 'UPG.Probit', 'UPG.Logit', 'UPG.MNL' and\n### 'UPG.Binomial' objects using 'coda'\n### Aliases: UPG.Diag\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# compute MCMC diagnostics\nUPG.Diag(results.probit)\n## End(No test)\n\n\n\n"} {"package":"UPG","topic":"UPG","snippet":"### Name: UPG\n### Title: Efficient MCMC Samplers for Bayesian probit regression and\n### various logistic regression models\n### Aliases: UPG\n\n### ** Examples\n\n## No test: \n# load package\nlibrary(UPG)\n\n# estimate a probit model using example data\n# warning: use more burn-ins, burnin = 100 is just used for demonstration purposes\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\", burnin = 100)\n\n# estimate a logit model using example data\n# warning: use more burn-ins, burnin = 100 is just used for demonstration purposes\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\", burnin = 100)\n\n# estimate a MNL model using example data\n# warning: use more burn-ins, burnin = 100 is just used for demonstration purposes\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\", burnin = 100)\n\n# estimate a binomial logit model using example data\n# warning: use more burn-ins, burnin = 100 is just used for demonstration purposes\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\", burnin = 100)\n## End(No test)\n\n\n\n"} {"package":"UPG","topic":"coef.UPG.Binomial","snippet":"### 
Name: coef.UPG.Binomial\n### Title: Extract coefficients from UPG.Binomial objects\n### Aliases: coef.UPG.Binomial\n\n### ** Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\n\n# extract posterior means and credible interval based on 0.025 and 0.975 quantiles\ncoef(results.binomial, q = c(0.025, 0.975))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"coef.UPG.Logit","snippet":"### Name: coef.UPG.Logit\n### Title: Extract coefficients from UPG.Logit objects\n### Aliases: coef.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\n\n# extract posterior means and credible interval based on 0.025 and 0.975 quantiles\ncoef(results.logit, q = c(0.025, 0.975))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"coef.UPG.MNL","snippet":"### Name: coef.UPG.MNL\n### Title: Extract coefficients from UPG.MNL objects\n### Aliases: coef.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\n\n# extract posterior means and credible interval based on 0.025 and 0.975 quantiles\ncoef(results.mnl, q = c(0.025, 0.975))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"coef.UPG.Probit","snippet":"### Name: coef.UPG.Probit\n### Title: Extract coefficients from UPG.Probit objects\n### Aliases: coef.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# extract posterior means and credible interval based on 0.025 and 0.975 quantiles\ncoef(results.probit, q = c(0.025, 0.975))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"logLik.UPG.Binomial","snippet":"### Name: logLik.UPG.Binomial\n### Title: Compute log-likelihoods from UPG.Binomial objects\n### Aliases: logLik.UPG.Binomial\n\n### ** Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\n\n# extract log-likelihood\nll.binomial = logLik(results.binomial)\n\n## End(No test)\n\n\n"} {"package":"UPG","topic":"logLik.UPG.Logit","snippet":"### Name: logLik.UPG.Logit\n### Title: Compute log-likelihoods from UPG.Logit objects\n### Aliases: logLik.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\n\n# extract log-likelihood\nll.logit = logLik(results.logit)\n\n## End(No test)\n\n\n"} {"package":"UPG","topic":"logLik.UPG.MNL","snippet":"### Name: logLik.UPG.MNL\n### Title: Compute log-likelihoods from UPG.MNL objects\n### Aliases: logLik.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\n\n# extract log-likelihood\nll.mnl = logLik(results.mnl)\n\n## End(No test)\n\n\n"} {"package":"UPG","topic":"logLik.UPG.Probit","snippet":"### Name: logLik.UPG.Probit\n### 
Title: Compute log-likelihoods from UPG.Probit objects\n### Aliases: logLik.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# extract log-likelihood\nll.probit = logLik(results.probit)\n\n## End(No test)\n\n\n"} {"package":"UPG","topic":"plot.UPG.Binomial","snippet":"### Name: plot.UPG.Binomial\n### Title: Coefficient plots for UPG.Binomial objects\n### Aliases: plot.UPG.Binomial\n\n### ** Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\n\n# plot the results and sort coefficients by effect size\nplot(results.binomial, sort = TRUE)\n\n# plot only variables 1 and 3 with custom names, credible intervals and axis labels\nplot(results.binomial,\n include = c(1,3),\n names = c(\"Custom 1\", \"Custom 2\"),\n q = c(0.1, 0.9),\n xlab = c(\"Custom X\"),\n ylab = c(\"Custom Y\"))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"plot.UPG.Logit","snippet":"### Name: plot.UPG.Logit\n### Title: Coefficient plots for UPG.Logit objects\n### Aliases: plot.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\n\n# plot the results and sort coefficients by effect size\nplot(results.logit, sort = TRUE)\n\n# plot only variables 1 and 3 with custom names, credible intervals and axis labels\nplot(results.logit,\n include = c(1,3),\n names = c(\"Custom 1\", \"Custom 2\"),\n q = c(0.1, 0.9),\n xlab = c(\"Custom X\"),\n ylab = c(\"Custom Y\"))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"plot.UPG.MNL","snippet":"### Name: plot.UPG.MNL\n### Title: Coefficient plots for UPG.MNL objects\n### Aliases: plot.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\n\n# plot the results and sort coefficients by average effect size\nplot(results.mnl, sort = TRUE)\n\n# plot only variables 1 and 3 with custom group and variable names\n# also, customize credible intervals and axis labels\nplot(results.mnl,\n include = c(1,3),\n names = c(\"Custom 1\", \"Custom 2\"),\n groups = c(\"Alpha\", \"Beta\"),\n q = c(0.1, 0.9),\n xlab = c(\"Custom X\"),\n ylab = c(\"Custom Y\"))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"plot.UPG.Probit","snippet":"### Name: plot.UPG.Probit\n### Title: Coefficient plots for UPG.Probit objects\n### Aliases: plot.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# plot the results and sort coefficients by effect size\nplot(results.probit, sort = TRUE)\n\n# plot only variables 1 and 3 with custom names, credible intervals and axis labels\nplot(results.probit,\n include = c(1, 3),\n names = c(\"Custom 1\", \"Custom 2\"),\n q = c(0.1, 0.9),\n xlab = c(\"Custom X\"),\n ylab = c(\"Custom Y\"))\n## End(No test)\n\n\n"} {"package":"UPG","topic":"predict.UPG.Binomial","snippet":"### Name: predict.UPG.Binomial\n### Title: Predicted probabilities from UPG.Binomial objects\n### Aliases: predict.UPG.Binomial\n\n### ** 
Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\n\n# extract predicted probabilities\npredict(results.binomial)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"predict.UPG.Logit","snippet":"### Name: predict.UPG.Logit\n### Title: Predicted probabilities from UPG.Logit objects\n### Aliases: predict.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\n\n# extract predicted probabilities\npredict(results.logit)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"predict.UPG.MNL","snippet":"### Name: predict.UPG.MNL\n### Title: Predicted probabilities from UPG.MNL objects\n### Aliases: predict.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\n\n# extract predicted probabilities\npredict(results.mnl)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"predict.UPG.Probit","snippet":"### Name: predict.UPG.Probit\n### Title: Predicted probabilities from UPG.Probit objects\n### Aliases: predict.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# extract predicted probabilities\npredict(results.probit)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"print.UPG.Binomial","snippet":"### Name: print.UPG.Binomial\n### Title: Print information for UPG.Binomial objects\n### Aliases: print.UPG.Binomial\n\n### ** Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\nprint(results.binomial)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"print.UPG.Logit","snippet":"### Name: print.UPG.Logit\n### Title: Print information for UPG.Logit objects\n### Aliases: print.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\nprint(results.logit)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"print.UPG.MNL","snippet":"### Name: print.UPG.MNL\n### Title: Print information for UPG.MNL objects\n### Aliases: print.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\nprint(results.mnl)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"print.UPG.Probit","snippet":"### Name: print.UPG.Probit\n### Title: Print information for UPG.Probit objects\n### Aliases: print.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\nprint(results.probit)\n## End(No test)\n\n\n"} {"package":"UPG","topic":"summary.UPG.Binomial","snippet":"### Name: summary.UPG.Binomial\n### Title: Estimation results and tables for UPG.Binomial objects\n### Aliases: summary.UPG.Binomial\n\n### 
** Examples\n\n## No test: \n# estimate a binomial logit model using example data\nlibrary(UPG)\ndata(titanic)\ny = titanic[,1]\nNi = titanic[,2]\nX = titanic[,-c(1,2)]\nresults.binomial = UPG(y = y, X = X, Ni = Ni, model = \"binomial\")\n\n# basic summary of regression results\nsummary(results.binomial)\n\n# generate a LaTeX table with subset of variables and custom names\nsummary(results.binomial,\n include=c(1,3),\n names=c(\"V. kept 1\", \"V. kept 3\"),\n table=\"latex\")\n## End(No test)\n\n\n"} {"package":"UPG","topic":"summary.UPG.Logit","snippet":"### Name: summary.UPG.Logit\n### Title: Estimation results and tables for UPG.Logit objects\n### Aliases: summary.UPG.Logit\n\n### ** Examples\n\n## No test: \n# estimate a logit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.logit = UPG(y = y, X = X, model = \"logit\")\n\n# basic summary of regression results\nsummary(results.logit)\n\n# generate a LaTeX table with subset of variables and custom names\nsummary(results.logit,\n include=c(1,3),\n names=c(\"V. kept 1\", \"V. kept 3\"),\n table=\"latex\")\n## End(No test)\n\n\n"} {"package":"UPG","topic":"summary.UPG.MNL","snippet":"### Name: summary.UPG.MNL\n### Title: Estimation results and tables for UPG.MNL objects\n### Aliases: summary.UPG.MNL\n\n### ** Examples\n\n## No test: \n# estimate a multinomial logit model using example data\nlibrary(UPG)\ndata(program)\ny = program[,1]\nX = program[,-1]\nresults.mnl = UPG(y = y, X = X, model = \"mnl\")\n\n# basic summary of regression results\nsummary(results.mnl)\n\n# generate a LaTeX table with subset of variables and custom names\nsummary(results.mnl,\n include=c(1,3),\n groups=c(\"Alpha\",\"Beta\"),\n names=c(\"V. kept 1\", \"V. kept 3\"),\n table=\"latex\")\n## End(No test)\n\n\n"} {"package":"UPG","topic":"summary.UPG.Probit","snippet":"### Name: summary.UPG.Probit\n### Title: Estimation result summary and tables for UPG.Probit objects\n### Aliases: summary.UPG.Probit\n\n### ** Examples\n\n## No test: \n# estimate a probit model using example data\nlibrary(UPG)\ndata(lfp)\ny = lfp[,1]\nX = lfp[,-1]\nresults.probit = UPG(y = y, X = X, model = \"probit\")\n\n# basic summary of regression results\nsummary(results.probit)\n\n# generate a LaTeX table with subset of variables and custom names\nsummary(results.probit,\n include=c(1,3),\n names=c(\"V. kept 1\", \"V. 
kept 3\"),\n table=\"latex\")\n## End(No test)\n\n\n"} {"package":"nlts","topic":"add.test","snippet":"### Name: add.test\n### Title: Lagrange multiplier test for additivity in a timeseries\n### Aliases: add.test\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n add.test(sqrt(plodia), order = 3)\n\n\n"} {"package":"nlts","topic":"contingency.periodogram","snippet":"### Name: contingency.periodogram\n### Title: The contingency periodogram for periodicity in categorical time\n### series\n### Aliases: contingency.periodogram\n### Keywords: ts\n\n### ** Examples\n\n data(plodia)\n data<-as.factor((scale(plodia) > 0))\n fit <- contingency.periodogram(data, maxper = 9) \n ## Not run: plot(fit)\n\n\n"} {"package":"nlts","topic":"lin.order.cls","snippet":"### Name: lin.order.cls\n### Title: The order of a time series using cross-validation of the linear\n### autoregressive model (conditional least-squares).\n### Aliases: lin.order.cls\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n fit <- lin.order.cls(sqrt(plodia), order=1:5)\n ## Not run: plot(fit)\n summary(fit)\n\n\n"} {"package":"nlts","topic":"lin.test","snippet":"### Name: lin.test\n### Title: A Tukey one-degree-of-freedom test for linearity in time series.\n### Aliases: lin.test\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n lin.test(sqrt(plodia), order = 3)\n\n\n"} {"package":"nlts","topic":"ll.edm","snippet":"### Name: ll.edm\n### Title: Nonlinear forecasting of local polynomial 'empirical dynamic\n### model'.\n### Aliases: ll.edm\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n sim1 <- ll.edm(sqrt(plodia), order=2, bandwidth = 1.5) \n\n\n"} {"package":"nlts","topic":"ll.order","snippet":"### Name: ll.order\n### Title: Consistent nonlinear estimate of the order using local\n### polynomial regression.\n### Aliases: ll.order\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n fit <- ll.order(sqrt(plodia), order=1:3, bandwidth\n = seq(0.5, 1.5, by = 0.5)) \n\n ## Not run: plot(fit)\n\n summary(fit)\n\n\n\n"} {"package":"nlts","topic":"portman.Q","snippet":"### Name: portman.Q\n### Title: Ljung-Box test for whiteness in a time series.\n### Aliases: portman.Q\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n portman.Q(sqrt(plodia), K = 10) \n\n fit <- ar(sqrt(plodia)) \n portman.Q(na.omit(fit$resid), K = 10) \n\n\n"} {"package":"nlts","topic":"prediction.profile.ll","snippet":"### Name: prediction.profile.ll\n### Title: Nonlinear forecasting at varying lags using local polynomial\n### regression.\n### Aliases: prediction.profile.ll print.ppll\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n fit <- prediction.profile.ll(sqrt(plodia), step=1:3, order=1:3,\n bandwidth = seq(0.5, 1.5, by = 0.5))\n\n ## Not run: plot(fit)\n\n\n"} {"package":"nlts","topic":"spec.lomb","snippet":"### Name: spec.lomb\n### Title: The Lomb periodogram for unevenly sampled data\n### Aliases: spec.lomb\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n y <- sqrt(plodia)\n x <- 1:length(y) \n\n #make some missing values\n y[10:19] <- NA; x[10:19] <- NA \n #omit NAs\n y <- na.omit(y); x <- na.omit(x) \n\n #the lomb p'gram\n fit <- spec.lomb(y, x) \n summary(fit)\n ## Not run: plot(fit)\n\n\n"} {"package":"nlts","topic":"specar.ci","snippet":"### Name: specar.ci\n### Title: Confidence interval for the ar-spectrum and the dominant period.\n### Aliases: specar.ci\n### Keywords: ts\n\n### ** Examples\n\n\n data(plodia)\n\n\n fit <- specar.ci(sqrt(plodia), order=3, resamp=10) \n\n ## Not run: 
plot(fit, period=FALSE)\n\n summary(fit)\n\n\n"} {"package":"svenssonm","topic":"con_ta","snippet":"### Name: con_ta\n### Title: Contingency Table Generation\n### Aliases: con_ta\n\n### ** Examples\n\nx <- c(1:5, 5:1)\ny <- c(1:5,1,1,5,4,1)\ncon_ta(x, y)\n\n\n"} {"package":"svenssonm","topic":"indichange","snippet":"### Name: indichange\n### Title: Individual Change\n### Aliases: indichange rv rvse iv ralpha pralpha\n\n### ** Examples\n\nx <- c(1:5, 5:1)\ny <- c(1:5,1,1,5,4,1)\nz <- con_ta(x, y)\nrv(z)\nrvse(z)\niv(z)\nralpha(z)\npralpha(z)\n\n\n"} {"package":"svenssonm","topic":"pa","snippet":"### Name: pa\n### Title: Percentage Agreement\n### Aliases: pa\n\n### ** Examples\n\nx <- c(1:5, 5:1)\ny <- c(1:5,1,1,5,4,1)\nz <- con_ta(x, y)\npa(z)\n\n\n"} {"package":"svenssonm","topic":"sresult","snippet":"### Name: sresult\n### Title: Summary for Svensson's Method\n### Aliases: sresult\n\n### ** Examples\n\nx <- c(1:5, 5:1)\ny <- c(1:5,1,1,5,4,1)\nz <- con_ta(x, y)\nsresult(z)\n\n\n"} {"package":"svenssonm","topic":"syschange","snippet":"### Name: syschange\n### Title: Systematic Change\n### Aliases: syschange rp rpse rc rcse\n\n### ** Examples\n\nx <- c(1:5, 5:1)\ny <- c(1:5,1,1,5,4,1)\nz <- con_ta(x, y)\nrp(z)\nrpse(z)\nrc(z)\nrcse(z)\n\n\n"} {"package":"hierSDR","topic":"angle","snippet":"### Name: angle\n### Title: Angle between two subspaces\n### Aliases: angle\n\n### ** Examples\n\n\n## case where any relation between b1 and b2 is random\nb1 <- matrix(rnorm(10 * 2), ncol = 2)\nb2 <- matrix(rnorm(10 * 2), ncol = 2)\nangle(b1, b2)\n\n## angle here should be small\nb1 <- matrix(rnorm(10 * 2), ncol = 2)\nb2 <- b1 + matrix(rnorm(10 * 2, sd = 0.2), ncol = 2)\nangle(b1, b2)\n\n\n"} {"package":"hierSDR","topic":"hier.phd.nt","snippet":"### Name: hier.phd.nt\n### Title: Main hierarchical SDR fitting function\n### Aliases: hier.phd.nt\n\n### ** Examples\n\n\nlibrary(hierSDR)\n\n\n\n"} {"package":"hierSDR","topic":"hier.sphd","snippet":"### Name: hier.sphd\n### Title: Main hierarchical sufficient dimension reduction fitting\n### function\n### Aliases: hier.sphd\n\n### ** Examples\n\n\nlibrary(hierSDR)\n\nset.seed(123)\ndat <- simulate_data(nobs = 200, nvars = 6,\n x.type = \"some_categorical\",\n sd.y = 1, model = 2)\n\nx <- dat$x ## covariates\nz <- dat$z ## factor indicators\ny <- dat$y ## response\n\ndat$beta ## true coefficients that generate the subspaces\n\ndat$z.combinations ## what combinations of z represent different subpops\n\n## correct structural dimensions:\ndat$d.correct\n\n## fit hier SPHD model:\n\n## No test: \nhiermod <- hier.sphd(x, y, z, dat$z.combinations, d = dat$d.correct,\n verbose = FALSE, maxit = 250, maxk = 8200)\n\n## validated inf criterion for choosing dimensions (the smaller the better)\nhiermod$vic\n\n\ncbind(hiermod$beta[[4]], NA, dat$beta[[4]])\n\n## angles between estimated and true subspaces for each population:\nmapply(function(x,y) angle(x,y), hiermod$beta, dat$beta)\n\n## projection difference norm between estimated and true subspaces for each population:\nmapply(function(x,y) projnorm(x,y), hiermod$beta, dat$beta)\n## End(No test)\n\n\n\n\n"} {"package":"hierSDR","topic":"plot.hier_sdr_fit","snippet":"### Name: plot.hier_sdr_fit\n### Title: Plotting hierarchical SDR models\n### Aliases: plot.hier_sdr_fit\n\n### ** Examples\n\n\nlibrary(hierSDR)\n\n\n\n"} {"package":"hierSDR","topic":"projnorm","snippet":"### Name: projnorm\n### Title: Norm of difference of projections\n### Aliases: projnorm\n\n### ** Examples\n\nb1 <- matrix(rnorm(10 * 2), ncol = 2)\nb2 <- matrix(rnorm(10 * 2), ncol = 2)\nprojnorm(b1, b2)\n\n## difference norm here should be small\nb1 <- matrix(rnorm(10 * 2), ncol = 2)\nb2 <- b1 + matrix(rnorm(10 * 2, sd = 0.2), ncol = 2)\nprojnorm(b1, b2)\n\n\n"}
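{"package":"hierSDR","topic":"angle-projnorm-note","snippet":"### Note: editorial sketch, not part of the hierSDR documentation.\n### angle() and projnorm() (both documented above) quantify the discrepancy\n### between the subspaces spanned by two bases on different scales; both\n### are near zero when the two bases span the same subspace.\n\n### ** Examples\n\nb <- matrix(rnorm(10 * 2), ncol = 2)\nangle(b, b) # ~0: identical subspaces\nprojnorm(b, b) # ~0 as well\n\n\n"}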
{"package":"hierSDR","topic":"simulate_data","snippet":"### Name: simulate_data\n### Title: Simulate data with hierarchical subspaces\n### Aliases: simulate_data\n\n### ** Examples\n\n\nlibrary(hierSDR)\n\nset.seed(123)\ndat <- simulate_data(nobs = 100, nvars = 6,\n x.type = \"some_categorical\",\n sd.y = 1, model = 2)\n\nx <- dat$x ## covariates\nz <- dat$z ## factor indicators\ny <- dat$y ## response\n\ndat$beta ## true coefficients that generate the subspaces\n\ndat$snr ## signal-to-noise ratio\n\nstr(x)\nstr(z)\n\ndat$z.combinations ## what combinations of z represent different subpops\n\n## correct structural dimensions:\ndat$d.correct\n\n\n\n\n"} {"package":"junctions","topic":"calc_k","snippet":"### Name: calc_k\n### Title: Calculate the limit of the number of junctions\n### Aliases: calc_k\n### Keywords: junctions\n\n### ** Examples\n\nk <- calc_k(N = 100, R = 1000, H_0 = 0.5, C = 1)\n\n\n"} {"package":"junctions","topic":"calculate_mat","snippet":"### Name: calculate_mat\n### Title: Function to calculate the maximum accurate time\n### Aliases: calculate_mat\n### Keywords: analytic error time\n\n### ** Examples\n\ncalculate_mat(N = Inf, R = 1000, H_0 = 0.5, C = 1)\n\n\n"} {"package":"junctions","topic":"estimate_time","snippet":"### Name: estimate_time\n### Title: Estimate the time since the onset of hybridization, using the\n### number of junctions\n### Aliases: estimate_time\n\n### ** Examples\n\ncat(\"example calculate time\")\nJ <- number_of_junctions(N = 100, R = 1000, H_0 = 0.5, C = 1, t = 200)\nestimate_time(J = J, N = 100, R = 1000, H_0 = 0.5, C = 1)\n# should be 200 again\n\n\n"} {"package":"junctions","topic":"estimate_time_one_chrom","snippet":"### Name: estimate_time_one_chrom\n### Title: Estimate the time since the onset of hybridization, using the\n### observed number of junctions, taking into account the distribution of\n### markers on a single chromosome\n### Aliases: estimate_time_one_chrom\n\n### ** Examples\n\ncat(\"example estimate time one chrom\")\nmarkers <- seq(from = 0, to = 1, length.out = 100)\nJ <- number_of_junctions_markers(N = 100, H_0 = 0.5, t = 200,\nmarker_distribution = markers)\nestimate_time_one_chrom(J = J,\n N = 100,\n H_0 = 0.5,\n marker_distribution = markers) #should be 200 again\n\n\n"} {"package":"junctions","topic":"number_of_junctions","snippet":"### Name: number_of_junctions\n### Title: Calculate the average number of junctions\n### Aliases: number_of_junctions\n\n### ** Examples\n\njt <- number_of_junctions(N = 100, R = 1000, H_0 = 0.5, C = 1, t = 1000)\njt2 <- number_of_junctions(N = 100, R = 1000, H_0 = 0.5, C = 1, t = 0:1000)\n\n\n"} {"package":"junctions","topic":"number_of_junctions_backcross","snippet":"### Name: number_of_junctions_backcross\n### Title: Calculate the average number of junctions during backcrossing\n### Aliases: number_of_junctions_backcross\n\n### ** Examples\n\ncat(\"example number of junctions backcross\")\njt <- number_of_junctions_backcross(H_0 = 0.1, C = 1, t = 5)\n\n\n"} {"package":"junctions","topic":"number_of_junctions_di","snippet":"### Name: number_of_junctions_di\n### Title: Calculate the expected number of junctions between two markers\n### separated by a given amount of recombination\n### Aliases: number_of_junctions_di\n\n### ** Examples\n\nnumber_of_junctions_di(N = 100, H_0 = 0.5, t = 1000, di = 0.01)\n\n\n"}
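{"package":"junctions","topic":"calc_k-note","snippet":"### Note: editorial sketch, not part of the junctions documentation.\n### The expected junction count from number_of_junctions() increases with t\n### and saturates at the limit K returned by calc_k() for the same N, R,\n### H_0 and C.\n\n### ** Examples\n\nK <- calc_k(N = 100, R = 1000, H_0 = 0.5, C = 1)\njt <- number_of_junctions(N = 100, R = 1000, H_0 = 0.5, C = 1,\n t = c(10, 100, 1000, 10000))\nround(jt / K, 3) # fractions of the limit, approaching 1 as t grows\n\n\n"}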
{"package":"junctions","topic":"number_of_junctions_markers","snippet":"### Name: number_of_junctions_markers\n### Title: Calculate the expected total number of junctions in a\n### chromosome, given the distribution of markers\n### Aliases: number_of_junctions_markers\n\n### ** Examples\n\nmarkers <- seq(from = 0, to = 1, length.out = 1000)\njt <- number_of_junctions_markers(N = 100,\n H_0 = 0.5,\n t = 1000,\n marker_distribution = markers)\nrandom_markers <- sort(runif(1000, 0, 1))\njt2 <- number_of_junctions_markers(N = 100,\n H_0 = 0.5,\n t = 1000,\n marker_distribution = random_markers)\n\n\n"} {"package":"junctions","topic":"sim_backcrossing","snippet":"### Name: sim_backcrossing\n### Title: Function to simulate data using a back crossing scheme\n### Aliases: sim_backcrossing\n\n### ** Examples\n\nsim_backcrossing(population_size = 100,\n total_runtime = 5,\n size_in_morgan = 1, number_of_markers = 100, seed = 6,\n time_points = 1:5)\n\n\n"} {"package":"junctions","topic":"sim_fin_chrom","snippet":"### Name: sim_fin_chrom\n### Title: Individual Based Simulation of the accumulation of junctions\n### Aliases: sim_fin_chrom\n\n### ** Examples\n\ncat(\"example sim_fin_chrom\")\nsim_fin_chrom(pop_size = 100, freq_ancestor_1 = 0.5,\n total_runtime = 10, morgan = 1, seed = 42,\n R = 100)\n\n\n"} {"package":"junctions","topic":"sim_inf_chrom","snippet":"### Name: sim_inf_chrom\n### Title: Individual Based Simulation of the accumulation of junctions\n### Aliases: sim_inf_chrom\n\n### ** Examples\n\ncat(\"example sim inf chrom\")\nv <- sim_inf_chrom(pop_size = 100, freq_ancestor_1 = 0.5,\n total_runtime = 10, morgan = 1, markers = 100,\n seed = 42)\nplot(v$avgJunctions, type = \"l\", xlab = \"Generations\",\nylab = \"Number of Junctions\", main = \"Example Infinite Chromosome\")\nlines(v$detectedJunctions, col = \"blue\")\nlegend(\"bottomright\", c(\"Real number\",\"Number detected\"),\n lty = 1, col = c(\"black\", \"blue\"))\n\n\n"} {"package":"junctions","topic":"sim_phased_unphased","snippet":"### Name: sim_phased_unphased\n### Title: Individual Based Simulation of the accumulation of junctions\n### Aliases: sim_phased_unphased\n\n### ** Examples\n\n## Not run: \n##D sim_phased_unphased(pop_size = 100, freq_ancestor_1 = 0.5,\n##D total_runtime = 10, size_in_morgan = 1,\n##D markers = 10, time_points = c(0, 5, 10),\n##D num_threads = 1)\n## End(Not run)\n\n\n"} {"package":"shapley","topic":"h2o.get_ids","snippet":"### Name: h2o.get_ids\n### Title: h2o.get_ids\n### Aliases: h2o.get_ids\n\n### ** Examples\n\n\n## Not run: \n##D library(h2o)\n##D h2o.init(ignore_config = TRUE, nthreads = 2, bind_to_localhost = FALSE, insecure = TRUE)\n##D prostate_path <- system.file(\"extdata\", \"prostate.csv\", package = \"h2o\")\n##D prostate <- h2o.importFile(path = prostate_path, header = TRUE)\n##D y <- \"CAPSULE\"\n##D prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification\n##D aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 30)\n##D \n##D # get the model IDs\n##D ids <- h2o.get_ids(aml)\n## End(Not run)\n\n\n"} {"package":"shapley","topic":"shapley","snippet":"### Name: shapley\n### Title: Weighted average of SHAP values and weighted SHAP confidence\n### intervals for a grid of fine-tuned models or base-learners of a\n### stacked ensemble model\n### Aliases: shapley\n\n### ** Examples\n\n\n## Not run: \n##D # load the required libraries for building the base-learners and the ensemble models\n##D library(h2o) #shapley supports h2o models\n##D
library(shapley)\n##D \n##D # initiate the h2o server\n##D h2o.init(ignore_config = TRUE, nthreads = 2, bind_to_localhost = FALSE, insecure = TRUE)\n##D \n##D # upload data to h2o cloud\n##D prostate_path <- system.file(\"extdata\", \"prostate.csv\", package = \"h2o\")\n##D prostate <- h2o.importFile(path = prostate_path, header = TRUE)\n##D \n##D set.seed(10)\n##D \n##D ### H2O provides 2 types of grid search for tuning the models, which are\n##D ### AutoML and Grid. Below, I demonstrate how weighted mean shapley values\n##D ### can be computed for both types.\n##D \n##D #######################################################\n##D ### PREPARE AutoML Grid (takes a couple of minutes)\n##D #######################################################\n##D # run AutoML to tune various models (GBM) for 60 seconds\n##D y <- \"CAPSULE\"\n##D prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification\n##D aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 120,\n##D include_algos=c(\"GBM\"),\n##D \n##D # this setting ensures the models are comparable for building a meta learner\n##D seed = 2023, nfolds = 10,\n##D keep_cross_validation_predictions = TRUE)\n##D \n##D ### call 'shapley' function to compute the weighted mean and weighted confidence intervals\n##D ### of SHAP values across all trained models.\n##D ### Note that the 'newdata' should be the testing dataset!\n##D result <- shapley(models = aml, newdata = prostate, plot = TRUE)\n##D \n##D #######################################################\n##D ### PREPARE H2O Grid (takes a couple of minutes)\n##D #######################################################\n##D # make sure equal number of \"nfolds\" is specified for different grids\n##D grid <- h2o.grid(algorithm = \"gbm\", y = y, training_frame = prostate,\n##D hyper_params = list(ntrees = seq(1,50,1)),\n##D grid_id = \"ensemble_grid\",\n##D \n##D # this setting ensures the models are comparable for building a meta learner\n##D seed = 2023, fold_assignment = \"Modulo\", nfolds = 10,\n##D keep_cross_validation_predictions = TRUE)\n##D \n##D result2 <- shapley(models = grid, newdata = prostate, plot = TRUE)\n##D \n##D #######################################################\n##D ### PREPARE autoEnsemble STACKED ENSEMBLE MODEL\n##D #######################################################\n##D \n##D ### get the models' IDs from the AutoML and grid searches.\n##D ### this is all that is needed before building the ensemble,\n##D ### i.e., to specify the model IDs that should be evaluated.\n##D library(autoEnsemble)\n##D ids <- c(h2o.get_ids(aml), h2o.get_ids(grid))\n##D autoSearch <- ensemble(models = ids, training_frame = prostate, strategy = \"search\")\n##D result3 <- shapley(models = autoSearch, newdata = prostate, plot = TRUE)\n##D \n##D \n## End(Not run)\n\n\n"} {"package":"shapley","topic":"shapley.plot","snippet":"### Name: shapley.plot\n### Title: Plot weighted SHAP contributions\n### Aliases: shapley.plot\n\n### ** Examples\n\n\n## Not run: \n##D # load the required libraries for building the base-learners and the ensemble models\n##D library(h2o) #shapley supports h2o models\n##D library(shapley)\n##D \n##D # initiate the h2o server\n##D h2o.init(ignore_config = TRUE, nthreads = 2, bind_to_localhost = FALSE, insecure = TRUE)\n##D \n##D # upload data to h2o cloud\n##D prostate_path <- system.file(\"extdata\", \"prostate.csv\", package = \"h2o\")\n##D prostate <- h2o.importFile(path = prostate_path, header = TRUE)\n##D \n##D ### H2O provides 2 types of 
grid search for tuning the models, which are\n##D ### AutoML and Grid. Below, I demonstrate how weighted mean shapley values\n##D ### can be computed for both types.\n##D \n##D set.seed(10)\n##D \n##D #######################################################\n##D ### PREPARE AutoML Grid (takes a couple of minutes)\n##D #######################################################\n##D # run AutoML to tune various models (GBM) for 60 seconds\n##D y <- \"CAPSULE\"\n##D prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification\n##D aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 120,\n##D include_algos=c(\"GBM\"),\n##D \n##D # this setting ensures the models are comparable for building a meta learner\n##D seed = 2023, nfolds = 10,\n##D keep_cross_validation_predictions = TRUE)\n##D \n##D ### call 'shapley' function to compute the weighted mean and weighted confidence intervals\n##D ### of SHAP values across all trained models.\n##D ### Note that the 'newdata' should be the testing dataset!\n##D result <- shapley(models = aml, newdata = prostate, plot = TRUE)\n##D \n##D #######################################################\n##D ### PLOT THE WEIGHTED MEAN SHAP VALUES\n##D #######################################################\n##D \n##D shapley.plot(result, plot = \"bar\")\n##D shapley.plot(result, plot = \"waffle\")\n## End(Not run)\n\n\n"} {"package":"shapley","topic":"shapley.test","snippet":"### Name: shapley.test\n### Title: Significance testing of the difference between the contributions\n### of two features\n### Aliases: shapley.test\n\n### ** Examples\n\n\n## Not run: \n##D # load the required libraries for building the base-learners and the ensemble models\n##D library(h2o) #shapley supports h2o models\n##D library(autoEnsemble) #autoEnsemble models, particularly useful under severe class imbalance\n##D library(shapley)\n##D \n##D # initiate the h2o server\n##D h2o.init(ignore_config = TRUE, nthreads = 2, bind_to_localhost = FALSE, insecure = TRUE)\n##D \n##D # upload data to h2o cloud\n##D prostate_path <- system.file(\"extdata\", \"prostate.csv\", package = \"h2o\")\n##D prostate <- h2o.importFile(path = prostate_path, header = TRUE)\n##D \n##D ### H2O provides 2 types of grid search for tuning the models, which are\n##D ### AutoML and Grid.
Below, I demonstrate how weighted mean shapley values\n##D ### can be computed for both types.\n##D \n##D set.seed(10)\n##D \n##D #######################################################\n##D ### PREPARE AutoML Grid (takes a couple of minutes)\n##D #######################################################\n##D # run AutoML to tune various models (GBM) for 60 seconds\n##D y <- \"CAPSULE\"\n##D prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification\n##D aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 120,\n##D include_algos=c(\"GBM\"),\n##D \n##D # this setting ensures the models are comparable for building a meta learner\n##D seed = 2023, nfolds = 10,\n##D keep_cross_validation_predictions = TRUE)\n##D \n##D ### call 'shapley' function to compute the weighted mean and weighted confidence intervals\n##D ### of SHAP values across all trained models.\n##D ### Note that the 'newdata' should be the testing dataset!\n##D result <- shapley(models = aml, newdata = prostate, plot = TRUE)\n##D \n##D #######################################################\n##D ### Significance testing of contributions of two features\n##D #######################################################\n##D \n##D shapley.test(result, features = c(\"GLEASON\", \"PSA\"), n=5000)\n## End(Not run)\n\n\n"} {"package":"shapley","topic":"shapley.top","snippet":"### Name: shapley.top\n### Title: Select top features in a model\n### Aliases: shapley.top\n\n### ** Examples\n\n\n## Not run: \n##D # load the required libraries for building the base-learners and the ensemble models\n##D library(h2o) #shapley supports h2o models\n##D library(shapley)\n##D \n##D # initiate the h2o server\n##D h2o.init(ignore_config = TRUE, nthreads = 2, bind_to_localhost = FALSE, insecure = TRUE)\n##D \n##D # upload data to h2o cloud\n##D prostate_path <- system.file(\"extdata\", \"prostate.csv\", package = \"h2o\")\n##D prostate <- h2o.importFile(path = prostate_path, header = TRUE)\n##D \n##D ### H2O provides 2 types of grid search for tuning the models, which are\n##D ### AutoML and Grid. 
Below, I demonstrate how weighted mean shapley values\n##D ### can be computed for both types.\n##D \n##D set.seed(10)\n##D \n##D #######################################################\n##D ### PREPARE AutoML Grid (takes a couple of minutes)\n##D #######################################################\n##D # run AutoML to tune various models (GBM) for 60 seconds\n##D y <- \"CAPSULE\"\n##D prostate[,y] <- as.factor(prostate[,y]) #convert to factor for classification\n##D aml <- h2o.automl(y = y, training_frame = prostate, max_runtime_secs = 120,\n##D include_algos=c(\"GBM\"),\n##D \n##D # this setting ensures the models are comparable for building a meta learner\n##D seed = 2023, nfolds = 10,\n##D keep_cross_validation_predictions = TRUE)\n##D \n##D ### call 'shapley' function to compute the weighted mean and weighted confidence intervals\n##D ### of SHAP values across all trained models.\n##D ### Note that the 'newdata' should be the testing dataset!\n##D result <- shapley(models = aml, newdata = prostate, plot = TRUE)\n##D \n##D #######################################################\n##D ### Select top features based on weighted SHAP values\n##D #######################################################\n##D \n##D shapley.top(result, lowerci = 0.01, shapratio = 0.005)\n## End(Not run)\n\n\n"} {"package":"socialranking","topic":"L1Scores","snippet":"### Name: L1Scores\n### Title: L1 Ranking\n### Aliases: L1Scores L1Ranking lexcel1Scores lexcel1Ranking\n\n### ** Examples\n\npr <- as.PowerRelation(\"(123 ~ 13 ~ 2) > (12 ~ 1 ~ 3) > (23 ~ {})\")\nscores <- L1Scores(pr)\nscores$`1`\n# [,1] [,2] [,3]\n# [1,] 0 1 0\n# [2,] 1 1 0\n# [3,] 1 0 0\n\nL1Ranking(pr)\n# 2 > 1 > 3\n\n\n\n"} {"package":"socialranking","topic":"L2Scores","snippet":"### Name: L2Scores\n### Title: L2 Ranking\n### Aliases: L2Scores L2Ranking lexcel2Scores lexcel2Ranking\n\n### ** Examples\n\npr <- as.PowerRelation(\"123 ~ 12 ~ 13 ~ 14 ~ 2 ~ 4\")\npr <- appendMissingCoalitions(pr)\nscores <- L2Scores(pr)\nscores$`1`\n# [,1] [,2]\n# [1,] 0 1\n# [2,] 3 0\n# [3,] 1 2\n# [4,] 0 1\n\nL2Ranking(pr)\n# 1 > 2 > 4 > 3\n\nL1Ranking(pr)\n# 2 > 4 > 1 > 3\n\n\n\n"} {"package":"socialranking","topic":"LPSScores","snippet":"### Name: LPSScores\n### Title: LP* Ranking\n### Aliases: LPSScores LPSRanking lexcelPSScores lexcelPSRanking\n\n### ** Examples\n\npr <- as.PowerRelation(\"(123 ~ 12 ~ 2) > (13 ~ 23) > (1 ~ 3 ~ {})\")\nscores <- LPSScores(pr)\nscores$`1`\n# [,1] [,2]\n# [1,] 1 1\n# [2,] 1 0\n\nscores$`2`\n#\n# [1,]\n# [2,]\n\nLPSRanking(pr)\n# 2 > 1 > 3\n\n\n\n"} {"package":"socialranking","topic":"LPScores","snippet":"### Name: LPScores\n### Title: LP Ranking\n### Aliases: LPScores LPRanking lexcelPScores lexcelPRanking\n\n### ** Examples\n\npr <- as.PowerRelation(\"(123 ~ 13 ~ 2) > (12 ~ 1 ~ 3) > (23 ~ {})\")\nscores <- LPScores(pr)\nscores$`2`\n# [1] 1 0 0\n\nLPRanking(pr)\n# 2 > 1 ~ 3\n\n# Since L^(1) also considers the relation {1,2}, which ranks above {2,3}, it will place 1 above 3\nL1Ranking(pr)\n# 2 > 1 > 3\n\n\n\n"} {"package":"socialranking","topic":"PowerRelation","snippet":"### Name: PowerRelation\n### Title: PowerRelation object\n### Aliases: PowerRelation is.PowerRelation print.PowerRelation\n\n### ** Examples\n\npr <- PowerRelation(list(\n list(c(1,2,3)),\n list(c(1, 2), 2, 3),\n list(c(2, 3), c()),\n list(c(1, 3)),\n list(1)\n))\n\npr\n# 123 > (12 ~ 2 ~ 3) > (23 ~ {}) > 13 > 1\n\nstopifnot(pr$elements == 1:3)\nstopifnot(pr$coalitionLookup(1) == 5)\nstopifnot(pr$coalitionLookup(c()) == 3)\nstopifnot(pr$coalitionLookup(c(1,2)) ==
2)\n\n# find coalitions an element appears in\nfor(t in pr$elementLookup(2)) {\n stopifnot(2 %in% pr$eqs[[t[1]]][[t[2]]])\n}\n\n# use createPowerset to help generate a valid function call\nif(interactive())\n createPowerset(letters[1:3], result = \"copy\")\n\n# pasted, rearranged using alt+up / alt+down in RStudio\n\n# note that the function call looks different if elements are multiple characters long\nif(interactive())\n createPowerset(c(\"apple\", \"banana\", \"chocolate\"), result = \"copy\")\n\n# pasted clipboard\nPowerRelation(rlang::list2(\n list(c(\"banana\", \"chocolate\")),\n list(c(\"apple\"),\n c(\"chocolate\")),\n list(c(\"banana\")),\n list(c()),\n list(c(\"apple\", \"banana\", \"chocolate\"),\n c(\"apple\", \"banana\"),\n c(\"apple\", \"chocolate\")),\n))\n# {banana, chocolate} > ({apple} ~ {chocolate}) > {banana} > {} > ...\n\n\n\n"} {"package":"socialranking","topic":"SocialRanking","snippet":"### Name: SocialRanking\n### Title: 'SocialRanking' object\n### Aliases: SocialRanking\n\n### ** Examples\n\nSocialRanking(list(c(\"a\", \"b\"), \"f\", c(\"c\", \"d\")))\n# a ~ b > f > c ~ d\n\n\n\n"} {"package":"socialranking","topic":"appendMissingCoalitions","snippet":"### Name: appendMissingCoalitions\n### Title: Append missing coalitions\n### Aliases: appendMissingCoalitions\n\n### ** Examples\n\npr <- as.PowerRelation(list(c(1,2), 3))\n# 12 > 3\n\nappendMissingCoalitions(pr)\n# 12 > 3 > (123 ~ 13 ~ 23 ~ 1 ~ 2 ~ {})\n\nappendMissingCoalitions(pr, includeEmptySet = FALSE)\n# 12 > 3 > (123 ~ 13 ~ 23 ~ 1 ~ 2)\n\n\n\n"} {"package":"socialranking","topic":"as.PowerRelation","snippet":"### Name: as.PowerRelation\n### Title: Create PowerRelation object\n### Aliases: as.PowerRelation as.PowerRelation.character\n### as.PowerRelation.list\n\n### ** Examples\n\n# Using character strings\nas.PowerRelation(\"abc > ab > ({} ~ c) > (a ~ b ~ ac) > bc\")\n# abc > ab > ({} ~ c) > (a ~ b ~ ac) > bc\n\n# using createPowerset(), then shifting coalitions up and down using Alt+Up and Alt+Down\nif(interactive()) {\n createPowerset(1:2, result = \"copy\")\n}\nas.PowerRelation(\"\n 12\n > 1\n ~ {}\n > 2\n\")\n\n# Using lists\nas.PowerRelation(list(c(1,2), 2, c(), 1))\n# 12 > 2 > {} > 1\n\nas.PowerRelation(list(c(1,2), 2, c(), 1), comparators = c(\"~\", \">\", \">\"))\n# (12 ~ 2) > {} > 1\n\n# the length of comparators doesn't necessarily matter.\n# If comparators are missing, the existing ones are simply repeated...\nas.PowerRelation(list(c(1,2), 2, c(), 1), comparators = \"~\")\n# (12 ~ 2 ~ {} ~ 1)\n\nas.PowerRelation(list(c(1,2), 2, c(), 1), comparators = c(\"~\", \">\"))\n# (12 ~ 2) > ({} ~ 1)\n\n# ... 
or the rest is cut off\nas.PowerRelation(list(c(1,2), 2, c(), 1), comparators = c(\"~\", \">\", \"~\", \"~\", \">\"))\n# (12 ~ 2) > ({} ~ 1)\n\n\n"} {"package":"socialranking","topic":"coalitionsAreIndifferent","snippet":"### Name: coalitionsAreIndifferent\n### Title: Are coalitions indifferent\n### Aliases: coalitionsAreIndifferent\n\n### ** Examples\n\npr <- PowerRelation(list(list(c(1,2)), list(1, 2)))\n\nstopifnot(coalitionsAreIndifferent(pr, c(1,2), c(1)) == FALSE)\nstopifnot(coalitionsAreIndifferent(pr, 2, 1) == TRUE)\n\n# Note that it doesn't fail with non-existing coalitions\nstopifnot(coalitionsAreIndifferent(pr, 1, c()) == FALSE)\nstopifnot(coalitionsAreIndifferent(pr, 3, c(1,2,3)) == TRUE)\n\n\n\n"} {"package":"socialranking","topic":"copelandScores","snippet":"### Name: copelandScores\n### Title: Copeland-like method\n### Aliases: copelandScores copelandRanking\n\n### ** Examples\n\n# (123 ~ 12 ~ 3 ~ 1) > (2 ~ 23) > 13\npr <- PowerRelation(list(\n list(c(1,2,3), c(1,2), 3, 1),\n list(c(2,3), 2),\n list(c(1,3))\n))\n\ncopelandScores(pr)\n# `1` = c(2, -1)\n# `2` = c(2, -2)\n# `3` = c(1, -2)\n\n# only calculate results for two elements\ncopelandScores(pr, c(1,3))\n# `1` = c(2, -1)\n# `3` = c(1, -2)\n\n# or just one element\ncopelandScores(pr, 2)\n# `2` = c(2, -2)\n\n# 1 > 2 > 3\ncopelandRanking(pr)\n\n\n\n"} {"package":"socialranking","topic":"cpMajorityComparison","snippet":"### Name: cpMajorityComparison\n### Title: CP-Majority relation\n### Aliases: cpMajorityComparison cpMajorityComparisonScore\n\n### ** Examples\n\npr <- as.PowerRelation(\"ac > (a ~ b) > (c ~ bc)\")\n\nscores <- cpMajorityComparison(pr, \"a\", \"b\")\nscores\n# a > b\n# D_ab = {c, {}}\n# D_ba = {{}}\n# Score of a = 2\n# Score of b = 1\n\nstopifnot(scores$e1$name == \"a\")\nstopifnot(scores$e2$name == \"b\")\nstopifnot(scores$e1$score == 2)\nstopifnot(scores$e2$score == 1)\nstopifnot(scores$e1$score == length(scores$e1$winningCoalitions))\nstopifnot(scores$e2$score == length(scores$e2$winningCoalitions))\n\n# get tuples with coalitions S in 2^(N - {i,j})\nemptySetTuple <- Filter(function(x) identical(x$coalition, c()), scores$tuples)[[1]]\nplayerCTuple <- Filter(function(x) identical(x$coalition, \"c\"), scores$tuples)[[1]]\n\n# because {}u{a} ~ {}u{b}, there is no winner\nstopifnot(is.null(emptySetTuple$winner))\nstopifnot(emptySetTuple$e1 == emptySetTuple$e2)\n\n# because {c}u{a} > {c}u{b}, player \"a\" gets the score\nstopifnot(playerCTuple$winner == \"a\")\nstopifnot(playerCTuple$e1 < playerCTuple$e2)\nstopifnot(playerCTuple$e1 == 1L)\nstopifnot(playerCTuple$e2 == 3L)\n\ncpMajorityComparisonScore(pr, \"a\", \"b\") # c(1,0)\ncpMajorityComparisonScore(pr, \"b\", \"a\") # c(0,-1)\n\n\n"} {"package":"socialranking","topic":"createPowerset","snippet":"### Name: createPowerset\n### Title: Create powerset\n### Aliases: createPowerset\n\n### ** Examples\n\n# normal return type is a list of vectors\ncreatePowerset(c(\"Alice\", \"Bob\"), includeEmptySet = FALSE)\n## [[1]]\n## [1] \"Alice\" \"Bob\"\n##\n## [[2]]\n## [1] \"Alice\"\n##\n## [[3]]\n## [1] \"Bob\"\n\n# instead of creating a list, print the power set such that it can be copy-pasted\n# and used to create a new PowerRelation object\ncreatePowerset(letters[1:4], result = \"print\")\n# prints\n# as.PowerRelation(\"\n# abcd\n# > abc\n# > abd\n# > acd\n# > bcd\n# > ab\n# ...\n# > {}\n# \")\n\ncreatePowerset(letters[1:3], includeEmptySet = FALSE, result = \"printCompact\")\n# as.PowerRelation(\"abc > ab > ac > bc > a > b > c\")\n\n# create the same string
as before, but now copy it to the clipboard instead\nif(interactive()) {\n createPowerset(1:3, result = \"copyCompact\")\n}\n\n# Note that as.PowerRelation(character) only assumes single-char elements.\n# As such, the generated function call string with multi-character names\n# looks a little different.\ncreatePowerset(c(\"Alice\", \"Bob\"), result = \"print\")\n# PowerRelation(rlang::list2(\n# list(c(\"Alice\", \"Bob\")),\n# list(c(\"Alice\")),\n# list(c(\"Bob\")),\n# list(c()),\n# ))\n\n\n\n"} {"package":"socialranking","topic":"cumulativeScores","snippet":"### Name: cumulativeScores\n### Title: Cumulative scores\n### Aliases: cumulativeScores cumulativelyDominates\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > 1 > 2\")\n\n# `1`: c(1, 2, 2)\n# `2`: c(1, 1, 2)\ncumulativeScores(pr)\n\n# calculate for selected number of elements\ncumulativeScores(pr, c(2))\n\n# TRUE\nd1 <- cumulativelyDominates(pr, 1, 2)\n\n# TRUE\nd2 <- cumulativelyDominates(pr, 1, 1)\n\n# FALSE\nd3 <- cumulativelyDominates(pr, 1, 1, strictly = TRUE)\n\nstopifnot(all(d1, d2, !d3))\n\n\n\n"} {"package":"socialranking","topic":"doRanking","snippet":"### Name: doRanking\n### Title: Create a 'SocialRanking' object\n### Aliases: doRanking\n\n### ** Examples\n\ndoRanking(c(a=1,b=2))\n# b > a\n\ndoRanking(c(a=2,b=2))\n# a ~ b\n\n# a custom ranking function. Here, we implement the following ranking solution:\n# disregard any big coalitions and only rank elements based on their individual performances\n# iRj if and only if {i} >= {j}\nsingletonRanking <- function(pr) {\n scores <- sapply(pr$elements, equivalenceClassIndex, powerRelation = pr)\n # note that coalitions in higher indexed equivalence classes are less preferable\n # hence, scores should be sorted in an increasing order\n doRanking(scores, decreasing = FALSE)\n}\n\npr <- as.PowerRelation(\"abc > ab > ac > b ~ c ~ bc > a\")\nsingletonRanking(pr)\n# b ~ c > a\n\n# a reverse lexcel ranking, where vectors are compared right to left\n# here, we introduce a compare function. It returns:\n# * 0, if a and b are identical\n# * a positive value, if a[i] > b[i] and every value after that is equal\n# * a negative value, if a[i] < b[i] and every value after that is equal\nreverseLexcelCompare <- function(a, b) {\n i <- which(a != b) |> rev()\n if(length(i) == 0) 0\n else a[i[1]] - b[i[1]]\n}\n\nscores <- unclass(cumulativeScores(pr))\n\n# R cannot natively sort a class. Instead:\n# Method 1 - utilize the compare parameter\ndoRanking(scores, compare = reverseLexcelCompare)\n\n\n# Method 2 - introduce S3 class\n`[.RevLex` <- function(x, i, ...) 
structure(unclass(x)[i], class = \"RevLex\")\n`==.RevLex` <- function(a, b) reverseLexcelCompare(a[[1]],b[[1]]) == 0\n`>.RevLex` <- function(a, b) reverseLexcelCompare(a[[1]],b[[1]]) > 0\nis.na.RevLex <- function(x) FALSE\ndoRanking(structure(scores, class = \"RevLex\"))\n\nstopifnot(\n doRanking(scores, compare = reverseLexcelCompare) ==\n doRanking(structure(scores, class = \"RevLex\"))\n)\n\n\n\n"} {"package":"socialranking","topic":"dominates","snippet":"### Name: dominates\n### Title: Dominance\n### Aliases: dominates\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > 1 > 2\")\n\n# TRUE\nd1 <- dominates(pr, 1, 2)\n\n# FALSE\nd2 <- dominates(pr, 2, 1)\n\n# TRUE (because it's not strict dominance)\nd3 <- dominates(pr, 1, 1)\n\n# FALSE\nd4 <- dominates(pr, 1, 1, strictly = TRUE)\n\nstopifnot(all(d1, !d2, d3, !d4))\n\n\n\n"} {"package":"socialranking","topic":"elementLookup","snippet":"### Name: elementLookup\n### Title: Element lookup\n### Aliases: elementLookup\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > 2 ~ 1\")\n\nl <- elementLookup(pr, 1)\nl\n# (1,1), (2,2)\n\nsapply(l, function(tuple) 1 %in% pr$eqs[[tuple[1]]][[tuple[2]]]) |> all() |> stopifnot()\n\n# if element does not exist, it returns NULL\nelementLookup(pr, 3) |> is.null() |> stopifnot()\n\n\n\n"} {"package":"socialranking","topic":"equivalenceClassIndex","snippet":"### Name: equivalenceClassIndex\n### Title: Get index of equivalence class containing a coalition\n### Aliases: equivalenceClassIndex coalitionLookup\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > 2 ~ 1\")\n\n(e1 <- equivalenceClassIndex(pr, c(1, 2)))\n# 1\n\n(e2 <- equivalenceClassIndex(pr, c(1)))\n# 2\n\n(e3 <- equivalenceClassIndex(pr, c(2)))\n# 2\n\n(e4 <- equivalenceClassIndex(pr, c()))\n# NULL <- empty set does not exist\n\nstopifnot(all(c(e1,e2,e3,e4) == c(1,2,2)))\n\n\n\n"} {"package":"socialranking","topic":"generateNextPartition","snippet":"### Name: generateNextPartition\n### Title: Next partition\n### Aliases: generateNextPartition\n\n### ** Examples\n\ncoalitions <- createPowerset(c('a','b'), includeEmptySet = FALSE)\n# list(c('a','b'), 'a', 'b')\n\ngen <- powerRelationGenerator(coalitions)\ngen()\n# (ab ~ a ~ b)\n\ngen()\n# (ab ~ b) > a\n\n# skipping partition of size two, where the first partition has\n# 2 coalitions and the second partition has 1 coalition\ngen <- generateNextPartition(gen)\ngen()\n# ab > (a ~ b)\n\n# only remaining partition is one of size 3, wherein each\n# equivalence class is of size 1\ngen <- generateNextPartition(gen)\ngen()\n# ab > a > b\n\n# went through all partitions, it will only generate NULL now\ngen <- generateNextPartition(gen)\nstopifnot(is.null(gen()))\n\n\n\n"} {"package":"socialranking","topic":"kramerSimpsonScores","snippet":"### Name: kramerSimpsonScores\n### Title: Kramer-Simpson-like method\n### Aliases: kramerSimpsonScores kramerSimpsonRanking\n\n### ** Examples\n\n# 2 > (1 ~ 3) > 12 > (13 ~ 23) > {} > 123\npr <- as.PowerRelation(\"2 > (1~3) > 12 > (13~23) > {} > 123\")\n\n# get scores for all elements\n# cpMajorityComparisonScore(pr, 2, 1, strictly = TRUE)[1] = 1\n# cpMajorityComparisonScore(pr, 3, 1, strictly = TRUE)[1] = 0\n# therefore the Kramer-Simpson-Score for element\n# `1` = -max(0, 1) = -1\n#\n# Score analogous for the other elements\n# `2` = 0\n# `3` = -2\nkramerSimpsonScores(pr)\n\n# get scores for two elements\n# `1` = -1\n# `3` = -2\nkramerSimpsonScores(pr, c(1,3))\n\n# or single element\n# result is still a list\nkramerSimpsonScores(pr, 2)\n\n# 2 > 1 > 3\nkramerSimpsonRanking(pr)\n\n\n\n"}
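{"package":"socialranking","topic":"kramerSimpsonScores-note","snippet":"### Note: editorial sketch, not part of the socialranking documentation.\n### It reproduces the score derivation commented above directly from\n### cpMajorityComparisonScore(): the Kramer-Simpson score of element i is\n### -max over all opponents j of the strict CP-majority score of j against i.\n\n### ** Examples\n\npr <- as.PowerRelation(\"2 > (1~3) > 12 > (13~23) > {} > 123\")\n-max(cpMajorityComparisonScore(pr, 2, 1, strictly = TRUE)[1],\n cpMajorityComparisonScore(pr, 3, 1, strictly = TRUE)[1])\n# -1, matching the score of element 1 from kramerSimpsonScores(pr)\n\n\n"}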
{"package":"socialranking","topic":"lexcelScores","snippet":"### Name: lexcelScores\n### Title: Lexicographical Excellence\n### Aliases: lexcelScores lexcelRanking dualLexcelRanking\n\n### ** Examples\n\n# note that the coalition {1} appears twice\n# 123 > 12 ~ 13 ~ 1 ~ {} > 23 ~ 1 ~ 2\n# E = {123} > {12, 13, 1, {}} > {23, 1, 2}\npr <- suppressWarnings(as.PowerRelation(\n \"123 > (12 ~ 13 ~ 1 ~ {}) > (23 ~ 1 ~ 2)\"\n))\n\n# lexcel scores for all elements\n# `1` = c(1, 3, 1)\n# `2` = c(1, 1, 2)\n# `3` = c(1, 1, 1)\nlexcelScores(pr)\n\n# lexcel scores for a subset of all elements\nlexcelScores(pr, c(1, 3))\nlexcelScores(pr, 2)\n\n# 1 > 2 > 3\nlexcelRanking(pr)\n\n# 3 > 1 > 2\ndualLexcelRanking(pr)\n\n\n\n"} {"package":"socialranking","topic":"makePowerRelationMonotonic","snippet":"### Name: makePowerRelationMonotonic\n### Title: Make Power Relation monotonic\n### Aliases: makePowerRelationMonotonic\n\n### ** Examples\n\npr <- as.PowerRelation(\"ab > ac > abc > b > a > {} > c > bc\")\nmakePowerRelationMonotonic(pr)\n# (abc ~ ab) > ac > (bc ~ b) > a > (c ~ {})\n\n# notice that missing coalitions are automatically added,\n# except for the empty set\npr <- as.PowerRelation(\"a > b > c\")\nmakePowerRelationMonotonic(pr)\n# (abc ~ ab ~ ac ~ a) > (bc ~ b) > c\n\n# setting addMissingCoalitions to FALSE changes this behavior\npr <- as.PowerRelation(\"a > ab > c ~ {} > b\")\nmakePowerRelationMonotonic(pr, addMissingCoalitions = FALSE)\n# (ab ~ a) > (b ~ c ~ {})\n\n# notice that an equivalence class containing an empty coalition\n# automatically moves all remaining coalitions to that equivalence class.\npr <- as.PowerRelation(\"a > {} > b > c\")\nmakePowerRelationMonotonic(pr)\n# (abc ~ ab ~ ac ~ a) > (bc ~ b ~ c ~ {})\n\n\n\n"} {"package":"socialranking","topic":"ordinalBanzhafScores","snippet":"### Name: ordinalBanzhafScores\n### Title: Ordinal Banzhaf ranking\n### Aliases: ordinalBanzhafScores ordinalBanzhafRanking\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > (2 ~ {}) > 1\")\n\n# Player 1 contributes positively to {2}\n# Player 1 contributes negatively to {empty set}\n# Therefore player 1 has a score of 1 - 1 = 0\n#\n# Player 2 contributes positively to {1}\n# Player 2 does NOT have an impact on {empty set}\n# Therefore player 2 has a score of 1 - 0 = 1\nordinalBanzhafScores(pr)\n# `1` = c(1, -1, 0)\n# `2` = c(1, 0, 0)\n\nordinalBanzhafRanking(pr)\n# 2 > 1\n\n\n\n"} {"package":"socialranking","topic":"powerRelationGenerator","snippet":"### Name: powerRelationGenerator\n### Title: Generate power relations\n### Aliases: powerRelationGenerator\n\n### ** Examples\n\ncoalitions <- createPowerset(c('a','b'), includeEmptySet = FALSE)\n# list(c('a','b'), 'a', 'b')\n\ngen <- powerRelationGenerator(coalitions)\n\nwhile(!is.null(pr <- gen())) {\n print(pr)\n}\n# (ab ~ a ~ b)\n# (ab ~ a) > b\n# (ab ~ b) > a\n# (a ~ b) > ab\n# ab > (a ~ b)\n# a > (ab ~ b)\n# b > (ab ~ a)\n# ab > a > b\n# ab > b > a\n# a > ab > b\n# b > ab > a\n# a > b > ab\n# b > a > ab\n\n# from now on, gen() always returns NULL\ngen()\n# NULL\n\n# Use generateNextPartition() to skip certain partitions\ngen <- powerRelationGenerator(coalitions)\n\ngen <- generateNextPartition(gen)\ngen <- generateNextPartition(gen)\ngen()\n\n\n\n"} {"package":"socialranking","topic":"powerRelationMatrix","snippet":"### Name: powerRelationMatrix\n### Title: Create relation matrix\n### Aliases: powerRelationMatrix as.relation.PowerRelation\n\n### ** Examples\n\npr <- as.PowerRelation(\"12 > 1 > 2\")\nrelation <-
powerRelationMatrix(pr)\n\n# do relation stuff\n# Incidence matrix\n# 111\n# 011\n# 001\nrelations::relation_incidence(relation)\n\n# all TRUE\nstopifnot(all(\n relations::relation_is_acyclic(relation),\n relations::relation_is_antisymmetric(relation),\n relations::relation_is_linear_order(relation),\n relations::relation_is_complete(relation),\n relations::relation_is_reflexive(relation),\n relations::relation_is_transitive(relation)\n))\n\n\n# a power relation where coalitions {1} and {2} are indifferent\npr <- as.PowerRelation(\"12 > (1 ~ 2)\")\nrelation <- powerRelationMatrix(pr)\n\n# Incidence matrix\n# 111\n# 011\n# 011\nrelations::relation_incidence(relation)\n\n# FALSE\nstopifnot(!any(\n relations::relation_is_acyclic(relation),\n relations::relation_is_antisymmetric(relation),\n relations::relation_is_linear_order(relation)\n))\n# TRUE\nstopifnot(all(\n relations::relation_is_complete(relation),\n relations::relation_is_reflexive(relation),\n relations::relation_is_transitive(relation)\n))\n\n\n# a pr with cycles\npr <- suppressWarnings(as.PowerRelation(\"12 > 1 > 2 > 1\"))\nrelation <- powerRelationMatrix(pr)\n\n# Incidence matrix\n# 1111\n# 0111\n# 0111\n# 0111\nrelations::relation_incidence(relation)\n\n# custom naming convention\nrelation <- powerRelationMatrix(\n pr,\n function(x) paste0(letters[x], \":\", paste(pr$rankingCoalitions[[x]], collapse = \"|\"))\n)\n\nrelations::relation_incidence(relation)\n# Incidences:\n# a:1|2 b:1 c:2 d:1\n# a:1|2 1 1 1 1\n# b:1 0 1 1 1\n# c:2 0 1 1 1\n# d:1 0 1 1 1\n\n\n\n"} {"package":"socialranking","topic":"testRelation","snippet":"### Name: testRelation\n### Title: Test relation between two elements\n### Aliases: testRelation %:% %>=dom% %>dom% %>=cumuldom% %>cumuldom%\n### %>=cp% %>cp% %>=banz% %>banz% %>=cop% %>cop% %>=ks% %>ks% %>=lex%\n### %>lex% %>=duallex% %>duallex% %>=L1% %>L1% %>=L2% %>L2% %>=LP% %>LP%\n### %>=LPS% %>LPS%\n\n### ** Examples\n\npr <- as.PowerRelation(\"123 > 12 ~ 13 ~ 23 > 3 > 1 ~ 2 > {}\")\n\n# Dominance\nstopifnot(pr %:% 1 %>=dom% 2)\n\n# Strict dominance\nstopifnot((pr %:% 1 %>dom% 2) == FALSE)\n\n# Cumulative dominance\nstopifnot(pr %:% 1 %>=cumuldom% 2)\n\n# Strict cumulative dominance\nstopifnot((pr %:% 1 %>cumuldom% 2) == FALSE)\n\n# CP-Majority relation\nstopifnot(pr %:% 1 %>=cp% 2)\n\n# Strict CP-Majority relation\nstopifnot((pr %:% 1 %>cp% 2) == FALSE)\n\n# Ordinal banzhaf relation\nstopifnot(pr %:% 1 %>=banz% 2)\n\n# Strict ordinal banzhaf relation\n# (meaning 1 had a strictly higher positive contribution than 2)\nstopifnot((pr %:% 1 %>banz% 2) == FALSE)\n\n# Copeland-like method\nstopifnot(pr %:% 1 %>=cop% 2)\nstopifnot(pr %:% 2 %>=cop% 1)\n\n# Strict Copeland-like method\n# (meaning pairwise winning minus pairwise losing comparison of\n# 1 is strictly higher than of 2)\nstopifnot((pr %:% 1 %>cop% 2) == FALSE)\nstopifnot((pr %:% 2 %>cop% 1) == FALSE)\nstopifnot(pr %:% 3 %>cop% 1)\n\n# Kramer-Simpson-like method\nstopifnot(pr %:% 1 %>=ks% 2)\nstopifnot(pr %:% 2 %>=ks% 1)\n\n# Strict Kramer-Simpson-like method\n# (meaning ks-score of 1 is actually higher than 2)\nstopifnot((pr %:% 2 %>ks% 1) == FALSE)\nstopifnot((pr %:% 1 %>ks% 2) == FALSE)\nstopifnot(pr %:% 3 %>ks% 1)\n\n# Lexicographical and dual lexicographical excellence\nstopifnot(pr %:% 3 %>=lex% 1)\nstopifnot(pr %:% 3 %>=duallex% 1)\n\n# Strict lexicographical and dual lexicographical excellence\n# (meaning their lexicographical scores don't match)\nstopifnot(pr %:% 3 %>lex% 1)\nstopifnot(pr %:% 3 %>duallex% 1)\n\n# L^(1) and L^(2)\nstopifnot(pr 
%:% 1 %>=L1% 2)\nstopifnot(pr %:% 1 %>=L2% 2)\n\n# Strict L^(1) and L^(2)\nstopifnot((pr %:% 1 %>L1% 2) == FALSE)\nstopifnot(pr %:% 3 %>L1% 1)\n\nstopifnot((pr %:% 1 %>L2% 2) == FALSE)\nstopifnot(pr %:% 3 %>L2% 1)\n\n# L^p and L^p*\nstopifnot(pr %:% 1 %>=LP% 2)\nstopifnot(pr %:% 1 %>=LPS% 2)\n\n# Strict L^p and L^p*\nstopifnot((pr %:% 1 %>LP% 2) == FALSE)\nstopifnot(pr %:% 3 %>LP% 1)\n\nstopifnot((pr %:% 1 %>LPS% 2) == FALSE)\nstopifnot(pr %:% 3 %>LPS% 1)\n\n\n\n"} {"package":"socialranking","topic":"transitiveClosure","snippet":"### Name: transitiveClosure\n### Title: Transitive Closure\n### Aliases: transitiveClosure\n\n### ** Examples\n\npr <- as.PowerRelation(\"1 > 2\")\n\n# nothing changes\ntransitiveClosure(pr)\n\n\npr <- suppressWarnings(as.PowerRelation(\"1 > 2 > 1\"))\n\n# 1 ~ 2\ntransitiveClosure(pr)\n\n\npr <- suppressWarnings(\n as.PowerRelation(\"1 > 3 > 1 > 2 > 23 > 2\")\n)\n\n# 1 > 3 > 1 > 2 > 23 > 2 =>\n# 1 ~ 3 > 2 ~ 23\ntransitiveClosure(pr)\n\n\n\n"} {"package":"palettesForR","topic":"showPalette","snippet":"### Name: showPalette\n### Title: Show a palette.\n### Aliases: showPalette\n\n### ** Examples\n\ndata(Caramel_gpl)\nshowPalette(myPal = Caramel_gpl)\n\n\n"} {"package":"dlm","topic":"ARtransPars","snippet":"### Name: ARtransPars\n### Title: Function to parametrize a stationary AR process\n### Aliases: ARtransPars\n### Keywords: misc\n\n### ** Examples\n\n(ar <- ARtransPars(rnorm(5)))\nall( Mod(polyroot(c(1,-ar))) > 1 ) # TRUE\n\n\n"} {"package":"dlm","topic":"FF","snippet":"### Name: FF\n### Title: Components of a dlm object\n### Aliases: FF FF<- V V<- GG GG<- W W<- m0 m0<- C0 C0<- FF.dlm FF<-.dlm\n### V.dlm V<-.dlm GG.dlm GG<-.dlm W.dlm W<-.dlm m0.dlm m0<-.dlm C0.dlm\n### C0<-.dlm JFF JFF<- JV JV<- JGG JGG<- JW JW<- X X<- JFF.dlm JFF<-.dlm\n### JV.dlm JV<-.dlm JGG.dlm JGG<-.dlm JW.dlm JW<-.dlm X.dlm X<-.dlm\n### Keywords: ts\n\n### ** Examples\n\nset.seed(222)\nmod <- dlmRandom(5, 6)\nall.equal( FF(mod), mod$FF )\nall.equal( V(mod), mod$V )\nall.equal( GG(mod), mod$GG )\nall.equal( W(mod), mod$W )\nall.equal( m0(mod), mod$m0 )\nall.equal( C0(mod), mod$C0)\nm0(mod)\nm0(mod) <- rnorm(6)\nC0(mod)\nC0(mod) <- rwishart(10, 6)\n### A time-varying model\nmod <- dlmModReg(matrix(rnorm(10), 5, 2))\nJFF(mod)\nX(mod)\n\n\n"} {"package":"dlm","topic":"NelPlo","snippet":"### Name: NelPlo\n### Title: Nelson-Plosser macroeconomic time series\n### Aliases: NelPlo\n### Keywords: datasets\n\n### ** Examples\n\ndata(NelPlo)\nplot(NelPlo)\n\n\n"} {"package":"dlm","topic":"USecon","snippet":"### Name: USecon\n### Title: US macroeconomic time series\n### Aliases: USecon\n### Keywords: datasets\n\n### ** Examples\n\ndata(USecon)\nplot(USecon)\n\n\n"}
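{"package":"dlm","topic":"dlmModPoly-note","snippet":"### Note: editorial sketch, not part of the dlm documentation.\n### A quick look at the matrices behind a second-order polynomial (local\n### linear trend) model, using the component accessors documented above.\n\n### ** Examples\n\nmod <- dlmModPoly(2, dV = 1, dW = c(0, 0.1))\nFF(mod) # 1 x 2 observation matrix (1, 0)\nGG(mod) # 2 x 2 transition matrix rbind(c(1, 1), c(0, 1))\nW(mod) # system variance diag(c(0, 0.1))\nV(mod) # observation variance 1\n\n\n"}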
{"package":"dlm","topic":"arms","snippet":"### Name: arms\n### Title: Function to perform Adaptive Rejection Metropolis Sampling\n### Aliases: arms\n### Keywords: distribution multivariate misc\n\n### ** Examples\n\n#### ==> Warning: running the examples may take a few minutes! <== #### \n## No test: \nset.seed(4521222)\n### Univariate densities\n## Unif(-r,r) \ny <- arms(runif(1,-1,1), function(x,r) 1, function(x,r) (x>-r)*(x<r), 5000, r=2)\nsummary(y); hist(y,prob=TRUE,main=\"Unif(-r,r); r=2\")\n## Normal(mean,1)\nnorldens <- function(x,mean) -(x-mean)^2/2\ny <- arms(runif(1,3,17), norldens, function(x,mean) ((x-mean)>-7)*((x-mean)<7),\n 5000, mean=10)\nsummary(y); hist(y,prob=TRUE,main=\"Gaussian(m,1); m=10\")\ncurve(dnorm(x,mean=10),3,17,add=TRUE)\n## Exponential(1)\ny <- arms(5, function(x) -x, function(x) (x>0)*(x<70), 5000)\nsummary(y); hist(y,prob=TRUE,main=\"Exponential(1)\")\ncurve(exp(-x),0,8,add=TRUE)\n## Gamma(4.5,1) \ny <- arms(runif(1,1e-4,20), function(x) 3.5*log(x)-x,\n function(x) (x>1e-4)*(x<20), 5000)\nsummary(y); hist(y,prob=TRUE,main=\"Gamma(4.5,1)\")\ncurve(dgamma(x,shape=4.5,scale=1),1e-4,20,add=TRUE)\n## Gamma(0.5,1) (this one is not log-concave)\ny <- arms(runif(1,1e-8,10), function(x) -0.5*log(x)-x,\n function(x) (x>1e-8)*(x<10), 5000)\nsummary(y); hist(y,prob=TRUE,main=\"Gamma(0.5,1)\")\ncurve(dgamma(x,shape=0.5,scale=1),1e-8,10,add=TRUE)\n## Beta(.2,.2) (this one neither)\ny <- arms(runif(1), function(x) (0.2-1)*log(x)+(0.2-1)*log(1-x),\n function(x) (x>1e-5)*(x<1-1e-5), 5000)\nsummary(y); hist(y,prob=TRUE,main=\"Beta(0.2,0.2)\")\ncurve(dbeta(x,0.2,0.2),1e-5,1-1e-5,add=TRUE)\n## Triangular\ny <- arms(runif(1,-1,1), function(x) log(1-abs(x)), function(x) abs(x)<1, 5000) \nsummary(y); hist(y,prob=TRUE,ylim=c(0,1),main=\"Triangular\")\ncurve(1-abs(x),-1,1,add=TRUE)\n## Multimodal examples (Mixture of normals)\nlmixnorm <- function(x,weights,means,sds) {\n log(crossprod(weights, exp(-0.5*((x-means)/sds)^2 - log(sds))))\n}\ny <- arms(0, lmixnorm, function(x,...) (x>(-100))*(x<100), 5000, weights=c(1,3,2),\n means=c(-10,0,10), sds=c(1.5,3,1.5))\nsummary(y); hist(y,prob=TRUE,main=\"Mixture of Normals\")\ncurve(colSums(c(1,3,2)/6*dnorm(matrix(x,3,length(x),byrow=TRUE),c(-10,0,10),c(1.5,3,1.5))),\n par(\"usr\")[1], par(\"usr\")[2], add=TRUE)\n\n### Bivariate densities \n## Bivariate standard normal\ny <- arms(c(0,2), function(x) -crossprod(x)/2,\n function(x) (min(x)>-5)*(max(x)<5), 500)\nplot(y, main=\"Bivariate standard normal\", asp=1)\n## Uniform in the unit square\ny <- arms(c(0.2,.6), function(x) 1,\n function(x) (min(x)>0)*(max(x)<1), 500)\nplot(y, main=\"Uniform in the unit square\", asp=1)\npolygon(c(0,1,1,0),c(0,0,1,1))
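\n## Editor's addition, not in the original dlm examples: an independent\n## bivariate double exponential, log-density -abs(x[1])-abs(x[2]),\n## sampled on a bounded box support\ny <- arms(c(0,0), function(x) -sum(abs(x)), function(x) all(abs(x)<10), 500)\nplot(y, main=\"Bivariate double exponential\", asp=1)\n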
## Uniform in the circle of radius r\ny <- arms(c(0.2,0), function(x,...) 1,\n function(x,r2) sum(x^2)<r2, 500, r2=2^2)\nplot(y, main=\"Uniform in the circle of radius r; r=2\", asp=1)\ncurve(-sqrt(4-x^2),-2,2,add=TRUE)\ncurve(sqrt(4-x^2),-2,2,add=TRUE)\n## Uniform on the simplex\nsimp <- function(x) if ( any(x<0) || (sum(x)>1) ) 0 else 1\ny <- arms(c(0.2,0.2), function(x) 1, simp, 500)\nplot(y, xlim=c(0,1), ylim=c(0,1), main=\"Uniform in the simplex\", asp=1)\npolygon(c(0,1,0), c(0,0,1))\n## A bimodal distribution (mixture of normals)\nbimodal <- function(x) { log(prod(dnorm(x,mean=3))+prod(dnorm(x,mean=-3))) }\ny <- arms(c(-2,2), bimodal, function(x) all(x>(-10))*all(x<(10)), 500)\nplot(y, main=\"Mixture of bivariate Normals\", asp=1)\n\n## A bivariate distribution with non-convex support\nsupport <- function(x) {\n return(as.numeric( -1 < x[2] && x[2] < 1 &&\n -2 < x[1] &&\n ( x[1] < 1 || crossprod(x-c(1,0)) < 1 ) ) )\n}\nMin.log <- log(.Machine$double.xmin) + 10\nlogf <- function(x) {\n if ( x[1] < 0 ) return(log(1/4))\n else\n if (crossprod(x-c(1,0)) < 1 ) return(log(1/pi))\n return(Min.log)\n}\nx <- as.matrix(expand.grid(seq(-2.2,2.2,length=40),seq(-1.1,1.1,length=40)))\ny <- sapply(1:nrow(x), function(i) support(x[i,]))\nplot(x,type='n',asp=1)\npoints(x[y==1,],pch=1,cex=1,col='green')\nz <- arms(c(0,0), logf, support, 1000)\npoints(z,pch=20,cex=0.5,col='blue')\npolygon(c(-2,0,0,-2),c(-1,-1,1,1))\ncurve(-sqrt(1-(x-1)^2),0,2,add=TRUE)\ncurve(sqrt(1-(x-1)^2),0,2,add=TRUE)\nsum( z[,1] < 0 ) # sampled points in the square\nsum( apply(t(z)-c(1,0),2,crossprod) < 1 ) # sampled points in the circle\n## End(No test)\n\n\n"} {"package":"dlm","topic":"bdiag","snippet":"### Name: bdiag\n### Title: Build a block diagonal matrix\n### Aliases: bdiag\n### Keywords: misc\n\n### ** Examples\n\nbdiag(matrix(1:4,2,2),diag(3))\nbdiag(matrix(1:6,3,2),matrix(11:16,2,3))\n\n\n"} {"package":"dlm","topic":"convex.bounds","snippet":"### Name: convex.bounds\n### Title: Find the boundaries of a convex set\n### Aliases: convex.bounds\n### Keywords: misc\n\n### ** Examples\n\n## boundaries of a unit circle\nconvex.bounds(c(0,0), c(1,1), indFunc=function(x) crossprod(x)<1)\n\n\n"} {"package":"dlm","topic":"dlm","snippet":"### Name: dlm\n### Title: dlm objects\n### Aliases: dlm as.dlm is.dlm\n### Keywords: misc\n\n### ** Examples\n\n## Linear regression as a DLM\nx <- matrix(rnorm(10),nc=2)\nmod <- dlmModReg(x)\nis.dlm(mod)\n\n## Adding dlm's\ndlmModPoly() + dlmModSeas(4) # linear trend plus quarterly seasonal component\n\n\n"} {"package":"dlm","topic":"dlmBSample","snippet":"### Name: dlmBSample\n### Title: Draw from the posterior distribution of the state vectors\n### Aliases: dlmBSample\n### Keywords: misc\n\n### ** Examples\n\nnileMod <- dlmModPoly(1, dV = 15099.8, dW = 1468.4)\nnileFilt <- dlmFilter(Nile, nileMod)\nnileSmooth <- dlmSmooth(nileFilt) # estimated \"true\" level\nplot(cbind(Nile, nileSmooth$s[-1]), plot.type = \"s\",\n col = c(\"black\", \"red\"), ylab = \"Level\",\n main = \"Nile river\", lwd = c(2, 2)) \nfor (i in 1:10) # 10 simulated \"true\" levels \n lines(dlmBSample(nileFilt[-1]), lty=2) \n\n\n"} {"package":"dlm","topic":"dlmFilter","snippet":"### Name: dlmFilter\n### Title: DLM filtering\n### Aliases: dlmFilter\n### Keywords: ts misc\n\n### ** Examples\n\nnileBuild <- function(par) {\n dlmModPoly(1, dV = exp(par[1]), dW = exp(par[2]))\n}\nnileMLE <- dlmMLE(Nile, rep(0,2), nileBuild); nileMLE$conv\nnileMod <- nileBuild(nileMLE$par)\nV(nileMod)\nW(nileMod)\nnileFilt <- dlmFilter(Nile, nileMod)\nnileSmooth <- dlmSmooth(nileFilt)\nplot(cbind(Nile, nileFilt$m[-1], nileSmooth$s[-1]), plot.type='s',\n col=c(\"black\",\"red\",\"blue\"), ylab=\"Level\", main=\"Nile river\", lwd=c(1,2,2))\n\n\n"} {"package":"dlm","topic":"dlmForecast","snippet":"### Name: dlmForecast\n### Title: Prediction and
simulation of future observations\n### Aliases: dlmForecast\n### Keywords: misc\n\n### ** Examples\n\n## Comparing theoretical prediction intervals with sample quantiles\nset.seed(353)\nn <- 20; m <- 1; p <- 5\nmod <- dlmModPoly() + dlmModSeas(4, dV=0)\nW(mod) <- rwishart(2*p,p) * 1e-1\nm0(mod) <- rnorm(p, sd=5)\nC0(mod) <- diag(p) * 1e-1\nnew <- 100\nfore <- dlmForecast(mod, nAhead=n, sampleNew=new)\nciTheory <- (outer(sapply(fore$Q, FUN=function(x) sqrt(diag(x))), qnorm(c(0.1,0.9))) +\n as.vector(t(fore$f)))\nciSample <- t(apply(array(unlist(fore$newObs), dim=c(n,m,new))[,1,], 1,\n FUN=function(x) quantile(x, c(0.1,0.9))))\nplot.ts(cbind(ciTheory,fore$f[,1]),plot.type=\"s\", col=c(\"red\",\"red\",\"green\"),ylab=\"y\")\nfor (j in 1:2) lines(ciSample[,j], col=\"blue\")\nlegend(2,-40,legend=c(\"forecast mean\", \"theoretical bounds\", \"Monte Carlo bounds\"),\n col=c(\"green\",\"red\",\"blue\"), lty=1, bty=\"n\")\n\n\n"} {"package":"dlm","topic":"dlmGibbsDIG","snippet":"### Name: dlmGibbsDIG\n### Title: Gibbs sampling for d-inverse-gamma model\n### Aliases: dlmGibbsDIG\n### Keywords: misc\n\n### ** Examples\n\n## See the package vignette for an example\n\n\n"} {"package":"dlm","topic":"dlmLL","snippet":"### Name: dlmLL\n### Title: Log likelihood evaluation for a state space model\n### Aliases: dlmLL\n### Keywords: misc\n\n### ** Examples\n\n##---- See the examples for dlmMLE ----\n\n\n"} {"package":"dlm","topic":"dlmMLE","snippet":"### Name: dlmMLE\n### Title: Parameter estimation by maximum likelihood\n### Aliases: dlmMLE\n### Keywords: misc\n\n### ** Examples\n\ndata(NelPlo)\n### multivariate local level -- seemingly unrelated time series\nbuildSu <- function(x) {\n Vsd <- exp(x[1:2])\n Vcorr <- tanh(x[3])\n V <- Vsd %o% Vsd\n V[1,2] <- V[2,1] <- V[1,2] * Vcorr\n Wsd <- exp(x[4:5])\n Wcorr <- tanh(x[6])\n W <- Wsd %o% Wsd\n W[1,2] <- W[2,1] <- W[1,2] * Wcorr\n return(list(\n m0 = rep(0,2),\n C0 = 1e7 * diag(2),\n FF = diag(2),\n GG = diag(2),\n V = V,\n W = W))\n}\n\nsuMLE <- dlmMLE(NelPlo, rep(0,6), buildSu); suMLE\nbuildSu(suMLE$par)[c(\"V\",\"W\")]\nStructTS(NelPlo[,1], type=\"level\") ## compare with W[1,1] and V[1,1]\nStructTS(NelPlo[,2], type=\"level\") ## compare with W[2,2] and V[2,2]\n\n## multivariate local level model with homogeneity restriction\nbuildHo <- function(x) {\n Vsd <- exp(x[1:2])\n Vcorr <- tanh(x[3])\n V <- Vsd %o% Vsd\n V[1,2] <- V[2,1] <- V[1,2] * Vcorr\n return(list(\n m0 = rep(0,2),\n C0 = 1e7 * diag(2),\n FF = diag(2),\n GG = diag(2),\n V = V,\n W = x[4]^2 * V))\n}\n\nhoMLE <- dlmMLE(NelPlo, rep(0,4), buildHo); hoMLE\nbuildHo(hoMLE$par)[c(\"V\",\"W\")]\n\n\n"} {"package":"dlm","topic":"dlmModARMA","snippet":"### Name: dlmModARMA\n### Title: Create a DLM representation of an ARMA process\n### Aliases: dlmModARMA\n### Keywords: misc\n\n### ** Examples\n\n## ARMA(2,3)\ndlmModARMA(ar = c(.5,.1), ma = c(.4,2,.3), sigma2=1)\n## Bivariate ARMA(2,1)\ndlmModARMA(ar = list(matrix(1:4,2,2), matrix(101:104,2,2)),\n ma = list(matrix(-4:-1,2,2)), sigma2 = diag(2))\n\n\n"} {"package":"dlm","topic":"dlmModPoly","snippet":"### Name: dlmModPoly\n### Title: Create an n-th order polynomial DLM\n### Aliases: dlmModPoly\n### Keywords: misc\n\n### ** Examples\n\n## the default\ndlmModPoly()\n## random walk plus noise\ndlmModPoly(1, dV = .3, dW = .01)\n\n\n"} {"package":"dlm","topic":"dlmModReg","snippet":"### Name: dlmModReg\n### Title: Create a DLM representation of a regression model\n### Aliases: dlmModReg\n### Keywords: misc\n\n### ** Examples\n\nx <- matrix(runif(6,4,10), nc 
= 2); x\ndlmModReg(x)\ndlmModReg(x, addInt = FALSE)\n\n\n"} {"package":"dlm","topic":"dlmModSeas","snippet":"### Name: dlmModSeas\n### Title: Create a DLM for seasonal factors\n### Aliases: dlmModSeas\n### Keywords: misc\n\n### ** Examples\n\n## seasonal component for quarterly data\ndlmModSeas(4, dV = 3.2)\n\n\n"} {"package":"dlm","topic":"dlmModTrig","snippet":"### Name: dlmModTrig\n### Title: Create Fourier representation of a periodic DLM component\n### Aliases: dlmModTrig\n### Keywords: misc\n\n### ** Examples\n\ndlmModTrig(s = 3)\ndlmModTrig(tau = 3, q = 1) # same thing\ndlmModTrig(s = 4) # for quarterly data\ndlmModTrig(s = 4, q = 1)\ndlmModTrig(tau = 4, q = 2) # a bad idea!\nm1 <- dlmModTrig(tau = 6.3, q = 2); m1\nm2 <- dlmModTrig(om = 2 * pi / 6.3, q = 2)\nall.equal(unlist(m1), unlist(m2))\n\n\n"} {"package":"dlm","topic":"dlmRandom","snippet":"### Name: dlmRandom\n### Title: Random DLM\n### Aliases: dlmRandom\n### Keywords: misc datagen\n\n### ** Examples\n\ndlmRandom(1, 3, 5)\n\n\n"} {"package":"dlm","topic":"dlmSmooth","snippet":"### Name: dlmSmooth\n### Title: DLM smoothing\n### Aliases: dlmSmooth dlmSmooth.dlmFiltered dlmSmooth.default\n### Keywords: ts smooth misc\n\n### ** Examples\n\ns <- dlmSmooth(Nile, dlmModPoly(1, dV = 15100, dW = 1470))\nplot(Nile, type ='o')\nlines(dropFirst(s$s), col = \"red\")\n\n## Multivariate\nset.seed(2)\ntmp <- dlmRandom(3, 5, 20)\nobs <- tmp$y\nm <- tmp$mod\nrm(tmp)\n\nf <- dlmFilter(obs, m)\ns <- dlmSmooth(f)\nall.equal(s, dlmSmooth(obs, m))\n\n\n"} {"package":"dlm","topic":"dlmSum","snippet":"### Name: dlmSum\n### Title: Outer sum of Dynamic Linear Models\n### Aliases: dlmSum %+%\n### Keywords: ts misc\n\n### ** Examples\n\nm1 <- dlmModPoly(2)\nm2 <- dlmModPoly(1)\ndlmSum(m1, m2)\nm1 %+% m2 # same thing\n\n\n"} {"package":"dlm","topic":"dlmSvd2var","snippet":"### Name: dlmSvd2var\n### Title: Compute a nonnegative definite matrix from its Singular Value\n### Decomposition\n### Aliases: dlmSvd2var\n### Keywords: array misc\n\n### ** Examples\n\nx <- matrix(rnorm(16),4,4)\nx <- crossprod(x)\ntmp <- La.svd(x)\nall.equal(dlmSvd2var(tmp$u, sqrt(tmp$d)), x)\n## Vectorized usage\nx <- dlmFilter(Nile, dlmModPoly(1, dV=15099, dW=1469))\nx$se <- sqrt(unlist(dlmSvd2var(x$U.C, x$D.C)))\n## Level with 50% probability interval\nplot(Nile, lty=2)\nlines(dropFirst(x$m), col=\"blue\")\nlines(dropFirst(x$m - .67*x$se), lty=3, col=\"blue\")\nlines(dropFirst(x$m + .67*x$se), lty=3, col=\"blue\")\n\n\n"} {"package":"dlm","topic":"dropFirst","snippet":"### Name: dropFirst\n### Title: Drop the first element of a vector or matrix\n### Aliases: dropFirst\n### Keywords: misc ts\n\n### ** Examples\n\n(pres <- dropFirst(presidents))\nstart(presidents)\nstart(pres)\n\n\n"} {"package":"dlm","topic":"mcmcMean","snippet":"### Name: mcmc\n### Title: Utility functions for MCMC output analysis\n### Aliases: mcmcMean mcmcMeans mcmcSD ergMean\n### Keywords: misc\n\n### ** Examples\n\nx <- matrix(rexp(1000), nc=4)\ndimnames(x) <- list(NULL, LETTERS[1:NCOL(x)])\nmcmcSD(x)\nmcmcMean(x)\nem <- ergMean(x, m = 51)\nplot(ts(em, start=51), xlab=\"Iteration\", main=\"Ergodic means\")\n\n\n"} {"package":"dlm","topic":"residuals.dlmFiltered","snippet":"### Name: residuals.dlmFiltered\n### Title: One-step forecast errors\n### Aliases: residuals.dlmFiltered\n### Keywords: misc\n\n### ** Examples\n\n## diagnostic plots \nnileMod <- dlmModPoly(1, dV = 15100, dW = 1468)\nnileFilt <- dlmFilter(Nile, nileMod)\nres <- residuals(nileFilt, sd=FALSE)\nqqnorm(res)\ntsdiag(nileFilt)\n\n\n"} 
{"package":"dlm","topic":"rwishart","snippet":"### Name: rwishart\n### Title: Random Wishart matrix\n### Aliases: rwishart\n### Keywords: distribution\n\n### ** Examples\n\nrwishart(25, p = 3)\na <- matrix(rnorm(9), 3)\nrwishart(30, SqrtSigma = a)\nb <- crossprod(a)\nrwishart(30, Sigma = b)\n\n\n"} {"package":"PubChemR","topic":"download","snippet":"### Name: download\n### Title: Download Content from PubChem and Save to a File\n### Aliases: download\n\n### ** Examples\n\n# Download JSON file for the compound \"aspirin\" into \"Aspirin.JSON\"\n# A folder named \"Compound\" will be created under current directory\"\ndownload(\n filename = \"Aspirin\",\n outformat = \"json\",\n path = \"./Compound\",\n identifier = \"aspirin\",\n namespace = \"name\",\n domain = \"compound\",\n operation = NULL,\n searchtype = NULL,\n overwrite = TRUE\n)\n\n# Remove downloaded files and folders.\nfile.remove(\"./Compound/Aspirin.json\")\nfile.remove(\"./Compound/\")\n\n\n"} {"package":"PubChemR","topic":"get_aids","snippet":"### Name: get_aids\n### Title: Retrieve Assay IDs (AIDs) from PubChem\n### Aliases: get_aids\n\n### ** Examples\n\nget_aids(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_all_sources","snippet":"### Name: get_all_sources\n### Title: Retrieve All Sources from PubChem\n### Aliases: get_all_sources\n\n### ** Examples\n\nget_all_sources(\n domain = 'substance'\n)\n\n\n"} {"package":"PubChemR","topic":"get_assays","snippet":"### Name: get_assays\n### Title: Retrieve Assays from PubChem\n### Aliases: get_assays\n\n### ** Examples\n\nget_assays(\n identifier = 1234,\n namespace = \"aid\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_cids","snippet":"### Name: get_cids\n### Title: Retrieve Compound IDs (CIDs) from PubChem\n### Aliases: get_cids\n\n### ** Examples\n\nget_cids(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_compounds","snippet":"### Name: get_compounds\n### Title: Retrieve Compounds from PubChem\n### Aliases: get_compounds\n\n### ** Examples\n\nget_compounds(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_json","snippet":"### Name: get_json\n### Title: Retrieve JSON Data from PubChem\n### Aliases: get_json\n\n### ** Examples\n\nget_json(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_properties","snippet":"### Name: get_properties\n### Title: Retrieve Compound Properties from PubChem\n### Aliases: get_properties\n\n### ** Examples\n\nget_properties(\n properties = \"IsomericSMILES\",\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_pubchem","snippet":"### Name: get_pubchem\n### Title: Get Data from PubChem API\n### Aliases: get_pubchem\n\n### ** Examples\n\nget_pubchem(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_pug_rest","snippet":"### Name: get_pug_rest\n### Title: Retrieve Data from PubChem PUG REST API\n### Aliases: get_pug_rest\n\n### ** Examples\n\n get_pug_rest(identifier = \"2244\", namespace = \"cid\", domain = \"compound\", output = \"JSON\")\n\n\n\n"} {"package":"PubChemR","topic":"get_pug_view","snippet":"### Name: get_pug_view\n### Title: Retrieve PUG View Data from PubChem\n### Aliases: get_pug_view\n\n### ** Examples\n\n get_pug_view(identifier = \"2244\", annotation = \"linkout\", domain = \"compound\")\n\n\n\n"} {"package":"PubChemR","topic":"get_sdf","snippet":"### 
Name: get_sdf\n### Title: Retrieve SDF Data from PubChem and Save as File\n### Aliases: get_sdf\n\n### ** Examples\n\nget_sdf(\n identifier = \"aspirin\",\n namespace = \"name\",\n path = NULL\n)\n\n\n"} {"package":"PubChemR","topic":"get_sids","snippet":"### Name: get_sids\n### Title: Retrieve Substance IDs (SIDs) from PubChem\n### Aliases: get_sids\n\n### ** Examples\n\nget_sids(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_substances","snippet":"### Name: get_substances\n### Title: Retrieve Substances from PubChem\n### Aliases: get_substances\n\n### ** Examples\n\nget_substances(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"get_synonyms","snippet":"### Name: get_synonyms\n### Title: Retrieve Synonyms from PubChem\n### Aliases: get_synonyms\n\n### ** Examples\n\nget_synonyms(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"PubChemR","topic":"pubchem_summary","snippet":"### Name: pubchem_summary\n### Title: Summarize Data from PubChem Based on Identifier\n### Aliases: pubchem_summary\n\n### ** Examples\n\n## No test: \n summary_data <- pubchem_summary(\n identifier = \"aspirin\",\n namespace = 'name',\n type = c(\"compound\", \"substance\", \"assay\"),\n properties = \"IsomericSMILES\",\n include_synonyms = TRUE,\n include_sdf = TRUE\n )\n## End(No test)\n\n\n\n"} {"package":"PubChemR","topic":"request","snippet":"### Name: request\n### Title: Request Function for PubChem API\n### Aliases: request\n\n### ** Examples\n\nrequest(\n identifier = \"aspirin\",\n namespace = \"name\"\n)\n\n\n"} {"package":"imputeLCMD","topic":"intensity_PXD000438","snippet":"### Name: intensity_PXD000438\n### Title: Dataset PXD000438 from ProteomeXchange.\n### Aliases: intensity_PXD000438\n\n### ** Examples\n\n data(intensity_PXD000438)\n\n\n"} {"package":"imputeLCMD","topic":"intensity_PXD000501","snippet":"### Name: intensity_PXD000501\n### Title: Dataset PXD000501 from ProteomeXchange.\n### Aliases: intensity_PXD000501\n\n### ** Examples\n\n data(intensity_PXD000501)\n\n\n"} {"package":"encryptr","topic":"decrypt","snippet":"### Name: decrypt\n### Title: Decrypt a data frame or tibble column using an RSA\n### public/private key\n### Aliases: decrypt\n\n### ** Examples\n\n#' This will run:\n# genkeys()\n# gp_encrypt = gp %>%\n# select(-c(name, address1, address2, address3)) %>%\n# encrypt(postcode, telephone)\n# gp_encrypt %>%\n# decrypt(postcode, telephone)\n\n## Not run: \n##D # For CRAN and testing:\n##D library(dplyr)\n##D temp_dir = tempdir()\n##D genkeys(file.path(temp_dir, \"id_rsa\")) # temp directory for testing only\n##D gp_encrypt = gp %>%\n##D select(-c(name, address1, address2, address3)) %>%\n##D encrypt(postcode, telephone, public_key_path = file.path(temp_dir, \"id_rsa.pub\"))\n##D gp_encrypt %>%\n##D decrypt(postcode, telephone, private_key_path = file.path(temp_dir, \"id_rsa\"))\n##D \n## End(Not run)\n\n\n"} {"package":"encryptr","topic":"decrypt_file","snippet":"### Name: decrypt_file\n### Title: Decrypt a file\n### Aliases: decrypt_file\n\n### ** Examples\n\n# This will run:\n# Create example file to encrypt\n# write.csv(gp, \"gp.csv\")\n# genkeys()\n# encrypt_file(\"gp.csv\")\n# decrypt_file(\"gp.csv.encryptr.bin\", file_name = \"gp2.csv\")\n\n# For CRAN and testing:\ntemp_dir = tempdir() # temp directory for testing only\ngenkeys(file.path(temp_dir, \"id_rsa4\"))\nwrite.csv(gp, file.path(temp_dir, \"gp.csv\"))\nencrypt_file(file.path(temp_dir, \"gp.csv\"), 
public_key_path = file.path(temp_dir, \"id_rsa4.pub\"))\ndecrypt_file(file.path(temp_dir, \"gp.csv.encryptr.bin\"),\n private_key_path = file.path(temp_dir, \"id_rsa4\"),\n file_name = file.path(temp_dir, \"gp2.csv\"))\n\n\n"} {"package":"encryptr","topic":"decrypt_vec","snippet":"### Name: decrypt_vec\n### Title: Decrypt ciphertext using an RSA public/private key\n### Aliases: decrypt_vec\n\n### ** Examples\n\n## Not run: \n##D hospital_number = c(\"1010761111\", \"2010761212\")\n##D genkeys(file.path(tempdir(), \"id_rsa\")) # temp directory for testing only\n##D hospital_number_encrypted = encrypt_char(hospital_number)\n##D decrypt_vec(hospital_number_encrypted)\n## End(Not run)\n\n\n"} {"package":"encryptr","topic":"encrypt","snippet":"### Name: encrypt\n### Title: Encrypt a data frame or tibble column using an RSA\n### public/private key\n### Aliases: encrypt\n\n### ** Examples\n\n# This will run:\n# genkeys()\n# gp_encrypt = gp %>%\n# select(-c(name, address1, address2, address3)) %>%\n# encrypt(postcode, telephone)\n\n# For CRAN and testing:\nlibrary(dplyr)\ntemp_dir = tempdir()\ngenkeys(file.path(temp_dir, \"id_rsa2\")) # temp directory for testing only\ngp_encrypt = gp %>%\n select(-c(name, address1, address2, address3)) %>%\n encrypt(postcode, telephone, public_key_path = file.path(temp_dir, \"id_rsa2.pub\"))\n\n\n"} {"package":"encryptr","topic":"encrypt_file","snippet":"### Name: encrypt_file\n### Title: Encrypt a file\n### Aliases: encrypt_file\n\n### ** Examples\n\n# This will run:\n# Create example file to encrypt\n# write.csv(gp, \"gp.csv\")\n# genkeys()\n# encrypt_file(\"gp.csv\")\n\n# For CRAN and testing:\n## Not run: \n##D # Run only once in decrypt_file example\n##D temp_dir = tempdir() # temp directory for testing only\n##D genkeys(file.path(temp_dir, \"id_rsa\"))\n##D write.csv(gp, file.path(temp_dir, \"gp.csv\"))\n##D encrypt_file(file.path(temp_dir, \"gp.csv\"), public_key_path = file.path(temp_dir, \"id_rsa.pub\"))\n## End(Not run)\n\n\n"} {"package":"encryptr","topic":"encrypt_vec","snippet":"### Name: encrypt_vec\n### Title: Encrypt a character vector using an RSA public/private key\n### Aliases: encrypt_vec\n\n### ** Examples\n\n## Not run: \n##D hospital_number = c(\"1010761111\", \"2010761212\")\n##D encrypt_vec(hospital_number)\n## End(Not run)\n\n\n"} {"package":"encryptr","topic":"genkeys","snippet":"### Name: genkeys\n### Title: Create and write RSA private and public keys\n### Aliases: genkeys\n\n### ** Examples\n\n# Function can be used like this:\n# genkeys()\n\n# For CRAN purposes and testing\ntemp_dir = tempdir()\ngenkeys(file.path(temp_dir, \"id_rsa3\"))\n\n\n\n"} {"package":"THREC","topic":"Treeht","snippet":"### Name: Treeht\n### Title: Tree data from the long-term forest experiments in Sweden\n### Aliases: Treeht\n### Keywords: datasets\n\n### ** Examples\n\ndata(Treeht)\n\n\n\n"} {"package":"THREC","topic":"broad","snippet":"### Name: broad\n### Title: Tree data from the long-term forest experiments in Sweden\n### Aliases: broad\n### Keywords: datasets\n\n### ** Examples\n\ndata(broad)\n\n\n\n"} {"package":"THREC","topic":"broadleaves","snippet":"### Name: broadleaves\n### Title: A response calibration function for the other broadleaves\n### Aliases: broadleaves\n### Keywords: broadleaves\n\n### ** Examples\n\nlibrary(THREC)\n\n# sample data\ndata(broad)\n\nbroadleaves(broad)\n\n\n"} {"package":"THREC","topic":"conifers","snippet":"### Name: conifers\n### Title: A response calibration function for the other conifer species\n### Aliases: 
conifers\n### Keywords: conifers\n\n### ** Examples\n\nlibrary(THREC)\n\n# sample data\ndata(broad)\n\nconifers(broad)\n\n\n"} {"package":"THREC","topic":"main_species","snippet":"### Name: main_species\n### Title: Main species generalized function response calibration\n### Aliases: main_species\n### Keywords: main_species\n\n### ** Examples\n\nlibrary(THREC)\n\n# sample data\ndata(Treeht)\n\nmain_species(Treeht)\n\n\n"} {"package":"THREC","topic":"species_specific","snippet":"### Name: species_specific\n### Title: Species-specific response calibration for Scots pine, Norway\n### spruce, and Birch\n### Aliases: species_specific\n### Keywords: species_specific\n\n### ** Examples\n\nlibrary(THREC)\n\n# sample data\ndata(Treeht)\n\nspecies_specific(Treeht)\n\n\n"} {"package":"parseRPDR","topic":"all_ids_mi2b2","snippet":"### Name: all_ids_mi2b2\n### Title: Legacy function to create a vector of all possible IDs for mi2b2\n### workbench\n### Aliases: all_ids_mi2b2\n\n### ** Examples\n\n## Not run: \n##D all_MGH_mrn <- all_ids_mi2b2(type = \"MGH\", d_mrn = data_mrn, d_con = data_con)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_dia","snippet":"### Name: convert_dia\n### Title: Searches diagnosis columns for given diseases.\n### Aliases: convert_dia\n\n### ** Examples\n\n## Not run: \n##D #Search for Hypertension and Stroke ICD codes\n##D diseases <- list(HT = c(\"ICD10:I10\"), Stroke = c(\"ICD9:434.91\", \"ICD10:I63.50\"))\n##D data_dia_parse <- convert_dia(d = data_dia, codes_to_find = diseases, nThread = 2)\n##D \n##D #Search for Hypertension and Stroke ICD codes and summarize per patient providing earliest time\n##D diseases <- list(HT = c(\"ICD10:I10\"), Stroke = c(\"ICD9:434.91\", \"ICD10:I63.50\"))\n##D data_dia_disease <- convert_dia(d = data_dia, codes_to_find = diseases, nThread = 2,\n##D collapse = \"ID_MERGE\", aggr_type = \"earliest\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_enc","snippet":"### Name: convert_enc\n### Title: Searches columns for given diseases defined by ICD codes.\n### Aliases: convert_enc\n\n### ** Examples\n\n## Not run: \n##D #Parse encounter ICD columns and keep original ones as well\n##D data_enc_parse <- convert_enc(d = data_enc, keep = TRUE, nThread = 2)\n##D \n##D #Parse encounter ICD columns and discard original ones,\n##D #and create indicator variable for the following diseases\n##D diseases <- list(HT = c(\"I10\"), Stroke = c(\"434.91\", \"I63.50\"))\n##D data_enc_disease <- convert_enc(d = data_enc, keep = FALSE,\n##D codes_to_find = diseases, nThread = 2)\n##D \n##D #Parse encounter ICD columns and discard original ones\n##D #and create indicator variables for the following diseases and summarize per patient,\n##D #whether there are any encounters where the given diseases were registered\n##D diseases <- list(HT = c(\"I10\"), Stroke = c(\"434.91\", \"I63.50\"))\n##D data_enc_disease <- convert_enc(d = data_enc, keep = FALSE,\n##D codes_to_find = diseases, nThread = 2, collapse = \"ID_MERGE\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_lab","snippet":"### Name: convert_lab\n### Title: Converts lab results to normal/abnormal based on reference\n### values.\n### Aliases: convert_lab\n\n### ** Examples\n\n## Not run: \n##D #Convert loaded lab results\n##D data_lab_pretty <- convert_lab(d = data_lab)\n##D data_lab_pretty[, c(\"lab_result\", \"lab_result_pretty\", \"lab_result_range\",\n##D \"lab_result_abn_pretty\", \"lab_result_abn_flag_pretty\")]\n## End(Not run)\n\n\n"} 
{"package":"parseRPDR","topic":"convert_med","snippet":"### Name: convert_med\n### Title: Adds boolean columns corresponding to a group of medications\n### whether it is present in the given row.\n### Aliases: convert_med\n\n### ** Examples\n\n## Not run: \n##D #Define medication group and add an indicator column whether\n##D #the given medication group was administered\n##D meds <- list(statin = c(\"Simvastatin\", \"Atorvastatin\"),\n##D NSAID = c(\"Acetaminophen\", \"Paracetamol\"))\n##D \n##D data_med_indic <- convert_med(d = data_med, codes_to_find = meds, nThread = 1)\n##D \n##D #Summarize per patient if they ever had the given medication groups registered\n##D data_med_indic_any <- convert_med(d = data_med,\n##D codes_to_find = meds, collapse = \"ID_MERGE\", nThread = 2)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_notes","snippet":"### Name: convert_notes\n### Title: Extracts information from notes free text.\n### Aliases: convert_notes\n\n### ** Examples\n\n## Not run: \n##D #Create columns with specific parts of the radiological report defined by anchors\n##D data_rad_parsed <- convert_notes(d = data_rad, code = \"rad_rep_txt\",\n##D anchors = c(\"Exam Code\", \"Ordering Provider\", \"HISTORY\", \"Associated Reports\",\n##D \"Report Below\", \"REASON\", \"REPORT\", \"TECHNIQUE\", \"COMPARISON\", \"FINDINGS\",\n##D \"IMPRESSION\", \"RECOMMENDATION\", \"SIGNATURES\", \"report_end\"), nThread = 2)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_phy","snippet":"### Name: convert_phy\n### Title: Searches health history data for given codes\n### Aliases: convert_phy\n\n### ** Examples\n\n## Not run: \n##D #Search for Height and Weight codes\n##D anthropometrics <- list(Weight = c(\"LMR:3688\", \"EPIC:WGT\"), Height = c(\"LMR:3771\", \"EPIC:HGT\"))\n##D data_phy_parse <- convert_phy(d = data_phy, codes_to_find = anthropometrics, nThread = 2)\n##D \n##D #Search for for Height and Weight codes and summarize per patient providing earliest time\n##D anthropometrics <- list(Weight = c(\"LMR:3688\", \"EPIC:WGT\"), Height = c(\"LMR:3771\", \"EPIC:HGT\"))\n##D data_phy_parse <- convert_phy(d = data_phy, codes_to_find = anthropometrics, nThread = 2,\n##D collapse = \"ID_MERGE\", aggr_type = \"earliest\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_prc","snippet":"### Name: convert_prc\n### Title: Searches procedures columns for given procedures.\n### Aliases: convert_prc\n\n### ** Examples\n\n## Not run: \n##D #Search for Anesthesia CPT codes\n##D procedures <- list(Anesthesia = c(\"CTP:00410\", \"CPT:00104\"))\n##D data_prc_parse <- convert_prc(d = data_prc, codes_to_find = procedures, nThread = 2)\n##D \n##D #Search for Anesthesia CPT codes\n##D procedures <- list(Anesthesia = c(\"CTP:00410\", \"CPT:00104\"))\n##D data_prc_procedures <- convert_prc(d = data_prc, codes_to_find = procedures,\n##D nThread = 2, collapse = \"ID_MERGE\", aggr_type = \"earliest\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"convert_rfv","snippet":"### Name: convert_rfv\n### Title: Searches columns for given reason for visit defined by ERFV\n### codes.\n### Aliases: convert_rfv\n\n### ** Examples\n\n## Not run: \n##D #Parse reason for visit columns\n##D #and create indicator variables for the following reasons and summarize per patient,\n##D #whether there are any encounters where the given reasons were registered\n##D reasons <- list(Pain = c(\"ERFV:160357\", \"ERFV:140012\"), Visit = c(\"ERFV:501\"))\n##D data_rfv_disease <- convert_rfv(d = 
data_rfv, keep = FALSE,\n##D codes_to_find = reasons, nThread = 2, collapse = \"ID_MERGE\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"create_img_db","snippet":"### Name: create_img_db\n### Title: Create a database of DICOM headers.\n### Aliases: create_img_db\n\n### ** Examples\n\n## Not run: \n##D #Create a database with DICOM header information\n##D all_dicom_headers <- create_img_db(path = \"/Users/Test/Data/DICOM/\")\n##D all_dicom_headers <- create_img_db(path = \"/Users/Test/Data/DICOM/\", ext = c(\".dcm\", \".DICOM\"))\n##D #Create a database with DICOM header information for only IDs and accession numbers\n##D all_dicom_headers <- create_img_db(path = \"/Users/Test/Data/DICOM/\",\n##D keywords = c(\"PatientID\", \"AccessionNumber\"))\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"export_notes","snippet":"### Name: export_notes\n### Title: Exports free text notes to individual text files.\n### Aliases: export_notes\n\n### ** Examples\n\n## Not run: \n##D #Output all cardiology notes to given folder\n##D d <- load_notes(\"Car.txt\", type = \"car\", nThread = 2, format_orig = TRUE)\n##D export_notes(d, folder = \"/Users/Test/Notes/\", code = \"car_rep_txt\",\n##D name1 = \"ID_MERGE\", name2 = \"car_rep_num\")\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"find_exam","snippet":"### Name: find_exam\n### Title: Find exam data within a given timeframe using parallel CPU\n### computing and possibly shared RAM management.\n### Aliases: find_exam\n\n### ** Examples\n\n## Not run: \n##D #Filter encounters for first emergency visits at one of MGH's ED departments\n##D data_enc_ED <- data_enc[enc_clinic == \"MGH EMERGENCY 10020010608\"]\n##D data_enc_ED <- data_enc_ED[!duplicated(data_enc_ED$ID_MERGE)]\n##D \n##D #Find all radiological examinations within 3 day of the ED registration\n##D rdt_ED <- find_exam(d_from = data_rdt, d_to = data_enc_ED,\n##D d_from_ID = \"ID_MERGE\", d_to_ID = \"ID_MERGE\",\n##D d_from_time = \"time_rdt_exam\", d_to_time = \"time_enc_admit\", time_diff_name = \"time_diff_ED_rdt\",\n##D before = TRUE, after = TRUE, time = 3, time_unit = \"days\", multiple = \"all\",\n##D nThread = 2, shared_RAM = FALSE)\n##D \n##D #Find earliest radiological examinations within 3 day of the ED registration\n##D rdt_ED <- find_exam(d_from = data_rdt, d_to = data_enc_ED,\n##D d_from_ID = \"ID_MERGE\", d_to_ID = \"ID_MERGE\",\n##D d_from_time = \"time_rdt_exam\", d_to_time = \"time_enc_admit\", time_diff_name = \"time_diff_ED_rdt\",\n##D before = TRUE, after = TRUE, time = 3, time_unit = \"days\", multiple = \"earliest\",\n##D nThread = 2, shared_RAM = FALSE)\n##D \n##D #Find closest radiological examinations on or after 1 day of the ED registration\n##D #and add primary diagnosis column from encounters\n##D rdt_ED <- find_exam(d_from = data_rdt, d_to = data_enc_ED,\n##D d_from_ID = \"ID_MERGE\", d_to_ID = \"ID_MERGE\",\n##D d_from_time = \"time_rdt_exam\", d_to_time = \"time_enc_admit\", time_diff_name = \"time_diff_ED_rdt\",\n##D before = FALSE, after = TRUE, time = 1, time_unit = \"days\", multiple = \"earliest\",\n##D add_column = \"enc_diag_princ\", nThread = 2, shared_RAM = FALSE)\n##D \n##D #Find closest radiological examinations on or after 1 day of the ED registration\n##D #but also provide empty rows for patients with exam data but not within the timeframe\n##D rdt_ED <- find_exam(d_from = data_rdt, d_to = data_enc_ED,\n##D d_from_ID = \"ID_MERGE\", d_to_ID = \"ID_MERGE\",\n##D d_from_time = \"time_rdt_exam\", d_to_time = \"time_enc_admit\", 
time_diff_name = \"time_diff_ED_rdt\",\n##D before = FALSE, after = TRUE, time = 1, time_unit = \"days\", multiple = \"earliest\",\n##D add_column = \"enc_diag_princ\", keep_data = TRUE nThread = 2, shared_RAM = FALSE)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_all","snippet":"### Name: load_all\n### Title: Loads allergy data information into R.\n### Aliases: load_all\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_all <- load_all(file = \"test_All.txt\")\n##D \n##D #Use sequential processing\n##D d_all <- load_all(file = \"test_All.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_all <- load_all(file = \"test_All.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_all_data","snippet":"### Name: load_all_data\n### Title: Loads all RPDR text outputs into R.\n### Aliases: load_all_data\n\n### ** Examples\n\n## Not run: \n##D #Load all Con, Dem and Mrn datasets processing all files within given datasource in parallel\n##D load_all_data(folder = folder_rpdr, which_data = c(\"con\", \"dem\", \"mrn\"),\n##D nThread = 2, many_sources = FALSE)\n##D \n##D #Load all supported file types parallelizing on the level of datasources\n##D load_all_data(folder = folder_rpdr, nThread = 2, many_sources = TRUE,\n##D format_orig = TRUE)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_bib","snippet":"### Name: load_bib\n### Title: Loads BiobankFile data into R.\n### Aliases: load_bib\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_bib <- load_bib(file = \"test_Bib.txt\")\n##D \n##D #Use sequential processing\n##D d_bib <- load_bib(file = \"test_Bib.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_bib <- load_bib(file = \"test_Bib.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_con","snippet":"### Name: load_con\n### Title: Loads contact information into R.\n### Aliases: load_con\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_con <- load_con(file = \"test_Con.txt\")\n##D \n##D #Use sequential processing\n##D d_con <- load_con(file = \"test_Con.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in\n##D #MRN_Type and MRN columns (default in load_con) and keep all IDs\n##D d_con <- load_con(file = \"test_Con.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_dem","snippet":"### Name: load_dem\n### Title: Loads demographic information into R for new demographic tables\n### following changes in the beginning of 2022.\n### Aliases: load_dem\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_dem <- load_dem(file = \"test_Dem.txt\")\n##D \n##D #Use sequential processing\n##D d_dem <- load_dem(file = \"test_Dem.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_dem <- load_dem(file = \"test_Dem.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_dem_old","snippet":"### Name: load_dem_old\n### Title: Loads demographic information into R for demographics tables\n### before 2022.\n### Aliases: load_dem_old\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_dem <- load_dem_old(file = \"test_Dem.txt\")\n##D \n##D #Use sequential processing\n##D d_dem <- 
load_dem_old(file = \"test_Dem.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_dem <- load_dem_old(file = \"test_Dem.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_dia","snippet":"### Name: load_dia\n### Title: Loads diagnoses into R.\n### Aliases: load_dia\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_dia <- load_dia(file = \"test_Dia.txt\")\n##D \n##D #Use sequential processing\n##D d_dia <- load_dia(file = \"test_Dia.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_dea <- load_dia(file = \"test_Dea.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_enc","snippet":"### Name: load_enc\n### Title: Loads encounter information into R.\n### Aliases: load_enc\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_enc <- load_enc(file = \"test_Enc.txt\")\n##D \n##D #Use sequential processing\n##D d_enc <- load_enc(file = \"test_Enc.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_exc <- load_enc(file = \"test_Exc.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_lab","snippet":"### Name: load_lab\n### Title: Loads laboratory results into R.\n### Aliases: load_lab\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_lab <- load_lab(file = \"test_Lab.txt\")\n##D \n##D #Use sequential processing\n##D d_lab <- load_lab(file = \"test_Lab.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_clb <- load_lab(file = \"test_Clb.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_lno","snippet":"### Name: load_lno\n### Title: Loads LMR note documents into R.\n### Aliases: load_lno\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_lno <- load_lno(file = \"test_Lno.txt\")\n##D \n##D #Use sequential processing\n##D d_lno <- load_lno(file = \"test_Lno.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_lno <- load_lno(file = \"test_Lno.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_mcm","snippet":"### Name: load_mcm\n### Title: Loads match control data into R.\n### Aliases: load_mcm\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_mcm <- load_mcm(file = \"test_Mcm.txt\")\n##D \n##D #Use sequential processing\n##D d_mcm <- load_mcm(file = \"test_Mcm.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_mcm <- load_mcm(file = \"test_Mcm.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_med","snippet":"### Name: load_med\n### Title: Loads medication order detail into R.\n### Aliases: load_med\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_med <- load_med(file = \"test_Med.txt\")\n##D \n##D #Use sequential processing\n##D d_med <- load_med(file = \"test_Med.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_mee <- load_med(file = \"test_Mee.txt\", nThread = 20, mrn_type = TRUE, perc = 
1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_mic","snippet":"### Name: load_mic\n### Title: Loads microbiology results into R.\n### Aliases: load_mic\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_mic <- load_mic(file = \"test_Mic.txt\")\n##D \n##D #Use sequential processing\n##D d_mic <- load_mic(file = \"test_Mic.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_mic <- load_mic(file = \"test_Mic.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_mrn","snippet":"### Name: load_mrn\n### Title: Loads MRN data into R.\n### Aliases: load_mrn\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_mrn <- load_mrn(file = \"test_Mrn.txt\")\n##D \n##D #Use sequential processing\n##D d_mrn <- load_mrn(file = \"test_Mrn.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_mrn <- load_mrn(file = \"test_Mrn.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_notes","snippet":"### Name: load_notes\n### Title: Loads note documents into R.\n### Aliases: load_notes\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_hnp <- load_notes(file = \"test_Hnp.txt\", type = \"hnp\")\n##D \n##D #Use sequential processing\n##D d_hnp <- load_notes(file = \"test_Hnp.txt\", type = \"hnp\", nThread = 1, format_orig = TRUE)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_hnp <- load_notes(file = \"test_Hnp.txt\", type = \"hnp\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_phy","snippet":"### Name: load_phy\n### Title: Loads health history information into R.\n### Aliases: load_phy\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_phy <- load_phy(file = \"test_Phy.txt\")\n##D \n##D #Use sequential processing\n##D d_phy <- load_phy(file = \"test_Phy.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_phy <- load_phy(file = \"test_Phy.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_prc","snippet":"### Name: load_prc\n### Title: Loads procedures into R.\n### Aliases: load_prc\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_prc <- load_prc(file = \"test_Prc.txt\")\n##D \n##D #Use sequential processing\n##D d_prc <- load_prc(file = \"test_Prc.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_pec <- load_prc(file = \"test_Pec.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_prv","snippet":"### Name: load_prv\n### Title: Loads providers information into R.\n### Aliases: load_prv\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_prv <- load_prv(file = \"test_Prv.txt\")\n##D \n##D #Use sequential processing\n##D d_prv <- load_prv(file = \"test_Prv.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in\n##D #MRN_Type and MRN columns (default in load_con) and keep all IDs\n##D d_prv <- load_prv(file = \"test_Prv.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_ptd","snippet":"### Name: load_ptd\n### Title: Loads patient data 
information into R.\n### Aliases: load_ptd\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_ptd <- load_ptd(file = \"test_Phy.txt\")\n##D \n##D #Use sequential processing\n##D d_ptd <- load_ptd(file = \"test_Phy.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_ptd <- load_ptd(file = \"test_Phy.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_rdt","snippet":"### Name: load_rdt\n### Title: Loads radiology procedures data into R.\n### Aliases: load_rdt\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_rdt <- load_rdt(file = \"test_Rdt.txt\")\n##D \n##D #Use sequential processing\n##D d_rdt <- load_rdt(file = \"test_Rdt.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_rdt <- load_rdt(file = \"test_Rdt.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_rfv","snippet":"### Name: load_rfv\n### Title: Loads reason for visit data into R.\n### Aliases: load_rfv\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_rfv <- load_rfv(file = \"test_Rfv.txt\")\n##D \n##D #Use sequential processing\n##D d_rfv <- load_rfv(file = \"test_Rfv.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_rfv <- load_rfv(file = \"test_Rfv.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"load_trn","snippet":"### Name: load_trn\n### Title: Loads transfusion results into R.\n### Aliases: load_trn\n\n### ** Examples\n\n## Not run: \n##D #Using defaults\n##D d_trn <- load_trn(file = \"test_Trn.txt\")\n##D \n##D #Use sequential processing\n##D d_trn <- load_trn(file = \"test_Trn.txt\", nThread = 1)\n##D \n##D #Use parallel processing and parse data in MRN_Type and MRN columns and keep all IDs\n##D d_trn <- load_trn(file = \"test_Trn.txt\", nThread = 20, mrn_type = TRUE, perc = 1)\n## End(Not run)\n\n\n"} {"package":"parseRPDR","topic":"pretty_mrn","snippet":"### Name: pretty_mrn\n### Title: Converts MRN integer to string compatible with RPDR.\n### Aliases: pretty_mrn\n\n### ** Examples\n\n## Not run: \n##D mrns <- sample(1e4:1e7, size = 10) #Simulate MRNs\n##D \n##D #MGH format\n##D pretty_mrn(v = mrns, prefix = \"MGH\")\n##D \n##D #BWH format\n##D pretty_mrn(v = mrns, prefix = \"BWH\")\n##D \n##D #Multiple sources using space as a separator\n##D pretty_mrn(v = mrns[1:3], prefix = c(\"MGH\", \"BWH\", \"EMPI\"), sep = \" \")\n##D \n##D #Keeping the length of the IDs despite not adhering to the requirements\n##D pretty_mrn(v = mrns, prefix = \"EMPI\", id_length = \"asis\")\n## End(Not run)\n\n\n"} {"package":"soptdmaeA","topic":"cmatbrcd.mae","snippet":"### Name: cmatbrcd.mae\n### Title: Computes the treatment information matrix\n### Aliases: cmatbrcd.mae\n### Keywords: Information matrix C-matrix\n\n### ** Examples\n\n\n##Information matrix\n\n trt.N <- 4 \n blk.N <- 4 \n theta <- 0.3 \n dsgn <- rbind(1:4,c(2:4,1))\n dtype <- \"rcd\"\n\n cmatbrcd.mae(trt.N = 4, blk.N = 4, theta = 0.2, des = dsgn, dtype = \"rcd\")\n\n\n"} {"package":"soptdmaeA","topic":"soptdmaeA","snippet":"### Name: soptdmaeA\n### Title: Sequential optimal designs for two-colour cDNA microarray\n### experiments\n### Aliases: soptdmaeA soptdmaeA.default print.soptdmaeA summary.soptdmaeA\n### print.summary.soptdmaeA\n### Keywords: 
Sequential A-optimal block designs Sequential D-optimal block\n### designs Sequential E-optimal block designs Sequential MV-optimal\n### block designs Sequential A-optimal row-column designs Sequential\n### D-optimal row-column designs Sequential E-optimal row-column designs\n### Sequential MV-optimal row-column designs Microarray experiment Array\n### exchange algorithm\n\n### ** Examples\n\n ## No test: \n ##To obtain sequential A-optimal or near-optimal block design for a given\n ##initial A-optimal or near-optimal block design, set\n \n trt.N <- 3 #Number of treatments\n blk.N <- 3 #Number of blocks\n theta <- 0 #theta value\n nrep <- 10 #Number of replications\n strt <- 2 #Number of added treatments\n sary <- 3 #Number of added arrays\n des0 <- rbind(1:3, c(2, 3, 1)) #Initial design\n dtype = \"blkd\" #Design type\n Optcrit <- \"A\" #Optimality criteria\n\n seqAoptbd <- soptdmaeA(trt.N = 3, blk.N = 3, theta = 0, nrep = 10, \n strt = 2, sary = 3, des0, dtype = \"blkd\", Optcrit = \"A\")\n \n summary(seqAoptbd)\n\n ##To obtain sequential A-optimal or near-optimal row-column design for a given\n ##initial A-optimal or near-optimal row-column design des0 (stated above), set\n \n dtype = \"rcd\" #Design type\n\n seqAoptrcd <- soptdmaeA(trt.N = 3, blk.N = 3, theta = 0, nrep = 10, \n strt = 2, sary = 3, des0, dtype = \"rcd\", Optcrit = \"A\")\n \n summary(seqAoptrcd)\n## End(No test)\n\n\n"} {"package":"iprior","topic":"decimal_place","snippet":"### Name: decimal_place\n### Title: Cut a numeric vector to a certain number of decimal places\n### Aliases: decimal_place dec_plac\n\n### ** Examples\n\ndecimal_place(pi, 3)\ndecimal_place(c(exp(1), pi, sqrt(2)), 4)\n\n\n\n"} {"package":"iprior","topic":"gen_multilevel","snippet":"### Name: gen_multilevel\n### Title: Generate simulated data for multilevel models\n### Aliases: gen_multilevel\n\n### ** Examples\n\ngen_multilevel()\n\n\n\n"} {"package":"iprior","topic":"gen_smooth","snippet":"### Name: gen_smooth\n### Title: Generate simulated data for smoothing models\n### Aliases: gen_smooth\n\n### ** Examples\n\ngen_smooth(10)\n\n\n\n"} {"package":"iprior","topic":"hsb","snippet":"### Name: hsb\n### Title: High school and beyond dataset\n### Aliases: hsb\n### Keywords: datasets\n\n### ** Examples\n\ndata(hsb)\nstr(hsb)\n\n\n"} {"package":"iprior","topic":"hsbsmall","snippet":"### Name: hsbsmall\n### Title: High school and beyond dataset\n### Aliases: hsbsmall\n### Keywords: datasets\n\n### ** Examples\n\ndata(hsbsmall)\nstr(hsbsmall)\n\n\n"} {"package":"iprior","topic":"iprior","snippet":"### Name: iprior\n### Title: Fit an I-prior regression model\n### Aliases: iprior iprior.default iprior.formula iprior.ipriorKernel\n### iprior.ipriorMod\n\n### ** Examples\n\n\n# Formula based input\n(mod.stackf <- iprior(stack.loss ~ Air.Flow + Water.Temp + Acid.Conc.,\n data = stackloss))\nmod.toothf <- iprior(len ~ supp * dose, data = ToothGrowth)\nsummary(mod.toothf)\n\n# Non-formula based input\nmod.stacknf <- iprior(y = stackloss$stack.loss,\n Air.Flow = stackloss$Air.Flow,\n Water.Temp = stackloss$Water.Temp,\n Acid.Conc. 
= stackloss$Acid.Conc.)\nmod.toothnf <- iprior(y = ToothGrowth$len, ToothGrowth$supp, ToothGrowth$dose,\n interactions = \"1:2\")\n\n# Formula based model option one.lam = TRUE\n# Sets a single scale parameter for all variables\nmodf <- iprior(stack.loss ~ ., data = stackloss, one.lam = TRUE)\nmodnf <- iprior(y = stackloss$stack.loss, X = stackloss[1:3])\nall.equal(coef(modf), coef(modnf)) # both models are equivalent\n\n# Fit models using different kernels\ndat <- gen_smooth(n = 100)\nmod <- iprior(y ~ X, dat, kernel = \"fbm\") # Hurst = 0.5 (default)\nmod <- iprior(y ~ X, dat, kernel = \"poly3\") # polynomial degree 3\n\n# Fit models using various estimation methods\nmod1 <- iprior(y ~ X, dat)\nmod2 <- iprior(y ~ X, dat, method = \"em\")\nmod3 <- iprior(y ~ X, dat, method = \"canonical\")\nmod4 <- iprior(y ~ X, dat, method = \"mixed\")\nmod5 <- iprior(y ~ X, dat, method = \"fixed\", lambda = coef(mod1)[1],\n psi = coef(mod1)[2])\nc(logLik(mod1), logLik(mod2), logLik(mod3), logLik(mod4),\n logLik(mod5))\n\n## Not run: \n##D \n##D # For large data sets, it is worth trying the Nystrom method\n##D mod <- iprior(y ~ X, gen_smooth(5000), kernel = \"se\", nystrom = 50,\n##D est.lengthscale = TRUE) # a bit slow\n##D plot_fitted(mod, ci = FALSE)\n## End(Not run)\n\n\n\n"} {"package":"iprior","topic":"iprior_cv","snippet":"### Name: iprior_cv\n### Title: Perform a cross-validation experiment with the iprior function\n### Aliases: iprior_cv iprior_cv.default iprior_cv.formula\n\n### ** Examples\n\n## Not run: \n##D \n##D # 5-fold CV experiment\n##D (mod.cv <- iprior_cv(y ~ X, gen_smooth(100), kernel = \"se\", folds = 5))\n##D \n##D # LOOCV experiment\n##D (mod.cv <- iprior_cv(y ~ X, gen_smooth(100), kernel = \"se\", folds = Inf))\n##D \n##D # Can also get root MSE\n##D print(mod.cv, \"RMSE\")\n## End(Not run)\n\n\n\n"} {"package":"iprior","topic":"kernL","snippet":"### Name: kernL\n### Title: Load the kernel matrices for I-prior models\n### Aliases: kernL kernL.formula\n\n### ** Examples\n\n\nstr(ToothGrowth)\n(mod <- kernL(y = ToothGrowth$len,\n supp = ToothGrowth$supp,\n dose = ToothGrowth$dose,\n interactions = \"1:2\"))\nkernL(len ~ supp * dose, data = ToothGrowth) # equivalent formula call\n\n# Choosing different kernels\nstr(stackloss)\nkernL(stack.loss ~ ., stackloss, kernel = \"fbm\") # all fBm kernels\nkernL(stack.loss ~ ., stackloss, kernel = \"FBm\") # cApS dOn't MatTeR\nkernL(stack.loss ~ ., stackloss,\n kernel = c(\"linear\", \"se\", \"poly3\")) # different kernels\n\n# Sometimes the print output is too long, can use str() options here\nprint(mod, strict.width = \"cut\", width = 30)\n\n\n\n"} {"package":"iprior","topic":"kernel","snippet":"### Name: kernel\n### Title: Reproducing kernels for the I-prior package\n### Aliases: kernel kernels kern_canonical kern_linear kern_pearson\n### kern_fbm kern_se kern_poly\n\n### ** Examples\n\nkern_linear(1:3)\nkern_fbm(1:5, 1:3, gamma = 0.7)\n\n\n\n"} {"package":"iprior","topic":"pollution","snippet":"### Name: pollution\n### Title: Air pollution and mortality\n### Aliases: pollution\n### Keywords: datasets\n\n### ** Examples\n\ndata(pollution)\nstr(pollution)\n\n\n"} {"package":"iprior","topic":"predict","snippet":"### Name: predict\n### Title: Obtain predicted values from 'ipriorMod' objects\n### Aliases: predict fitted.ipriorMod predict.ipriorMod print.ipriorPredict\n\n### ** Examples\n\ndat <- gen_smooth(20)\nmod <- iprior(y ~ ., dat, kernel = \"se\")\nfitted(mod)\nfitted(mod, intervals = TRUE)\npredict(mod, gen_smooth(5))\n\nwith(dat, mod 
<<- iprior(y, X, kernel = \"poly\"))\nnewdat <- gen_smooth(30)\nmod.pred <- predict(mod, list(newdat$X), y.test = newdat$y, intervals = TRUE)\nstr(mod.pred)\nprint(mod.pred, row = 5)\n\n\n\n"} {"package":"iprior","topic":"tecator.cv","snippet":"### Name: tecator.cv\n### Title: Results of I-prior cross-validation experiment on Tecator data\n### set\n### Aliases: tecator.cv\n### Keywords: datasets\n\n### ** Examples\n\n# Results from the six experiments\nprint(tecator.cv[[1]], \"RMSE\")\nprint(tecator.cv[[2]], \"RMSE\")\nprint(tecator.cv[[3]], \"RMSE\")\nprint(tecator.cv[[4]], \"RMSE\")\nprint(tecator.cv[[5]], \"RMSE\")\nprint(tecator.cv[[6]], \"RMSE\")\n\n# Summary of results\nprint(tecator.cv[[7]])\n\n## Not run: \n##D \n##D # Prepare data set\n##D data(tecator, package = \"caret\")\n##D endpoints <- as.data.frame(endpoints)\n##D colnames(endpoints) <- c(\"water\", \"fat\", \"protein\")\n##D absorp <- -t(diff(t(absorp))) # this takes first differences using diff()\n##D fat <- endpoints$fat\n##D \n##D # Here is the code to replicate the results\n##D mod1.cv <- iprior_cv(fat, absorp, folds = Inf)\n##D mod2.cv <- iprior_cv(fat, absorp, folds = Inf, kernel = \"poly2\",\n##D est.offset = TRUE)\n##D mod3.cv <- iprior_cv(fat, absorp, folds = Inf, kernel = \"poly3\",\n##D est.offset = TRUE)\n##D mod4.cv <- iprior_cv(fat, absorp, method = \"em\", folds = Inf, kernel = \"fbm\",\n##D control = list(stop.crit = 1e-2))\n##D mod5.cv <- iprior_cv(fat, absorp, folds = Inf, kernel = \"fbm\",\n##D est.hurst = TRUE, control = list(stop.crit = 1e-2))\n##D mod6.cv <- iprior_cv(fat, absorp, folds = Inf, kernel = \"se\",\n##D est.lengthscale = TRUE, control = list(stop.crit = 1e-2))\n##D \n##D tecator_res_cv <- function(mod) {\n##D res <- as.numeric(apply(mod$res[, -1], 2, mean)) # Calculate RMSE\n##D c(\"Training RMSE\" = res[1], \"Test RMSE\" = res[2])\n##D }\n##D \n##D tecator_tab_cv <- function() {\n##D tab <- t(sapply(list(mod1.cv, mod2.cv, mod3.cv, mod4.cv, mod5.cv, mod6.cv),\n##D tecator_res_cv))\n##D rownames(tab) <- c(\"Linear\", \"Quadratic\", \"Cubic\", \"fBm-0.5\", \"fBm-MLE\",\n##D \"SE-MLE\")\n##D tab\n##D }\n##D \n##D tecator.cv <- list(\n##D \"linear\" = mod1.cv,\n##D \"qudratic\" = mod2.cv,\n##D \"cubic\" = mod3.cv,\n##D \"fbm-0.5\" = mod4.cv,\n##D \"fbm-MLE\" = mod5.cv,\n##D \"SE\" = mod6.cv,\n##D \"summary\" = tecator_tab_cv()\n##D )\n## End(Not run)\n\n\n\n\n"} {"package":"newscatcheR","topic":"describe_url","snippet":"### Name: describe_url\n### Title: Describe URL\n### Aliases: describe_url\n\n### ** Examples\n\ndescribe_url(website = \"ycombinator.com\", rss_table = package_rss)\n\n\n"} {"package":"newscatcheR","topic":"filter_urls","snippet":"### Name: filter_urls\n### Title: Filter URLs in the provided database based on topic, country and\n### language\n### Aliases: filter_urls\n\n### ** Examples\n\nfilter_urls(topic = \"tech\", country = \"US\", language = \"en\")\n\n\n"} {"package":"newscatcheR","topic":"get_headlines","snippet":"### Name: get_headlines\n### Title: Get headlines A helper function to get just the headlines of the\n### feed\n### Aliases: get_headlines\n\n### ** Examples\n\n## Not run: \n##D Sys.sleep(3) # adding a small time delay to avoid\n##D # simultaneous posts to the API\n##D get_headlines(website = \"ycombinator.com\", rss_table = package_rss)\n## End(Not run)\n\n\n"} {"package":"newscatcheR","topic":"get_news","snippet":"### Name: get_news\n### Title: Get news Get the contents of a rss feed\n### Aliases: get_news\n\n### ** Examples\n\n## Not run: \n##D 
Sys.sleep(3) # adding a small time delay to avoid\n##D # simultaneous posts to the API\n##D get_news(website = \"ycombinator.com\", rss_table = package_rss)\n## End(Not run)\n\n\n"} {"package":"crossval","topic":"confusionMatrix","snippet":"### Name: confusionMatrix\n### Title: Compute Confusion Matrix\n### Aliases: confusionMatrix\n### Keywords: univar\n\n### ** Examples\n\n# load crossval library\nlibrary(\"crossval\")\n\n# true labels\na = c(\"cancer\", \"cancer\", \"control\", \"control\", \"cancer\", \"control\", \"control\")\n\n# predicted labels\np = c(\"cancer\", \"control\", \"control\", \"control\", \"cancer\", \"control\", \"cancer\")\n\n# confusion matrix (a vector)\ncm = confusionMatrix(a, p, negative=\"control\") \ncm\n# FP TP TN FN \n# 1 2 3 1 \n# attr(,\"negative\")\n# [1] \"control\"\n\n# corresponding accuracy, sensitivity etc.\ndiagnosticErrors(cm)\n# acc sens spec ppv npv lor \n# 0.7142857 0.6666667 0.7500000 0.6666667 0.7500000 1.7917595\n# attr(,\"negative\")\n# [1] \"control\"\n\n\n"} {"package":"crossval","topic":"crossval","snippet":"### Name: crossval\n### Title: Generic Function for Cross Validation\n### Aliases: crossval\n### Keywords: multivariate\n\n### ** Examples\n\n# load \"crossval\" package\nlibrary(\"crossval\")\n\n# classification examples\n\n# set up lda prediction function\npredfun.lda = function(train.x, train.y, test.x, test.y, negative)\n{\n require(\"MASS\") # for lda function\n\n lda.fit = lda(train.x, grouping=train.y)\n ynew = predict(lda.fit, test.x)$class\n\n # count TP, FP etc.\n out = confusionMatrix(test.y, ynew, negative=negative)\n\n return( out )\n}\n\n\n# Student's Sleep Data\ndata(sleep)\nX = as.matrix(sleep[,1, drop=FALSE]) # increase in hours of sleep \nY = sleep[,2] # drug given \nplot(X ~ Y)\nlevels(Y) # \"1\" \"2\"\ndim(X) # 20 1\n\nset.seed(12345)\ncv.out = crossval(predfun.lda, X, Y, K=5, B=20, negative=\"1\")\n\ncv.out$stat\ndiagnosticErrors(cv.out$stat)\n\n\n# linear regression example\n\ndata(\"attitude\")\ny = attitude[,1] # rating variable\nx = attitude[,-1] # data frame with the remaining variables\nis.factor(y) # FALSE\n\nsummary( lm(y ~ . , data=x) )\n\n# set up lm prediction function\npredfun.lm = function(train.x, train.y, test.x, test.y)\n{\n lm.fit = lm(train.y ~ . , data=train.x)\n ynew = predict(lm.fit, test.x )\n\n # compute squared error risk (MSE)\n out = mean( (ynew - test.y)^2 )\n\n return( out )\n}\n\n\n# prediction MSE using all variables\nset.seed(12345)\ncv.out = crossval(predfun.lm, x, y, K=5, B=20)\nc(cv.out$stat, cv.out$stat.se)\n\n# and only two variables\ncv.out = crossval(predfun.lm, x[,c(1,3)], y, K=5, B=20)\nc(cv.out$stat, cv.out$stat.se) \n\n\n\n# for more examples (e.g. 
using cross validation in a regression or classification context)\n# see the R packages \"sda\", \"care\", or \"binda\".\n\n\n\n"} {"package":"crossval","topic":"diagnosticErrors","snippet":"### Name: diagnosticErrors\n### Title: Compute Diagnostic Errors: Accuracy, Sensitivity, Specificity,\n### Positive Predictive Value, Negative Predictive Value, Log Odds Ratio\n### Aliases: diagnosticErrors\n### Keywords: univar\n\n### ** Examples\n\n# load crossval library\nlibrary(\"crossval\")\n\n# true labels\na = c(\"cancer\", \"cancer\", \"control\", \"control\", \"cancer\", \"control\", \"control\")\n\n# predicted labels\np = c(\"cancer\", \"control\", \"control\", \"control\", \"cancer\", \"control\", \"cancer\")\n\n# confusion matrix (a vector)\ncm = confusionMatrix(a, p, negative=\"control\") \ncm\n# FP TP TN FN \n# 1 2 3 1 \n# attr(,\"negative\")\n# [1] \"control\"\n\n# corresponding accuracy, sensitivity etc.\ndiagnosticErrors(cm)\n# acc sens spec ppv npv lor \n# 0.7142857 0.6666667 0.7500000 0.6666667 0.7500000 1.7917595\n# attr(,\"negative\")\n# [1] \"control\"\n\n\n"} {"package":"cmocean","topic":"cmocean","snippet":"### Name: cmocean\n### Title: Return a cmocean palette function\n### Aliases: cmocean\n### Keywords: color\n\n### ** Examples\n\n image(volcano, col = cmocean('thermal')(100))\n image(volcano, col = cmocean('Temperature', '0.03')(128))\n image(volcano, col = cmocean('thermal', clip = .2)(256))\n image(volcano, col = cmocean('thermal', start = .1, end = .7)(256))\n image(volcano, col = cmocean('thermal', direction = -1)(256))\n image(volcano, col = cmocean('thermal', alpha = .5)(256))\n\n\n"} {"package":"cmocean","topic":"scale_colour_cmocean","snippet":"### Name: scale_colour_cmocean\n### Title: cmocean colour scales for ggplot2\n### Aliases: scale_colour_cmocean scale_color_cmocean scale_fill_cmocean\n\n### ** Examples\nif (require('ggplot2')) {\n dat <- data.frame(\n a = 1:10, b = 11:20, c = rnorm(10)\n )\n\n ggplot(dat, aes(x = a, y = b, fill = c)) +\n geom_raster() + scale_fill_cmocean()\n}\n\n"} {"package":"disto","topic":"as.data.frame.disto","snippet":"### Name: as.data.frame.disto\n### Title: Convert a disto object to dataframe\n### Aliases: as.data.frame.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nhead(as.data.frame(dio))\n\n\n"} {"package":"disto","topic":"dist_extract","snippet":"### Name: dist_extract\n### Title: Matrix style extraction from dist object\n### Aliases: dist_extract\n\n### ** Examples\n\n# examples for dist_extract\n\n# create a dist object\ntemp <- dist(iris[,1:4])\nattr(temp, \"Labels\") <- outer(letters, letters, paste0)[1:150]\nhead(temp)\nmax(temp)\nas.matrix(temp)[1:5, 1:5]\n\n\ndist_extract(temp, 1, 1)\ndist_extract(temp, 1, 2)\ndist_extract(temp, 2, 1)\ndist_extract(temp, \"aa\", \"ba\")\n\ndist_extract(temp, 1:10, 11:20)\ndim(dist_extract(temp, 1:10, ))\ndim(dist_extract(temp, , 1:10))\ndist_extract(temp, 1:10, 11:20, product = \"inner\")\nlength(dist_extract(temp, 1:10, , product = \"inner\"))\nlength(dist_extract(temp, , 1:10, product = \"inner\"))\n\ndist_extract(temp, c(\"aa\", \"ba\", \"ca\"), c(\"ca\", \"da\", \"fa\"))\ndist_extract(temp, c(\"aa\", \"ba\", \"ca\"), c(\"ca\", \"da\", \"fa\"), product = \"inner\")\n\ndist_extract(temp, k = 1:3) # product is always inner when k is specified\n\n\n"} {"package":"disto","topic":"dist_replace","snippet":"### Name: dist_replace\n### Title: Replacement values in dist\n### Aliases: dist_replace\n\n### ** Examples\n\n\n# create a dist 
object\nd <- dist(iris[,1:4])\nattr(d, \"Labels\") <- outer(letters, letters, paste0)[1:150]\nhead(d)\nmax(d)\nas.matrix(d)[1:5, 1:5]\n\n# replacement in ij-mode\nd <- dist_replace(d, 1, 2, 100)\ndist_extract(d, 1, 2, product = \"inner\")\nd <- dist_replace(d, \"ca\", \"ba\", 102)\ndist_extract(d, \"ca\", \"ba\", product = \"inner\")\n\nd <- dist_replace(d, 1:5, 6:10, 11:15)\ndist_extract(d, 1:5, 6:10, product = \"inner\")\nd <- dist_replace(d, c(\"ca\", \"da\"), c(\"aa\", \"ba\"), 102)\ndist_extract(d, c(\"ca\", \"da\"), c(\"aa\", \"ba\"), product = \"inner\")\n\n# replacement in k-mode\nd <- dist_replace(d, k = 2, value = 101)\ndist_extract(d, k = 2)\ndist_extract(d, 3, 1, product = \"inner\") # extracting k=2 in ij-mode\n\n\n"} {"package":"disto","topic":"disto","snippet":"### Name: disto\n### Title: Constructor for class 'disto'\n### Aliases: disto disto disto-package disto-package\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nunclass(dio)\n\n\n"} {"package":"disto","topic":"names.disto","snippet":"### Name: names.disto\n### Title: Get names/labels\n### Aliases: names.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nnames(dio) <- paste0(\"a\", 1:150)\n\n\n"} {"package":"disto","topic":"plot.disto","snippet":"### Name: plot.disto\n### Title: Plot a disto object\n### Aliases: plot.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\nplot(dio, type = \"heatmap\")\nplot(dio, type = \"dendrogram\")\n\n\n"} {"package":"disto","topic":"print.disto","snippet":"### Name: print.disto\n### Title: Print method for the disto class\n### Aliases: print.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\nprint(dio)\n\n\n"} {"package":"disto","topic":"size","snippet":"### Name: size\n### Title: Obtain size of the disto object\n### Aliases: size\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\nsize(dio)\n\n\n"} {"package":"disto","topic":"summary.disto","snippet":"### Name: summary.disto\n### Title: Summary method for the disto class\n### Aliases: summary.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nsummary(dio)\n\n\n"} {"package":"disto","topic":"`names<-.disto`","snippet":"### Name: `names<-.disto`\n### Title: Set names/labels\n### Aliases: `names<-.disto` names<-.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nnames(dio) <- paste0(\"a\", 1:150)\n\n\n"} {"package":"disto","topic":"`[.disto`","snippet":"### Name: `[.disto`\n### Title: Extract from a disto object in matrix style extraction\n### Aliases: `[.disto` [.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\nnames(dio) <- paste0(\"a\", 1:150)\n\ndio[1, 2]\ndio[2, 1]\ndio[c(\"a1\", \"a10\"), c(\"a5\", \"a72\")]\ndio[c(\"a1\", \"a10\"), c(\"a5\", \"a72\"), product = \"inner\"]\ndio[k = c(1,3,5)]\n\n\n"} {"package":"disto","topic":"`[[.disto`","snippet":"### Name: `[[.disto`\n### Title: Extract a single value from disto object\n### Aliases: `[[.disto` [[.disto\n\n### ** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\ndio\n\ndio[[1, 2]]\ndio[[2, 1]]\ndio[[k = 3]]\n\n\n"} {"package":"disto","topic":"`[<-.disto`","snippet":"### Name: `[<-.disto`\n### Title: In-place replacement of values\n### Aliases: `[<-.disto` [<-.disto\n\n### 
** Examples\n\ntemp <- stats::dist(iris[,1:4])\ndio <- disto(objectname = \"temp\")\nnames(dio) <- paste0(\"a\", 1:150)\ndio\n\ndio[1, 2] <- 10\ndio[1,2]\n\ndio[1:10, 2:11] <- 100\ndio[1:10, 2:11, product = \"inner\"]\n\ndio[paste0(\"a\", 1:5), paste0(\"a\", 6:10)] <- 101\ndio[paste0(\"a\", 1:5), paste0(\"a\", 6:10), product = \"inner\"]\n\n\n"} {"package":"tikzDevice","topic":"getLatexStrWidth","snippet":"### Name: getLatexStrWidth\n### Title: Obtain Font Metrics from LaTeX\n### Aliases: getLatexStrWidth getLatexCharMetrics\n### Keywords: character metrics string\n\n### ** Examples\n\n\n getLatexStrWidth('{\\\\\\\\tiny Hello \\\\\\\\LaTeX!}')\n\n\n # Calculate ascent, descent and width for \"A\"\n getLatexCharMetrics(65)\n\n\n\n"} {"package":"tikzDevice","topic":"sanitizeTexString","snippet":"### Name: sanitizeTexString\n### Title: Replace LaTeX Special Characters in a String\n### Aliases: sanitizeTexString\n### Keywords: character\n\n### ** Examples\n\n\n# Be careful with sanitizing, it may lead to unexpected behavior.\n# For example, we may want -1 to be a superscript but it gets\n# sanitized away with the other default special characters.\n# The string appears in LaTeX exactly as shown.\n## Not run: \n##D sanitizeTexString('10\\\\\\\\^{-1}')\n## End(Not run)\n\n\n\n"} {"package":"tikzDevice","topic":"setTikzDefaults","snippet":"### Name: setTikzDefaults\n### Title: Reset tikzDevice options to default values.\n### Aliases: setTikzDefaults\n\n### ** Examples\n\n\n print( options( 'tikzDocumentDeclaration' ) )\n options( tikzDocumentDeclaration = 'foo' )\n setTikzDefaults()\n print( options( 'tikzDocumentDeclaration' ) )\n\n\n\n"} {"package":"tikzDevice","topic":"tikz","snippet":"### Name: tikz\n### Title: TikZ Graphics Device\n### Aliases: tikz\n### Keywords: device\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## Example 1 ###################################\n##D #Set up temporary work directory\n##D td <- tempdir()\n##D tf <- file.path(td,'example1.tex')\n##D oldwd <- getwd()\n##D setwd(td)\n##D \n##D # Minimal plot\n##D tikz(tf,standAlone=TRUE)\n##D plot(1)\n##D dev.off()\n##D \n##D # View the output\n##D tools::texi2dvi(tf,pdf=T)\n##D system(paste(getOption('pdfviewer'),file.path(td,'example1.pdf')))\n##D setwd(oldwd)\n##D ################################################\n##D \n##D ## Example 2 ###################################\n##D #Set up temporary work directory\n##D td <- tempdir()\n##D tf <- file.path(td,'example2.tex')\n##D oldwd <- getwd()\n##D setwd(td)\n##D \n##D #LaTeX math symbol names\n##D syms <-c('alpha','theta','tau','beta','vartheta','pi','upsilon',\n##D 'gamma','gamma','varpi','phi','delta','kappa','rho',\n##D 'varphi','epsilon','lambda','varrho','chi','varepsilon',\n##D 'mu','sigma','psi','zeta','nu','varsigma','omega','eta',\n##D 'xi','Gamma','Lambda','Sigma','Psi','Delta','Xi','Upsilon',\n##D 'Omega','Theta','Pi','Phi')\n##D x <- rnorm(length(syms))\n##D y <- rnorm(length(syms))\n##D \n##D tikz(tf,standAlone=TRUE)\n##D plot(-2:2, -2:2, type = \"n\", axes=F,\n##D xlab='', ylab='', main='TikZ Device Math Example')\n##D text(x,y,paste('\\\\\\\\Large$\\\\\\\\',syms,'$',sep=''))\n##D dev.off()\n##D \n##D #View the output\n##D tools::texi2dvi(tf,pdf=TRUE)\n##D system(paste(getOption('pdfviewer'),file.path(td,'example2.pdf')))\n##D setwd(oldwd)\n##D ################################################\n##D \n##D ## Example 3 ###################################\n##D #Set up temporary work directory\n##D td <- tempdir()\n##D tf <- file.path(td,'example3.tex')\n##D oldwd <- getwd()\n##D 
setwd(td)\n##D \n##D tikz(tf,standAlone=TRUE)\n##D plot(-2:2, -2:2, type = \"n\", axes=F, xlab='', ylab='', main='Random Circles')\n##D points(rnorm(50), rnorm(50), pch=21,\n##D bg=rainbow(50,alpha=.5), cex=10)\n##D dev.off()\n##D \n##D #View the output\n##D tools::texi2dvi(tf,pdf=TRUE)\n##D system(paste(getOption('pdfviewer'),file.path(td,'example3.pdf')))\n##D setwd(oldwd)\n##D ################################################\n## End(Not run)\n\n\n\n"} {"package":"tikzDevice","topic":"tikzAnnotate","snippet":"### Name: tikzAnnotate\n### Title: Add Custom TikZ Code to an Active Device\n### Aliases: tikzAnnotate tikzNode tikzCoord tikzAnnotateGrob tikzNodeGrob\n### tikzCoordGrob grid.tikzAnnotate grid.tikzNode grid.tikzCoord\n### Keywords: annotation device tikz\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ### Example 1: Annotations in Base Graphics\n##D # Load some additional TikZ libraries\n##D tikz(\"annotation.tex\",width=4,height=4,\n##D packages = c(getOption('tikzLatexPackages'),\n##D \"\\\\usetikzlibrary{decorations.pathreplacing}\",\n##D \"\\\\usetikzlibrary{positioning}\",\n##D \"\\\\usetikzlibrary{shapes.arrows,shapes.symbols}\")\n##D )\n##D \n##D p <- rgamma (300 ,1)\n##D outliers <- which( p > quantile(p,.75)+1.5*IQR(p) )\n##D boxplot(p)\n##D \n##D # Add named coordinates that other TikZ commands can hook onto\n##D tikzCoord(1, min(p[outliers]), 'min outlier')\n##D tikzCoord(1, max(p[outliers]), 'max outlier')\n##D \n##D # Use tikzAnnotate to insert arbitrary code, such as drawing a\n##D # fancy path between min outlier and max outlier.\n##D tikzAnnotate(c(\"\\\\draw[very thick,red,\",\n##D # Turn the path into a brace.\n##D 'decorate,decoration={brace,amplitude=12pt},',\n##D # Shift it 1em to the left of the coordinates\n##D 'transform canvas={xshift=-1em}]',\n##D '(min outlier) --',\n##D # Add a node with some text in the middle of the path\n##D 'node[single arrow,anchor=tip,fill=white,draw=green,',\n##D 'left=14pt,text width=0.70in,align=center]',\n##D '{Holy Outliers Batman!}', '(max outlier);'))\n##D \n##D # tikzNode can be used to place nodes with customized options and content\n##D tikzNode(\n##D opts='starburst,fill=green,draw=blue,very thick,right=of max outlier',\n##D content='Wow!'\n##D )\n##D \n##D dev.off()\n##D \n##D \n##D ### Example 2: Annotations in Grid Graphics\n##D library(grid)\n##D \n##D tikz(\"grid_annotation.tex\",width=4,height=4,\n##D packages = c(getOption('tikzLatexPackages'),\n##D \"\\\\usetikzlibrary{shapes.callouts}\")\n##D )\n##D \n##D pushViewport(plotViewport())\n##D pushViewport(dataViewport(1:10, 1:10))\n##D \n##D grid.rect()\n##D grid.xaxis()\n##D grid.yaxis()\n##D grid.points(1:10, 1:10)\n##D \n##D for ( i in seq(2,8,2) ){\n##D grid.tikzNode(i,i,opts='ellipse callout,draw,anchor=pointer',content=i)\n##D }\n##D \n##D dev.off()\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"shinyAce","topic":"aceEditor","snippet":"### Name: aceEditor\n### Title: Render Ace\n### Aliases: aceEditor\n\n### ** Examples\n\n## Not run: \n##D aceEditor(\n##D outputId = \"myEditor\",\n##D value = \"Initial text for editor here\",\n##D mode = \"r\",\n##D theme = \"ambiance\"\n##D )\n##D \n##D aceEditor(\n##D outputId = \"myCodeEditor\",\n##D value = \"# Enter code\",\n##D mode = \"r\",\n##D hotkeys = list(\n##D helpKey = \"F1\",\n##D runKey = list(\n##D win = \"Ctrl-R|Ctrl-Shift-Enter\",\n##D mac = \"CMD-ENTER|CMD-SHIFT-ENTER\"\n##D )\n##D ),\n##D wordWrap = TRUE, debounce = 10\n##D )\n##D \n##D aceEditor(\n##D outputId = \"mySmartEditor\",\n##D value = \"plot(wt 
~ mpg, data = mtcars)\",\n##D mode = \"r\",\n##D autoComplete = \"live\",\n##D autoCompleteList = list(mtcars = colnames(mtcars))\n##D )\n## End(Not run)\n\n\n\n"} {"package":"shinyAce","topic":"get_arg_help","snippet":"### Name: get_arg_help\n### Title: Retrieve argument documentation from help document\n### Aliases: get_arg_help\n\n### ** Examples\n\nshinyAce:::get_arg_help(\"match\", package = \"base\", args = c(\"table\", \"nomatch\"))\n\n\n\n"} {"package":"shinyAce","topic":"get_desc_help","snippet":"### Name: get_desc_help\n### Title: Retrieve description section from help document\n### Aliases: get_desc_help\n\n### ** Examples\n\nshinyAce:::get_desc_help(\"match\", package = \"base\")\n\n\n\n"} {"package":"shinyAce","topic":"get_usage_help","snippet":"### Name: get_usage_help\n### Title: Retrieve usage section from help document\n### Aliases: get_usage_help\n\n### ** Examples\n\nshinyAce:::get_usage_help(\"match\", package = \"base\")\n\n\n\n"} {"package":"shinyAce","topic":"is.empty","snippet":"### Name: is.empty\n### Title: Check if vector is empty\n### Aliases: is.empty\n\n### ** Examples\n\nis.empty(NULL)\nis.empty(NA)\nis.empty(c())\nis.empty(\"\")\nis.empty(\" \")\nis.empty(c(\" \", \" \"))\nis.empty(list())\nis.empty(list(a = \"\", b = \"\"))\n\n\n"} {"package":"shinyAce","topic":"re_capture","snippet":"### Name: re_capture\n### Title: Retrieve regular expression named capture groups as a list\n### Aliases: re_capture\n\n### ** Examples\n\n# note: the group names 'num' and 'alpha' are illustrative; the original\n# names were lost in extraction\nshinyAce:::re_capture(\"ak09j b\", \"(?<num>\\\\d+)(?<alpha>[a-zA-Z]+)\", perl = TRUE)\n\n\n\n"} {"package":"shinyAce","topic":"updateAceEditor","snippet":"### Name: updateAceEditor\n### Title: Update Ace Editor\n### Aliases: updateAceEditor\n\n### ** Examples\n\n## Not run: \n##D shinyServer(function(input, output, session) {\n##D observe({\n##D updateAceEditor(session, \"myEditor\", \"Updated text for editor here\",\n##D mode = \"r\", theme = \"ambiance\")\n##D })\n##D }\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"CTD_BCD2014666_008_1_DN.ODF.gz","snippet":"### Name: CTD_BCD2014666_008_1_DN.ODF.gz\n### Title: Sample ctd File in .odf Format\n### Aliases: CTD_BCD2014666_008_1_DN.ODF.gz\n\n### ** Examples\n\nctd <- read.ctd(system.file(\"extdata\", \"CTD_BCD2014666_008_1_DN.ODF.gz\", package=\"oce\"))\nplot(ctd)\n\n\n\n"} {"package":"oce","topic":"GMTOffsetFromTz","snippet":"### Name: GMTOffsetFromTz\n### Title: Determine Time Offset From Timezone\n### Aliases: GMTOffsetFromTz\n\n### ** Examples\n\nlibrary(oce)\ncat(\"Atlantic Standard Time is \", GMTOffsetFromTz(\"AST\"), \"hours after UTC\")\n\n\n"} {"package":"oce","topic":"ODFNames2oceNames","snippet":"### Name: ODFNames2oceNames\n### Title: Translate ODF CODE Strings to oce Variable Names\n### Aliases: ODFNames2oceNames\n\n### ** Examples\n\nODFNames2oceNames(\"TEMP_01\")$names # \"temperature\"\n\n\n\n"} {"package":"oce","topic":"T68fromT90","snippet":"### Name: T68fromT90\n### Title: Convert From ITS-90 to IPTS-68 Temperature\n### Aliases: T68fromT90\n\n### ** Examples\n\nlibrary(oce)\nT90 <- seq(3, 20, 1)\nT68 <- T68fromT90(T90)\nsqrt(mean((T68-T90)^2))\n\n\n\n"} {"package":"oce","topic":"T90fromT48","snippet":"### Name: T90fromT48\n### Title: Convert From ITS-48 to ITS-90 Temperature\n### Aliases: T90fromT48\n\n### ** Examples\n\nlibrary(oce)\nT48 <- seq(3, 20, 1)\nT90 <- T90fromT48(T48)\nsqrt(mean((T90-T48)^2))\n\n\n\n"} {"package":"oce","topic":"T90fromT68","snippet":"### Name: T90fromT68\n### Title: Convert From IPTS-68 to ITS-90 Temperature\n### Aliases: T90fromT68\n\n### ** 
Examples\n\nlibrary(oce)\nT68 <- seq(3, 20, 1)\nT90 <- T90fromT68(T68)\nsqrt(mean((T68-T90)^2))\n\n\n\n"} {"package":"oce","topic":"ad2cpCodeToName","snippet":"### Name: ad2cpCodeToName\n### Title: Map AD2CP ID Code to oce Name\n### Aliases: ad2cpCodeToName\n\n### ** Examples\n\nstopifnot(ad2cpCodeToName(0x15) == \"0x15=burst\")\n\n\n\n"} {"package":"oce","topic":"addSpine","snippet":"### Name: addSpine\n### Title: Add a Spine to a section Object\n### Aliases: addSpine\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\neastern <- subset(section, longitude < (-65))\nspine <- list(\n longitude = c(-74.5, -69.2, -55),\n latitude = c(38.6, 36.25, 36.25)\n)\neasternWithSpine <- addSpine(eastern, spine)\n# plot(easternWithSpine, which=\"map\")\n# plot(easternWithSpine, xtype=\"distance\", which=\"temperature\")\n# plot(easternWithSpine, xtype=\"spine\", which=\"temperature\")\n\n\n\n"} {"package":"oce","topic":"adp","snippet":"### Name: adp\n### Title: Sample adp Data\n### Aliases: adp\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(adp)\n\n# Velocity components. (Note: we should probably trim some bins at top.)\nplot(adp)\n\n# Note that tides have moved the mooring.\nplot(adp, which = 15:18)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"adpConvertRawToNumeric","snippet":"### Name: adpConvertRawToNumeric\n### Title: Convert Raw to Numeric Values in an adp Object\n### Aliases: adpConvertRawToNumeric\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\nadp[[\"a\"]][, , 1][, 1]\nADP <- adpConvertRawToNumeric(adp)\nADP[[\"a\"]][, , 1][, 1]\n\n\n"} {"package":"oce","topic":"adpEnsembleAverage","snippet":"### Name: adpEnsembleAverage\n### Title: Ensemble Average an adp Object in Time\n### Aliases: adpEnsembleAverage\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\nadpAvg <- adpEnsembleAverage(adp, n = 2)\nplot(adpAvg)\n\n\n\n"} {"package":"oce","topic":"adp_rdi.000","snippet":"### Name: adp_rdi.000\n### Title: Sample adp File in RDI Format\n### Aliases: adp_rdi.000\n\n### ** Examples\n\nread.oce(system.file(\"extdata\", \"adp_rdi.000\", package=\"oce\"))\n\n\n\n"} {"package":"oce","topic":"adv-class","snippet":"### Name: adv-class\n### Title: Class to Store Acoustic-Doppler Velocimeter Data\n### Aliases: adv-class\n\n### ** Examples\n\ndata(adv)\nadv[[\"v\"]] <- 0.001 + adv[[\"v\"]] # add 1mm/s to all velocity components\n\n\n\n"} {"package":"oce","topic":"adv","snippet":"### Name: adv\n### Title: Sample adv Data\n### Aliases: adv\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(adv)\n\n# Velocity time-series\nplot(adv)\n\n# Spectrum of upward component of velocity, with ``turbulent'' reference line\ns <- spectrum(adv[[\"v\"]][, 3], plot = FALSE)\nplot(log10(s$freq), log10(s$spec), type = \"l\")\nfor (a in seq(-20, 20, by = 1)) {\n abline(a = a, b = -5 / 3, col = \"gray\", lty = \"dotted\")\n}\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"airRho","snippet":"### Name: airRho\n### Title: Air Density\n### Aliases: airRho\n\n### ** Examples\n\ndegC <- seq(0, 30, length.out = 100)\np <- seq(98, 102, length.out = 100) * 1e3\ncontour(x = degC, y = p, z = outer(degC, p, airRho), labcex = 1)\n\n\n"} {"package":"oce","topic":"amsr","snippet":"### Name: amsr\n### Title: Sample amsr Data (Near Nova Scotia)\n### Aliases: amsr\n\n### ** Examples\n\nlibrary(oce)\ndata(coastlineWorld)\ndata(amsr)\nplot(amsr, \"SST\")\nlines(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]])\n\n\n\n"} {"package":"oce","topic":"angle2hms","snippet":"### Name: angle2hms\n### Title: Convert Astronomical 
Angle in Degrees to Hours, Minutes and\n### Seconds\n### Aliases: angle2hms\n\n### ** Examples\n\n# A randomly-chosen example on page 99 of Meeus (1991).\nangle2hms(177.74208) # string component 11h50m58s.10\n\n\n\n"} {"package":"oce","topic":"angleRemap","snippet":"### Name: angleRemap\n### Title: Convert Angle From 0:360 to -180:180 Convention\n### Aliases: angleRemap\n\n### ** Examples\n\n\nlibrary(oce)\n# fake some heading data that lie near due-north (0 degrees)\nn <- 20\nheading <- 360 + rnorm(n, sd = 10)\nheading <- ifelse(heading > 360, heading - 360, heading)\nx <- 1:n\nplot(x, heading, ylim = c(-10, 360), type = \"l\", col = \"lightgray\", lwd = 10)\nlines(x, angleRemap(heading))\n\n\n"} {"package":"oce","topic":"applyMagneticDeclination,adp-method","snippet":"### Name: applyMagneticDeclination,adp-method\n### Title: Alter an adp Object to Account for Magnetic Declination\n### Aliases: applyMagneticDeclination,adp-method\n\n### ** Examples\n\n# Transform beam coordinates to xyz, then to enu with respect to\n# magnetic north, and then to geographic north.\nlibrary(oce)\nfile <- system.file(\"extdata\", \"adp_rdi.000\", package = \"oce\")\nlon <- -69.73433\nlat <- 47.88126\nbeam <- read.oce(file, from = 1, to = 4, longitude = lon, latitude = lat)\ndec <- magneticField(lon, lat, beam[[\"time\"]][1])$declination\nxyz <- beamToXyzAdp(beam)\n# Here, we tell xyzToEnuAdp() not to set a declination,\n# so enuMag has metadata$north equal to \"magnetic\". We could\n# also skip the use of applyMagneticDeclination() by supplying\n# the known declination to xyzToEnuAdp().\nenuMag <- xyzToEnuAdp(xyz, declination = NULL)\nenuGeo <- applyMagneticDeclination(enuMag, declination = dec)\n\n\n\n"} {"package":"oce","topic":"approx3d","snippet":"### Name: approx3d\n### Title: Trilinear Interpolation in a 3D Array\n### Aliases: approx3d\n\n### ** Examples\n\n# set up a grid\nlibrary(oce)\nn <- 5\nx <- seq(0, 1, length.out = n)\ny <- seq(0, 1, length.out = n)\nz <- seq(0, 1, length.out = n)\nf <- array(1:n^3, dim = c(length(x), length(y), length(z)))\n# interpolate along a diagonal line\nm <- 100\nxout <- seq(0, 1, length.out = m)\nyout <- seq(0, 1, length.out = m)\nzout <- seq(0, 1, length.out = m)\napprox <- approx3d(x, y, z, f, xout, yout, zout)\n# graph the results\nplot(xout, approx, type = \"l\")\npoints(xout[1], f[1, 1, 1])\npoints(xout[m], f[n, n, n])\n\n\n"} {"package":"oce","topic":"argo","snippet":"### Name: argo\n### Title: Sample argo Data\n### Aliases: argo\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\nsummary(argo)\ndata(coastlineWorld)\nplot(argo, which = \"trajectory\")\n\n\n\n"} {"package":"oce","topic":"argoGrid","snippet":"### Name: argoGrid\n### Title: Grid Argo Float Data\n### Aliases: argoGrid\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\ng <- argoGrid(argo, p = seq(0, 100, 1))\npar(mfrow = c(2, 1))\nt <- g[[\"time\"]]\nz <- -g[[\"pressure\"]][, 1]\n# Set zlim because of spurious temperatures.\nimagep(t, z, t(g[[\"temperature\"]]), ylim = c(-100, 0), zlim = c(0, 20))\nimagep(t, z, t(g[[\"salinity\"]]), ylim = c(-100, 0))\n\n\n\n"} {"package":"oce","topic":"argoJuldToTime","snippet":"### Name: argoJuldToTime\n### Title: Convert Argo Julian Day to R Time\n### Aliases: argoJuldToTime\n\n### ** Examples\n\nargoJuldToTime(25749)\n\n\n\n"} {"package":"oce","topic":"as.adp","snippet":"### Name: as.adp\n### Title: Create an adp Object\n### Aliases: as.adp\n\n### ** Examples\n\ndata(adp)\nt <- adp[[\"time\"]]\nd <- adp[[\"distance\"]]\nv <- adp[[\"v\"]]\na <- as.adp(time = t, distance = 
d, v = v)\n## No test: \nplot(a)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"as.cm","snippet":"### Name: as.cm\n### Title: Coerce Data Into a cm Object\n### Aliases: as.cm\n\n### ** Examples\n\nlibrary(oce)\n# Example 1: creation from scratch\nt <- Sys.time() + 0:50\nu <- sin(2 * pi * 0:50 / 5) + rnorm(51)\nv <- cos(2 * pi * 0:50 / 5) + rnorm(51)\np <- 100 + rnorm(51)\nsummary(as.cm(t, u, v, p))\n\n# Example 2: creation from an adv object\ndata(adv)\nsummary(as.cm(adv))\n\n\n\n"} {"package":"oce","topic":"as.ctd","snippet":"### Name: as.ctd\n### Title: Coerce Data Into a ctd Object\n### Aliases: as.ctd\n\n### ** Examples\n\nlibrary(oce)\n# 1. fake data, with default units\npressure <- 1:50\ntemperature <- 10 - tanh((pressure - 20) / 5) + 0.02 * rnorm(50)\nsalinity <- 34 + 0.5 * tanh((pressure - 20) / 5) + 0.01 * rnorm(50)\nctd <- as.ctd(salinity, temperature, pressure)\n# Add a new column\nfluo <- 5 * exp(-pressure / 20)\nctd <- oceSetData(ctd,\n name = \"fluorescence\", value = fluo,\n unit = list(unit = expression(mg / m^3), scale = \"\")\n)\nsummary(ctd)\n\n# 2. fake data, with supplied units (which are the defaults, actually)\nctd <- as.ctd(salinity, temperature, pressure,\n units = list(\n salinity = list(unit = expression(), scale = \"PSS-78\"),\n temperature = list(unit = expression(degree * C), scale = \"ITS-90\"),\n pressure = list(unit = expression(dbar), scale = \"\")\n )\n)\n\n\n\n"} {"package":"oce","topic":"as.gps","snippet":"### Name: as.gps\n### Title: Coerce Data Into a gps Object\n### Aliases: as.gps\n\n### ** Examples\n\n# Location of the Tower Tank at Dalhousie University\ntowerTank <- as.gps(-63.59428, 44.63572)\n\n\n\n"} {"package":"oce","topic":"as.sealevel","snippet":"### Name: as.sealevel\n### Title: Coerce Data Into a sealevel Object\n### Aliases: as.sealevel\n\n### ** Examples\n\nlibrary(oce)\n\n# Construct a year of M2 tide, starting at the default time\n# 0000-01-01T00:00:00.\nh <- seq(0, 24 * 365)\nelevation <- 2.0 * sin(2 * pi * h / 12.4172)\nsl <- as.sealevel(elevation)\nsummary(sl)\n\n# As above, but start at the Y2K time.\ntime <- as.POSIXct(\"2000-01-01\") + h * 3600\nsl <- as.sealevel(elevation, time)\nsummary(sl)\n\n\n"} {"package":"oce","topic":"as.section","snippet":"### Name: as.section\n### Title: Create a Section\n### Aliases: as.section\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\n# vector of names of CTD objects\nfake <- ctd\nfake[[\"temperature\"]] <- ctd[[\"temperature\"]] + 0.5\nfake[[\"salinity\"]] <- ctd[[\"salinity\"]] + 0.1\nfake[[\"longitude\"]] <- ctd[[\"longitude\"]] + 0.01\nfake[[\"station\"]] <- \"fake\"\nsec1 <- as.section(c(\"ctd\", \"fake\"))\nsummary(sec1)\n# vector of CTD objects\nctds <- vector(\"list\", 2)\nctds[[1]] <- ctd\nctds[[2]] <- fake\nsec2 <- as.section(ctds)\nsummary(sec2)\n# argo data (a subset)\ndata(argo)\nsec3 <- as.section(subset(argo, profile < 5))\nsummary(sec3)\n\n\n\n"} {"package":"oce","topic":"as.tidem","snippet":"### Name: as.tidem\n### Title: Create tidem Object From Fitted Harmonic Data\n### Aliases: as.tidem\n\n### ** Examples\n\n\n# Example 1: show agreement with tidem()\ndata(sealevelTuktoyaktuk)\n# 'm0' is model fitted by tidem()\nm0 <- tidem(sealevelTuktoyaktuk)\np0 <- predict(m0, sealevelTuktoyaktuk[[\"time\"]])\nm1 <- as.tidem(\n mean(sealevelTuktoyaktuk[[\"time\"]]), sealevelTuktoyaktuk[[\"latitude\"]],\n m0[[\"name\"]], m0[[\"amplitude\"]], m0[[\"phase\"]]\n)\n# Test agreement with tidem() result, by comparing predicted sealevels.\np1 <- predict(m1, 
sealevelTuktoyaktuk[[\"time\"]])\nstopifnot(max(abs(p1 - p0), na.rm = TRUE) < 1e-10)\n\n# Example 2: See the effect of dropping weak constituents\nm0[[\"name\"]][which(m0[[\"amplitude\"]] > 0.05)]\nh <- \"\nname amplitude phase\n Z0 1.98061875 0.000000\n MM 0.21213065 263.344739\n MSF 0.15605629 133.795004\n O1 0.07641438 74.233130\n K1 0.13473817 81.093134\n OO1 0.05309911 235.749693\n N2 0.08377108 44.521462\n M2 0.49041340 77.703594\n S2 0.22023705 137.475767\"\ncoef <- read.table(text = h, header = TRUE)\nm2 <- as.tidem(\n mean(sealevelTuktoyaktuk[[\"time\"]]),\n sealevelTuktoyaktuk[[\"latitude\"]],\n coef$name, coef$amplitude, coef$phase\n)\np2 <- predict(m2, sealevelTuktoyaktuk[[\"time\"]])\npar(mfrow = c(3, 1))\noce.plot.ts(sealevelTuktoyaktuk[[\"time\"]], p0)\nylim <- par(\"usr\")[3:4] # to match scales in other panels\noce.plot.ts(sealevelTuktoyaktuk[[\"time\"]], p1, ylim = ylim)\noce.plot.ts(sealevelTuktoyaktuk[[\"time\"]], p2, ylim = ylim)\n\n\n\n"} {"package":"oce","topic":"as.unit","snippet":"### Name: as.unit\n### Title: Convert a String to a Unit\n### Aliases: as.unit\n\n### ** Examples\n\nas.unit(\"DBAR\")\nas.unit(\"IPTS-68\")\nas.unit(\"ITS-90\")\nas.unit(\"PSS-78\")\nas.unit(\"UMOL/KG\")\n\n\n\n"} {"package":"oce","topic":"as.windrose","snippet":"### Name: as.windrose\n### Title: Create a windrose Object\n### Aliases: as.windrose\n\n### ** Examples\n\nlibrary(oce)\nset.seed(1234)\ntheta <- seq(0, 360, 0.25)\nx <- 1 + cos(pi / 180 * theta) + rnorm(theta)\ny <- sin(pi / 180 * theta) + rnorm(theta)\nwr <- as.windrose(x, y)\nsummary(wr)\n\n\n\n"} {"package":"oce","topic":"bcdToInteger","snippet":"### Name: bcdToInteger\n### Title: Convert a BCD Value to an Integer Value\n### Aliases: bcdToInteger\n\n### ** Examples\n\nlibrary(oce)\ntwenty.five <- bcdToInteger(as.raw(0x25))\nthirty.seven <- as.integer(as.raw(0x25))\n\n\n"} {"package":"oce","topic":"beamUnspreadAdp","snippet":"### Name: beamUnspreadAdp\n### Title: Adjust adp Object to Account for Spherical Spreading\n### Aliases: beamUnspreadAdp\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\nplot(adp, which = 5) # beam 1 echo intensity\nadp.att <- beamUnspreadAdp(adp)\nplot(adp.att, which = 5) # beam 1 echo intensity\n# Profiles\npar(mar = c(4, 4, 1, 1))\na <- adp[[\"a\", \"numeric\"]] # second arg yields matrix return value\ndistance <- adp[[\"distance\"]]\nplot(apply(a, 2, mean), distance, type = \"l\", xlim = c(0, 256))\naAtt <- adp.att[[\"a\", \"numeric\"]] # spreading-corrected amplitudes, to match the legend\nlines(apply(aAtt, 2, mean), distance, type = \"l\", col = \"red\")\nlegend(\"topright\", lwd = 1, col = c(\"black\", \"red\"), legend = c(\"original\", \"attenuated\"))\n# Image\nplot(adp.att, which = \"amplitude\", col = oce.colorsViridis(100))\n\n\n\n"} {"package":"oce","topic":"binApply1D","snippet":"### Name: binApply1D\n### Title: Apply a Function to Vector Data\n### Aliases: binApply1D\n\n### ** Examples\n\nlibrary(oce)\n# salinity profile (black) with 1-dbar bin means (red)\ndata(ctd)\nplotProfile(ctd, \"salinity\")\np <- ctd[[\"pressure\"]]\nS <- ctd[[\"salinity\"]]\npbreaks <- seq(0, max(p), 1)\nbinned <- binApply1D(p, S, pbreaks, mean)\nlines(binned$result, binned$xmids, lwd = 2, col = rgb(1, 0, 0, 0.9))\n\n\n\n"} {"package":"oce","topic":"binAverage","snippet":"### Name: binAverage\n### Title: Bin-average a Vector y, Based on x Values\n### Aliases: binAverage\n\n### ** Examples\n\nlibrary(oce)\n# A. fake linear data\nx <- seq(0, 100, 1)\ny <- 1 + 2 * x\nplot(x, y, pch = 1)\nba <- binAverage(x, y)\npoints(ba$x, ba$y, pch = 3, col = \"red\", cex = 3)\n\n# B. 
fake quadratic data\ny <- 1 + x^2\nplot(x, y, pch = 1)\nba <- binAverage(x, y)\npoints(ba$x, ba$y, pch = 3, col = \"red\", cex = 3)\n\n# C. natural data\ndata(co2)\nplot(co2)\navg <- binAverage(time(co2), co2, 1950, 2000, 2)\npoints(avg$x, avg$y, col = \"red\")\n\n\n\n"} {"package":"oce","topic":"binMean1D","snippet":"### Name: binMean1D\n### Title: Bin-average f=f(x)\n### Aliases: binMean1D\n\n### ** Examples\n\n# Plot raw temperature profile as circles, with lines indicating\n# the result of averaging in 1-metre depth intervals.\nlibrary(oce)\ndata(ctd)\nz <- ctd[[\"z\"]]\nT <- ctd[[\"temperature\"]]\nplot(T, z, cex = 0.3)\nTT <- binMean1D(z, T, seq(-100, 0, 1))\nlines(TT$result, TT$xmids, col = rgb(1, 0, 0, 0.9), lwd = 2)\n\n\n\n"} {"package":"oce","topic":"binMean2D","snippet":"### Name: binMean2D\n### Title: Bin-average f=f(x,y)\n### Aliases: binMean2D\n\n### ** Examples\n\nlibrary(oce)\nx <- runif(500, 0, 0.5)\ny <- runif(500, 0, 0.5)\nf <- x^2 + y^2\nxb <- seq(0, 0.5, 0.1)\nyb <- seq(0, 0.5, 0.1)\nm <- binMean2D(x, y, f, xb, yb)\ncm <- colormap(f, col = oceColorsTurbo)\nopar <- par(no.readonly = TRUE)\ndrawPalette(colormap = cm)\nplot(x, y, col = cm$zcol, pch = 20, cex = 1.4)\ncontour(m$xmids, m$ymids, m$result, add = TRUE, labcex = 1.4)\npar(opar)\n\n\n\n"} {"package":"oce","topic":"byteToBinary","snippet":"### Name: byteToBinary\n### Title: Format Bytes as Binary (Defunct)\n### Aliases: byteToBinary\n\n### ** Examples\n\nlibrary(oce)\n# Note comparison with rawToBits():\na <- as.raw(0x0a)\nbyteToBinary(a, \"big\") # \"00001010\"\nas.integer(rev(rawToBits(a))) # 0 0 0 0 1 0 1 0\n\n\n"} {"package":"oce","topic":"cm","snippet":"### Name: cm\n### Title: Sample cm Data\n### Aliases: cm\n\n### ** Examples\n\nlibrary(oce)\ndata(cm)\nsummary(cm)\nplot(cm)\n\n\n\n"} {"package":"oce","topic":"coastlineCut","snippet":"### Name: coastlineCut\n### Title: Cut a Coastline Object at Specified Longitude\n### Aliases: coastlineCut\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\nmapPlot(coastlineCut(coastlineWorld, lon_0 = 100),\n projection = \"+proj=moll +lon_0=100\", col = \"gray\"\n)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"colormap","snippet":"### Name: colormap\n### Title: Calculate a Color Map\n### Aliases: colormap\n\n### ** Examples\n\nlibrary(oce)\n# Example 1. 
color scheme for points on xy plot\nx <- seq(0, 1, length.out = 40)\ny <- sin(2 * pi * x)\npar(mar = c(3, 3, 1, 1))\nmar <- par(\"mar\") # prevent margin creep by drawPalette()\n# First, default breaks\nc <- colormap(y)\ndrawPalette(c$zlim, col = c$col, breaks = c$breaks)\nplot(x, y, bg = c$zcol, pch = 21, cex = 1)\ngrid()\npar(mar = mar)\n# Second, 100 breaks, yielding a smoother palette\nc <- colormap(y, breaks = 100)\ndrawPalette(c$zlim, col = c$col, breaks = c$breaks)\nplot(x, y, bg = c$zcol, pch = 21, cex = 1)\ngrid()\npar(mar = mar)\n\n\n\n"} {"package":"oce","topic":"computableWaterProperties","snippet":"### Name: computableWaterProperties\n### Title: Determine Available Derived Water Properties\n### Aliases: computableWaterProperties\n\n### ** Examples\n\nlibrary(oce)\n# Example 1\ndata(ctd)\ncomputableWaterProperties(ctd)\n# Example 2: nothing can be computed from just salinity\ncomputableWaterProperties(\"salinity\")\n# Example 3: quite a lot can be computed from this trio of values\ncomputableWaterProperties(c(\"salinity\", \"temperature\", \"pressure\"))\n# Example 4: now we can get TEOS-10 values as well\ncomputableWaterProperties(c(\n \"salinity\", \"temperature\", \"pressure\",\n \"longitude\", \"latitude\"\n))\n\n\n\n"} {"package":"oce","topic":"concatenate,adp-method","snippet":"### Name: concatenate,adp-method\n### Title: Concatenate adp Objects\n### Aliases: concatenate,adp-method\n\n### ** Examples\n\n## 1. Split, then recombine, a ctd object.\ndata(ctd)\nctd1 <- subset(ctd, scan <= median(ctd[[\"scan\"]]))\nctd2 <- subset(ctd, scan > median(ctd[[\"scan\"]]))\nCTD <- concatenate(ctd1, ctd2)\n\n## 2. Split, then recombine, an adp object.\ndata(adp)\nmidtime <- median(adp[[\"time\"]])\nadp1 <- subset(adp, time <= midtime)\nadp2 <- subset(adp, time > midtime)\nADP <- concatenate(adp1, adp2)\n\n## Not run: \n##D ## 3. Download two met files and combine them.\n##D met1 <- read.met(download.met(id=6358, year=2003, month=8))\n##D met2 <- read.met(download.met(id=6358, year=2003, month=9))\n##D MET <- concatenate(met1, met2)\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"concatenate,oce-method","snippet":"### Name: concatenate,oce-method\n### Title: Concatenate oce Objects (oce-Specific)\n### Aliases: concatenate,oce-method\n\n### ** Examples\n\n## 1. Split, then recombine, a ctd object.\ndata(ctd)\nctd1 <- subset(ctd, scan <= median(ctd[[\"scan\"]]))\nctd2 <- subset(ctd, scan > median(ctd[[\"scan\"]]))\nCTD <- concatenate(ctd1, ctd2)\n\n## 2. Split, then recombine, an adp object.\ndata(adp)\nmidtime <- median(adp[[\"time\"]])\nadp1 <- subset(adp, time <= midtime)\nadp2 <- subset(adp, time > midtime)\nADP <- concatenate(adp1, adp2)\n\n## Not run: \n##D ## 3. Download two met files and combine them.\n##D met1 <- read.met(download.met(id=6358, year=2003, month=8))\n##D met2 <- read.met(download.met(id=6358, year=2003, month=9))\n##D MET <- concatenate(met1, met2)\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"coriolis","snippet":"### Name: coriolis\n### Title: Coriolis Parameter on the Earth\n### Aliases: coriolis\n\n### ** Examples\n\nC <- coriolis(45) # 1e-4\n\n\n"} {"package":"oce","topic":"ctd-class","snippet":"### Name: ctd-class\n### Title: Class to Store CTD (or general hydrographic) Data\n### Aliases: ctd-class\n\n### ** Examples\n\n\n# 1. Create a ctd object with fake data.\na <- as.ctd(salinity = 35 + 1:3 / 10, temperature = 10 - 1:3 / 10, pressure = 1:3)\nsummary(a)\n\n
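# An extra step (a sketch, not part of the original example): even this\n# minimal object supports derived quantities, e.g. seawater density via\n# the UNESCO equation of state.\nswRho(a, eos = \"unesco\")\n\n# 2. Fix a typo in a station latitude (fake! 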
it's actually okay)\ndata(ctd)\nctd <- oceSetMetadata(\n ctd, \"latitude\", ctd[[\"latitude\"]] - 0.001,\n \"fix latitude typo in log book\"\n)\n\n\n\n"} {"package":"oce","topic":"ctd.cnv.gz","snippet":"### Name: ctd.cnv.gz\n### Title: Sample ctd File in .cnv Format\n### Aliases: ctd.cnv.gz\n\n### ** Examples\n\nread.oce(system.file(\"extdata\", \"ctd.cnv.gz\", package=\"oce\"))\n\n\n\n"} {"package":"oce","topic":"ctdDecimate","snippet":"### Name: ctdDecimate\n### Title: Decimate a ctd Profile\n### Aliases: ctdDecimate\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nplotProfile(ctd, \"salinity\", ylim = c(10, 0))\np <- seq(0, 45, 1)\nctd2 <- ctdDecimate(ctd, p = p)\nlines(ctd2[[\"salinity\"]], ctd2[[\"pressure\"]], col = \"blue\")\np <- seq(0, 45, 1)\nctd3 <- ctdDecimate(ctd, p = p, method = function(x, y, xout) {\n predict(smooth.spline(x, y, df = 30), xout)$y\n})\nlines(ctd3[[\"salinity\"]], ctd3[[\"pressure\"]], col = \"red\")\n\n\n\n"} {"package":"oce","topic":"ctdRepair","snippet":"### Name: ctdRepair\n### Title: Repair a Malformed ctd Object\n### Aliases: ctdRepair\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\n# Insert location information into 'data' slot, although it belongs in 'metadata'.\nctd@data$latitude <- ctd@metadata$latitude # Done by experts only!\nctd@data$longitude <- ctd@metadata$longitude # Done by experts only!\nrepaired <- ctdRepair(ctd)\n\n\n\n"} {"package":"oce","topic":"ctd_aml.csv.gz","snippet":"### Name: ctd_aml.csv.gz\n### Title: Sample ctd File in aml Format\n### Aliases: ctd_aml.csv.gz\n\n### ** Examples\n\nctd <- read.ctd.aml(system.file(\"extdata\", \"ctd_aml.csv.gz\", package=\"oce\"))\nsummary(ctd)\nplot(ctd)\n\n\n\n"} {"package":"oce","topic":"ctimeToSeconds","snippet":"### Name: ctimeToSeconds\n### Title: Interpret a Character String as a Time Interval\n### Aliases: ctimeToSeconds\n\n### ** Examples\n\nlibrary(oce)\ncat(\"10 = \", ctimeToSeconds(\"10\"), \"s\\n\", sep = \"\")\ncat(\"01:04 = \", ctimeToSeconds(\"01:04\"), \"s\\n\", sep = \"\")\ncat(\"1:00:00 = \", ctimeToSeconds(\"1:00:00\"), \"s\\n\", sep = \"\")\n\n\n"} {"package":"oce","topic":"curl","snippet":"### Name: curl\n### Title: Curl of 2D Vector Field\n### Aliases: curl\n\n### ** Examples\n\nlibrary(oce)\n# 1. Shear flow with uniform curl.\nx <- 1:4\ny <- 1:10\nu <- outer(x, y, function(x, y) y / 2)\nv <- outer(x, y, function(x, y) -x / 2)\nC <- curl(u, v, x, y, FALSE)\n\n# 2. 
Rankine vortex: constant curl inside circle, zero outside\nrankine <- function(x, y) {\n r <- sqrt(x^2 + y^2)\n theta <- atan2(y, x)\n speed <- ifelse(r < 1, 0.5 * r, 0.5 / r)\n list(u = -speed * sin(theta), v = speed * cos(theta))\n}\nx <- seq(-2, 2, length.out = 100)\ny <- seq(-2, 2, length.out = 50)\nu <- outer(x, y, function(x, y) rankine(x, y)$u)\nv <- outer(x, y, function(x, y) rankine(x, y)$v)\nC <- curl(u, v, x, y, FALSE)\n# plot results\npar(mfrow = c(2, 2))\nimagep(x, y, u, zlab = \"u\", asp = 1)\nimagep(x, y, v, zlab = \"v\", asp = 1)\nimagep(x, y, C$curl, zlab = \"curl\", asp = 1)\nhist(C$curl, breaks = 100)\n\n\n"} {"package":"oce","topic":"d200321-001.ctd.gz","snippet":"### Name: d200321-001.ctd.gz\n### Title: Sample ctd File in .ctd Format\n### Aliases: d200321-001.ctd.gz\n\n### ** Examples\n\nread.oce(system.file(\"extdata\", \"d200321-001.ctd.gz\", package=\"oce\"))\n\n\n\n"} {"package":"oce","topic":"d201211_0011.cnv.gz","snippet":"### Name: d201211_0011.cnv.gz\n### Title: Sample ctd File in .cnv Format\n### Aliases: d201211_0011.cnv.gz\n\n### ** Examples\n\nread.oce(system.file(\"extdata\", \"d201211_0011.cnv.gz\", package=\"oce\"))\n\n\n\n"} {"package":"oce","topic":"decimate","snippet":"### Name: decimate\n### Title: Smooth and Decimate, or Subsample, an oce Object\n### Aliases: decimate\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\nplot(adp)\nadpDec <- decimate(adp, by = 2, filter = c(1 / 4, 1 / 2, 1 / 4))\nplot(adpDec)\n\n\n"} {"package":"oce","topic":"decodeTime","snippet":"### Name: decodeTime\n### Title: Oce Version of as.POSIXct\n### Aliases: decodeTime\n\n### ** Examples\n\ndecodeTime(\"July 1 2013 01:02:03\")\ndecodeTime(\"Jul 1 2013 01:02:03\")\ndecodeTime(\"1 July 2013 01:02:03\")\ndecodeTime(\"1 Jul 2013 01:02:03\")\ndecodeTime(\"2013-07-01 01:02:03\")\ndecodeTime(\"2013/07/01 01:02:03\")\ndecodeTime(\"2013/07/01\")\n\n\n\n"} {"package":"oce","topic":"despike","snippet":"### Name: despike\n### Title: Remove Spikes From a Time Series\n### Aliases: despike\n\n### ** Examples\n\nn <- 50\nx <- 1:n\ny <- rnorm(n = n)\ny[n / 2] <- 10 # 10 standard deviations\nplot(x, y, type = \"l\")\nlines(x, despike(y), col = \"red\")\nlines(x, despike(y, reference = \"smooth\"), col = \"darkgreen\")\nlines(x, despike(y, reference = \"trim\", min = -3, max = 3), col = \"blue\")\nlegend(\"topright\",\n lwd = 1, col = c(\"black\", \"red\", \"darkgreen\", \"blue\"),\n legend = c(\"raw\", \"median\", \"smooth\", \"trim\")\n)\n\n# add a spike to a CTD object\ndata(ctd)\nplot(ctd)\nT <- ctd[[\"temperature\"]]\nT[10] <- T[10] + 10\nctd[[\"temperature\"]] <- T\nCTD <- despike(ctd)\nplot(CTD)\n\n\n"} {"package":"oce","topic":"detrend","snippet":"### Name: detrend\n### Title: Detrend a Set of Observations\n### Aliases: detrend\n\n### ** Examples\n\nx <- seq(0, 0.9 * pi, length.out = 50)\ny <- sin(x)\ny[1] <- NA\ny[10] <- NA\nplot(x, y, ylim = c(0, 1))\nd <- detrend(x, y)\npoints(x, d$Y, pch = 20)\nabline(d$a, d$b, col = \"blue\")\nabline(h = 0)\npoints(x, d$Y + d$a + d$b * x, col = \"blue\", pch = \"+\")\n\n\n"} {"package":"oce","topic":"drawDirectionField","snippet":"### Name: drawDirectionField\n### Title: Draw a Direction Field\n### Aliases: drawDirectionField\n\n### ** Examples\n\nlibrary(oce)\nplot(c(-1.5, 1.5), c(-1.5, 1.5), xlab = \"\", ylab = \"\", type = \"n\")\ndrawDirectionField(x = rep(0, 2), y = rep(0, 2),\n u = c(1, 1), v = c(1, -1), scalex = 0.5, add = TRUE)\nplot(c(-1.5, 1.5), c(-1.5, 1.5), xlab = \"\", ylab = \"\", type = \"n\")\ndrawDirectionField(x = rep(0, 2), y = 
rep(0, 2),\n u = c(1, 1), v = c(1, -1), scalex = 0.5, add = TRUE, type = 2)\n\n# 2D example\nx <- seq(-2, 2, 0.1)\ny <- x\nxx <- expand.grid(x, y)[, 1]\nyy <- expand.grid(x, y)[, 2]\nz <- matrix(xx * exp(-xx^2 - yy^2), nrow = length(x))\ngz <- grad(z, x, y)\ndrawDirectionField(x, y, gz$gx, gz$gy, scalex = 0.5, type = 2, len = 0.02)\noceContour(x, y, z, add = TRUE)\n\n\n\n"} {"package":"oce","topic":"drawPalette","snippet":"### Name: drawPalette\n### Title: Draw a Palette, Leaving Margins Suitable for an Accompanying\n### Plot\n### Aliases: drawPalette\n\n### ** Examples\n\n\nlibrary(oce)\npar(mgp = getOption(\"oceMgp\"))\n\n# 1. A three-panel plot\npar(mfrow = c(3, 1), mar = c(3, 3, 1, 1))\nomar <- par(\"mar\") # save initial margin\n\n# 1a. top panel: simple case with Viridis scheme\ndrawPalette(zlim = c(0, 1), col = oce.colorsViridis(10))\nplot(1:10, 1:10, col = oce.colorsViridis(10)[1:10], pch = 20, cex = 3, xlab = \"x\", ylab = \"y\")\npar(mar = omar) # reset margin\n\n# 1b. middle panel: colormap\ncm <- colormap(name = \"gmt_globe\")\ndrawPalette(colormap = cm)\nicol <- seq_along(cm$col)\nplot(icol, cm$breaks[icol],\n pch = 20, cex = 2, col = cm$col,\n xlab = \"Palette index\", ylab = \"Palette breaks\"\n)\npar(mar = omar) # reset margin\n\n# 1c. bottom panel: space for palette (to line up graphs)\ndrawPalette(plot = FALSE)\nplot(1:10, 1:10, col = oce.colorsViridis(10)[1:10], pch = 20, cex = 3, xlab = \"x\", ylab = \"y\")\npar(mar = omar) # reset margin\n\n# 2. Use layout to mimic the action of imagep(), with the width\n# of the palette region being 14 percent of figure width.\nd <- 0.14\nlayout(matrix(1:2, nrow = 1), widths = c(1 - d, d))\nimage(volcano, col = oce.colorsViridis(100), zlim = c(90, 200))\ncontour(volcano, add = TRUE)\ndrawPalette(c(90, 200), fullpage = TRUE, col = oce.colorsViridis)\n\n\n"} {"package":"oce","topic":"enuToOtherAdp","snippet":"### Name: enuToOtherAdp\n### Title: Convert adp Object from ENU Coordinate to Rotated Coordinate\n### Aliases: enuToOtherAdp\n\n### ** Examples\n\n\nlibrary(oce)\ndata(adp)\no <- enuToOtherAdp(adp, heading = -31.5)\nplot(o, which = 1:3)\n\n\n\n"} {"package":"oce","topic":"errorbars","snippet":"### Name: errorbars\n### Title: Draw Error Bars on an Existing xy Diagram\n### Aliases: errorbars\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nS <- ctd[[\"salinity\"]]\nT <- ctd[[\"temperature\"]]\nplot(S, T)\nerrorbars(S, T, 0.05, 0.5)\n\n\n"} {"package":"oce","topic":"fillGap","snippet":"### Name: fillGap\n### Title: Fill a Gap in an oce Object\n### Aliases: fillGap\n\n### ** Examples\n\nlibrary(oce)\n# Integers\nx <- c(1:2, NA, NA, 5:6)\ny <- fillGap(x)\nprint(data.frame(x, y))\n# Floats\nx <- x + 0.1\ny <- fillGap(x)\nprint(data.frame(x, y))\n\n\n"} {"package":"oce","topic":"formatCI","snippet":"### Name: formatCI\n### Title: Format a Confidence Interval\n### Aliases: formatCI\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1: mean=1, uncertainty=0.05, in +/- notation.\nformatCI(c(0.95, 1.05)) # \"1+/-0.05\"\n\n# Example 2: save mean and uncertainty, but in parentheses notation.\nformatCI(c(0.95, 1.05), style = \"parentheses\") # \"1.00(5)\"\n\n# example 3: using t.test to find a CI.\na <- rnorm(100, mean = 10, sd = 1)\nCI <- t.test(a)$conf.int\nformatCI(CI)\nformatCI(CI, style = \"parentheses\")\n\n# example 4: specifying a model\nx <- seq(0, 10, 0.1)\ny <- 2 + 3 * x + rnorm(x, sd = 0.1)\nm <- lm(y ~ x)\nformatCI(model = m)\nformatCI(model = m, style = \"parentheses\")\n\n\n\n"} {"package":"oce","topic":"formatPosition","snippet":"### 
Name: formatPosition\n### Title: Format Geographical Position in Degrees and Minutes\n### Aliases: formatPosition\n\n### ** Examples\n\nlibrary(oce)\nformatPosition(10 + 1:10 / 60 + 2.8 / 3600)\nformatPosition(10 + 1:10 / 60 + 2.8 / 3600, type = \"string\")\n\n\n\n"} {"package":"oce","topic":"geodDist","snippet":"### Name: geodDist\n### Title: Compute Geodesic Distance on Surface of Earth\n### Aliases: geodDist\n\n### ** Examples\n\nlibrary(oce)\nkm <- geodDist(100, 45, 100, 46)\ndata(section)\ngeodDist(section)\ngeodDist(section, alongPath = TRUE)\n\n\n\n"} {"package":"oce","topic":"geodGc","snippet":"### Name: geodGc\n### Title: Great-circle Segments Between Points on Earth\n### Aliases: geodGc\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\nmapPlot(coastlineWorld,\n type = \"l\",\n longitudelim = c(-80, 10), latitudelim = c(35, 80),\n projection = \"+proj=merc\"\n)\n# Great circle from New York to Paris (Lindbergh's flight)\nl <- geodGc(c(-73.94, 2.35), c(40.67, 48.86), 1)\nmapLines(l$longitude, l$latitude, col = \"red\", lwd = 2)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"geodXy","snippet":"### Name: geodXy\n### Title: Convert From Geographical to Geodesic Coordinates\n### Aliases: geodXy\n\n### ** Examples\n\n## No test: \n# Develop a transect-based axis system for final data(section) stations\nlibrary(oce)\ndata(section)\nlon <- tail(section[[\"longitude\", \"byStation\"]], 26)\nlat <- tail(section[[\"latitude\", \"byStation\"]], 26)\nlonR <- tail(lon, 1)\nlatR <- tail(lat, 1)\ndata(coastlineWorld)\nmapPlot(coastlineWorld,\n projection = \"+proj=merc\",\n longitudelim = c(-75, -65), latitudelim = c(35, 43), col = \"gray\"\n)\nmapPoints(lon, lat)\nXY <- geodXy(lon, lat, mean(lon), mean(lat))\nangle <- 180 / pi * atan(coef(lm(y ~ x, data = XY))[2])\nmapCoordinateSystem(lonR, latR, 500, angle, col = 2)\n# Compare UTM calculation\nUTM <- lonlat2utm(lon, lat, zone = 18) # we need to set the zone for this task!\nangleUTM <- 180 / pi * atan(coef(lm(northing ~ easting, data = UTM))[2])\nmapCoordinateSystem(lonR, latR, 500, angleUTM, col = 3)\nlegend(\"topright\",\n lwd = 1, col = 2:3, bg = \"white\", title = \"Axis Rotation Angle\",\n legend = c(\n sprintf(\"geod: %.1f deg\", angle),\n sprintf(\"utm: %.1f deg\", angleUTM)\n )\n)\n## End(No test)\n\n\n"} {"package":"oce","topic":"grad","snippet":"### Name: grad\n### Title: Calculate Matrix Gradient\n### Aliases: grad\n\n### ** Examples\n\n# 1. Built-in volcano dataset\ng <- grad(volcano)\npar(mfrow = c(2, 2), mar = c(3, 3, 1, 1), mgp = c(2, 0.7, 0))\nimagep(volcano, zlab = \"h\")\nimagep(g$g, zlab = \"|grad(h)|\")\nzlim <- c(-1, 1) * max(g$g)\nimagep(g$gx, zlab = \"dh/dx\", zlim = zlim)\nimagep(g$gy, zlab = \"dh/dy\", zlim = zlim)\n\n# 2. 
Geostrophic flow around an eddy\nlibrary(oce)\ndx <- 5e3\ndy <- 10e3\nx <- seq(-200e3, 200e3, dx)\ny <- seq(-200e3, 200e3, dy)\nR <- 100e3\nh <- outer(x, y, function(x, y) 500 * exp(-(x^2 + y^2) / R^2))\ngrad <- grad(h, x, y)\npar(mfrow = c(2, 2), mar = c(3, 3, 1, 1), mgp = c(2, 0.7, 0))\ncontour(x, y, h, asp = 1, main = expression(h))\nf <- 1e-4\ngprime <- 9.8 * 1 / 1024\nu <- -(gprime / f) * grad$gy\nv <- (gprime / f) * grad$gx\ncontour(x, y, u, asp = 1, main = expression(u))\ncontour(x, y, v, asp = 1, main = expression(v))\ncontour(x, y, sqrt(u^2 + v^2), asp = 1, main = expression(speed))\n\n\n\n"} {"package":"oce","topic":"gravity","snippet":"### Name: gravity\n### Title: Acceleration Due to Earth Gravity\n### Aliases: gravity\n\n### ** Examples\n\ng <- gravity(45) # 9.8\n\n\n"} {"package":"oce","topic":"handleFlags,adp-method","snippet":"### Name: handleFlags,adp-method\n### Title: Handle Flags in adp Objects\n### Aliases: handleFlags,adp-method\n\n### ** Examples\n\n# Flag low \"goodness\" or high \"error beam\" values.\nlibrary(oce)\ndata(adp)\n# Same as Example 2 of ?'setFlags,adp-method'\nv <- adp[[\"v\"]]\ni2 <- array(FALSE, dim = dim(v))\ng <- adp[[\"g\", \"numeric\"]]\n# Set thresholds on percent \"goodness\" and error \"velocity\".\nG <- 25\nV4 <- 0.45\nfor (k in 1:3) {\n i2[, , k] <- ((g[, , k] + g[, , 4]) < G) | (v[, , 4] > V4)\n}\nadpQC <- initializeFlags(adp, \"v\", 2)\nadpQC <- setFlags(adpQC, \"v\", i2, 3)\nadpClean <- handleFlags(adpQC, flags = list(3), actions = list(\"NA\"))\n# Demonstrate (subtle) change graphically.\npar(mfcol = c(2, 1))\nplot(adp, which = \"u1\", drawTimeRange = FALSE)\nplot(adpClean, which = \"u1\", drawTimeRange = FALSE)\nt0 <- 1214510000 # from locator()\narrows(t0, 20, t0, 35, length = 0.1, lwd = 3, col = \"magenta\")\nmtext(\"Slight change above arrow\", col = \"magenta\", font = 2)\n\n\n\n"} {"package":"oce","topic":"handleFlags,argo-method","snippet":"### Name: handleFlags,argo-method\n### Title: Handle Flags in argo Objects\n### Aliases: handleFlags,argo-method handleFlags.argo\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\nargoNew <- handleFlags(argo)\n# Demonstrate replacement, looking at the second profile\nf <- argo[[\"salinityFlag\"]][, 2]\ndf <- data.frame(flag = f, orig = argo[[\"salinity\"]][, 2], new = argoNew[[\"salinity\"]][, 2])\ndf[11:15, ] # notice line 13\n\n\n\n"} {"package":"oce","topic":"handleFlags,ctd-method","snippet":"### Name: handleFlags,ctd-method\n### Title: Handle Flags in ctd Objects\n### Aliases: handleFlags,ctd-method\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nstn <- section[[\"station\", 100]]\n# 1. Default: anything not flagged as 2 is set to NA, to focus\n# solely on 'good', in the World Hydrographic Program scheme.\nSTN1 <- handleFlags(stn, flags = list(c(1, 3:9)))\ndata.frame(old = stn[[\"salinity\"]], new = STN1[[\"salinity\"]], salinityFlag = stn[[\"salinityFlag\"]])\n\n# 2. Use bottle salinity, if it is good and ctd is bad\nreplace <- 2 == stn[[\"salinityBottleFlag\"]] & 2 != stn[[\"salinityFlag\"]]\nS <- ifelse(replace, stn[[\"salinityBottle\"]], stn[[\"salinity\"]])\nSTN2 <- oceSetData(stn, \"salinity\", S)\n\n# 3. 
Use smoothed TS relationship to nudge questionable data.\nf <- function(x) {\n S <- x[[\"salinity\"]]\n T <- x[[\"temperature\"]]\n df <- 0.5 * length(S) # smooths a bit\n sp <- smooth.spline(T, S, df = df)\n 0.5 * (S + predict(sp, T)$y)\n}\npar(mfrow = c(1, 2))\nSTN3 <- handleFlags(stn, flags = list(salinity = c(1, 3:9)), action = list(salinity = f))\nplotProfile(stn, \"salinity\", mar = c(3, 3, 3, 1))\np <- stn[[\"pressure\"]]\npar(mar = c(3, 3, 3, 1))\nplot(STN3[[\"salinity\"]] - stn[[\"salinity\"]], p, ylim = rev(range(p)))\n\n# 4. Single-variable flags (vector specification)\ndata(section)\n# Multiple-flag scheme: one per data item\nA <- section[[\"station\", 100]]\ndeep <- A[[\"pressure\"]] > 1500\nflag <- ifelse(deep, 7, 2)\nfor (flagName in names(A[[\"flags\"]])) {\n A[[paste(flagName, \"Flag\", sep = \"\")]] <- flag\n}\nAf <- handleFlags(A)\nstopifnot(all.equal(is.na(Af[[\"salinity\"]]), deep))\n\n# 5. Single-variable flags (list specification)\nB <- section[[\"station\", 100]]\nB[[\"flags\"]] <- list(flag)\nBf <- handleFlags(B)\nstopifnot(all.equal(is.na(Bf[[\"salinity\"]]), deep))\n\n\n\n"} {"package":"oce","topic":"handleFlags,section-method","snippet":"### Name: handleFlags,section-method\n### Title: Handle flags in section Objects\n### Aliases: handleFlags,section-method handleFlags.section\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nsection2 <- handleFlags(section, flags = c(1, 3:9))\npar(mfrow = c(2, 1))\nplotTS(section)\nplotTS(section2)\n\n\n\n"} {"package":"oce","topic":"imagep","snippet":"### Name: imagep\n### Title: Plot an Image with a Color Palette\n### Aliases: imagep\n\n### ** Examples\n\nlibrary(oce)\n\n# 1. simplest use\nimagep(volcano)\n\n# 2. something oceanographic (internal-wave speed)\nh <- seq(0, 50, length.out = 100)\ndrho <- seq(1, 3, length.out = 200)\nspeed <- outer(h, drho, function(drho, h) sqrt(9.8 * drho * h / 1024))\nimagep(h, drho, speed,\n xlab = \"Equivalent depth [m]\",\n ylab = expression(paste(Delta * rho, \" [kg/m^3]\")),\n zlab = \"Internal-wave speed [m/s]\"\n)\n\n# 3. fancy labelling on atan() function\nx <- seq(0, 1, 0.01)\ny <- seq(0, 1, 0.01)\nangle <- outer(x, y, function(x, y) atan2(y, x))\nimagep(x, y, angle,\n filledContour = TRUE, breaks = c(0, pi / 4, pi / 2),\n col = c(\"lightgray\", \"darkgray\"),\n at = c(0, pi / 4, pi / 2),\n labels = c(0, expression(pi / 4), expression(pi / 2))\n)\n\n# 5. y-axis flipping\npar(mfrow = c(2, 2))\ndata(adp)\nd <- adp[[\"distance\"]]\nt <- adp[[\"time\"]]\nu <- adp[[\"v\"]][, , 1]\nimagep(t, d, u, drawTimeRange = FALSE)\nmtext(\"normal\")\nimagep(t, d, u, flipy = TRUE, drawTimeRange = FALSE)\nmtext(\"flipy\")\nimagep(t, d, u, ylim = rev(range(d)), drawTimeRange = FALSE)\nmtext(\"ylim\")\nimagep(t, d, u, ylim = rev(range(d)), flipy = TRUE, drawTimeRange = FALSE)\nmtext(\"flipy and ylim\")\npar(mfrow = c(1, 1))\n\n# 6. a colormap case\ndata(topoWorld)\ncm <- colormap(name = \"gmt_globe\")\nimagep(topoWorld, colormap = cm)\n\n\n\n"} {"package":"oce","topic":"initialize,ctd-method","snippet":"### Name: initialize,ctd-method\n### Title: Initialize Storage for a ctd Object\n### Aliases: initialize,ctd-method initialize.ctd\n\n### ** Examples\n\n\n# 1. empty\nnew(\"ctd\")\n\n# 2. fake data with no location information, so can only\n# plot with the UNESCO equation of state.\n# NOTE: always name arguments, in case the default order gets changed\nctd <- new(\"ctd\", salinity = 35 + 1:3 / 10, temperature = 10 - 1:3 / 10, pressure = 1:3)\nsummary(ctd)\nplot(ctd, eos = \"unesco\")\n\n# 3. 
as 2, but insert location and plot with GSW equation of state.\nctd <- oceSetMetadata(ctd, \"latitude\", 44)\nctd <- oceSetMetadata(ctd, \"longitude\", -63)\nplot(ctd, eos = \"gsw\")\n\n\n\n"} {"package":"oce","topic":"integerToAscii","snippet":"### Name: integerToAscii\n### Title: Infer ASCII Code From an Integer Value\n### Aliases: integerToAscii\n\n### ** Examples\n\nlibrary(oce)\nA <- integerToAscii(65)\ncat(\"A=\", A, \"\\n\")\n\n\n"} {"package":"oce","topic":"integrateTrapezoid","snippet":"### Name: integrateTrapezoid\n### Title: Trapezoidal Integration\n### Aliases: integrateTrapezoid\n\n### ** Examples\n\nx <- seq(0, 1, length.out = 10) # try larger length.out to see if area approaches 2\ny <- 2 * x + 3 * x^2\nA <- integrateTrapezoid(x, y)\ndA <- integrateTrapezoid(x, y, \"dA\")\ncA <- integrateTrapezoid(x, y, \"cA\")\nprint(A)\nprint(sum(dA))\nprint(tail(cA, 1))\nprint(integrateTrapezoid(diff(x[1:2]), y))\nprint(integrateTrapezoid(y))\n\n\n"} {"package":"oce","topic":"interpBarnes","snippet":"### Name: interpBarnes\n### Title: Grid Data Using the Barnes Algorithm\n### Aliases: interpBarnes\n\n### ** Examples\n\nlibrary(oce)\n\n# 1. contouring example, with wind-speed data from Koch et al. (1983)\ndata(wind)\nu <- interpBarnes(wind$x, wind$y, wind$z)\ncontour(u$xg, u$yg, u$zg, labcex = 1)\ntext(wind$x, wind$y, wind$z, cex = 0.7, col = \"blue\")\ntitle(\"Numbers are the data\")\n\n# 2. As 1, but blank out spots where data are sparse\nu <- interpBarnes(wind$x, wind$y, wind$z, trim = 0.1)\ncontour(u$xg, u$yg, u$zg, level = seq(0, 30, 1))\npoints(wind$x, wind$y, cex = 1.5, pch = 20, col = \"blue\")\n\n# 3. As 1, but interpolate back to points, and display the percent mismatch\nu <- interpBarnes(wind$x, wind$y, wind$z)\ncontour(u$xg, u$yg, u$zg, labcex = 1)\nmismatch <- 100 * (wind$z - u$zd) / wind$z\ntext(wind$x, wind$y, round(mismatch), col = \"blue\")\ntitle(\"Numbers are percent mismatch between grid and data\")\n\n# 4. As 3, but contour the mismatch\nmismatchGrid <- interpBarnes(wind$x, wind$y, mismatch)\ncontour(mismatchGrid$xg, mismatchGrid$yg, mismatchGrid$zg, labcex = 1)\n\n# 5. One-dimensional example, smoothing a salinity profile\ndata(ctd)\np <- ctd[[\"pressure\"]]\ny <- rep(1, length(p)) # fake y data, with arbitrary value\nS <- ctd[[\"salinity\"]]\npg <- pretty(p, n = 100)\ng <- interpBarnes(p, y, S, xg = pg, xr = 1)\nplot(S, p, cex = 0.5, col = \"blue\", ylim = rev(range(p)))\nlines(g$zg, g$xg, col = \"red\")\n\n\n"} {"package":"oce","topic":"julianCenturyAnomaly","snippet":"### Name: julianCenturyAnomaly\n### Title: Convert Julian-Day-Number to Julian Century\n### Aliases: julianCenturyAnomaly\n\n### ** Examples\n\n\nt <- ISOdatetime(1978, 11, 13, 4, 35, 0, tz = \"UTC\")\njca <- julianCenturyAnomaly(julianDay(t))\ncat(format(t), \"is Julian Century anomaly\", format(jca, digits = 8), \"\\n\")\n\n\n\n"} {"package":"oce","topic":"julianDay","snippet":"### Name: julianDay\n### Title: Convert a Time to a Julian Day\n### Aliases: julianDay\n\n### ** Examples\n\nlibrary(oce)\n# example from Meeus\nt <- ISOdatetime(1977, 4, 26, hour = 0, min = 0, sec = 0, tz = \"UTC\") + 0.4 * 86400\nstopifnot(all.equal(julianDay(t), 2443259.9))\n\n\n\n"} {"package":"oce","topic":"labelWithUnit","snippet":"### Name: labelWithUnit\n### Title: Create Label With Unit\n### Aliases: labelWithUnit\n\n### ** Examples\n\nlibrary(oce)\n# 1. 
temperature has a predefined unit, but this can be overruled\nlabelWithUnit(\"temperature\")\nlabelWithUnit(\n \"temperature\",\n list(unit = expression(m / s), scale = \"erroneous\")\n)\n# 2. phosphate lacks a predefined unit\nlabelWithUnit(\"phosphate\")\ndata(section)\nlabelWithUnit(\n \"phosphate\",\n section[[\"station\", 1]][[\"phosphateUnit\"]]\n)\n\n\n\n"} {"package":"oce","topic":"lobo","snippet":"### Name: lobo\n### Title: Sample lobo Data\n### Aliases: lobo\n\n### ** Examples\n\nlibrary(oce)\ndata(lobo)\nsummary(lobo)\nplot(lobo)\n\n\n\n"} {"package":"oce","topic":"lon360","snippet":"### Name: lon360\n### Title: Change Longitude From -180:180 to 0:360 Convention\n### Aliases: lon360\n\n### ** Examples\n\nlon360(c(179, -179))\n\n\n"} {"package":"oce","topic":"lonlat2map","snippet":"### Name: lonlat2map\n### Title: Convert Longitude and Latitude to X and Y\n### Aliases: lonlat2map\n\n### ** Examples\n\nlibrary(oce)\n# Cape Split, in the Minas Basin of the Bay of Fundy\ncs <- list(longitude = -64.49657, latitude = 45.33462)\nxy <- lonlat2map(cs, projection = \"+proj=merc\")\nmap2lonlat(xy)\n\n\n\n"} {"package":"oce","topic":"lonlat2utm","snippet":"### Name: lonlat2utm\n### Title: Convert Longitude and Latitude to UTM\n### Aliases: lonlat2utm\n\n### ** Examples\n\nlibrary(oce)\n# Cape Split, in the Minas Basin of the Bay of Fundy\nlonlat2utm(-64.496567, 45.334626)\n\n\n\n"} {"package":"oce","topic":"lookWithin","snippet":"### Name: lookWithin\n### Title: Look Within the First Element of a List for Replacement Values\n### Aliases: lookWithin\n\n### ** Examples\n\n# 1. If first item is not a CTD object, just return the input\nlookWithin(list(a = 1, b = 2)) # returns a list\n# 2. Extract salinity from a CTD object\ndata(ctd)\nstr(lookWithin(list(salinity = ctd)))\n# 3. Extract salinity and temperature. Note that the\n# value specified for temperature is ignored; all that matters\n# is that temperature is named.\nstr(lookWithin(list(salinity = ctd, temperature = NULL)))\n# 4. How it is used by swRho()\nrho1 <- swRho(ctd, eos = \"unesco\")\nrho2 <- swRho(ctd[[\"salinity\"]], ctd[[\"temperature\"]], ctd[[\"pressure\"]], eos = \"unesco\")\nstopifnot(all.equal(rho1, rho2))\n\n\n"} {"package":"oce","topic":"lowpass","snippet":"### Name: lowpass\n### Title: Lowpass Digital Filtering\n### Aliases: lowpass\n\n### ** Examples\n\nlibrary(oce)\npar(mfrow = c(1, 2), mar = c(4, 4, 1, 1))\ncoef <- lowpass(n = 5, coefficients = TRUE)\nplot(-2:2, coef, ylim = c(0, 1), xlab = \"Lag\", ylab = \"Coefficient\")\nx <- seq(-5, 5) + rnorm(11)\nplot(1:11, x, type = \"o\", xlab = \"time\", ylab = \"x and X\")\nX <- lowpass(x, n = 5)\nlines(1:11, X, col = 2)\npoints(1:11, X, col = 2)\n\n\n"} {"package":"oce","topic":"magneticField","snippet":"### Name: magneticField\n### Title: Earth Magnetic Declination, Inclination, and Intensity\n### Aliases: magneticField\n\n### ** Examples\n\nlibrary(oce)\n# 1. Today's value at Halifax NS\nmagneticField(-(63 + 36 / 60), 44 + 39 / 60, Sys.Date())\n\n# 2. 
World map of declination in year 2000.\n## No test: \ndata(coastlineWorld)\npar(mar = rep(0.5, 4)) # no axes on whole-world projection\nmapPlot(coastlineWorld, projection = \"+proj=robin\", col = \"lightgray\")\n# Construct matrix holding declination\nlon <- seq(-180, 180)\nlat <- seq(-90, 90)\ndec2000 <- function(lon, lat) {\n magneticField(lon, lat, 2000)$declination\n}\ndec <- outer(lon, lat, dec2000) # hint: outer() is very handy!\n# Contour, unlabelled for small increments, labeled for\n# larger increments.\nmapContour(lon, lat, dec,\n col = \"blue\", levels = seq(-180, -5, 5),\n lty = 3, drawlabels = FALSE\n)\nmapContour(lon, lat, dec, col = \"blue\", levels = seq(-180, -20, 20))\nmapContour(lon, lat, dec,\n col = \"red\", levels = seq(5, 180, 5),\n lty = 3, drawlabels = FALSE\n)\nmapContour(lon, lat, dec, col = \"red\", levels = seq(20, 180, 20))\nmapContour(lon, lat, dec, levels = 180, col = \"black\", lwd = 2, drawlabels = FALSE)\nmapContour(lon, lat, dec, levels = 0, col = \"black\", lwd = 2)\n## End(No test)\n\n# 3. Declination differences between versions 12 and 13\n## No test: \nlon <- seq(-180, 180)\nlat <- seq(-90, 90)\ndecDiff <- function(lon, lat) {\n old <- magneticField(lon, lat, 2020, version = 12)$declination\n new <- magneticField(lon, lat, 2020, version = 13)$declination\n new - old # positive where version 13 predicts larger declination\n}\ndecDiff <- outer(lon, lat, decDiff)\ndecDiff <- ifelse(decDiff > 180, decDiff - 360, decDiff)\n# Overall (mean) shift is small, about 0.1 deg in magnitude\nt.test(decDiff)\n# View histogram, narrowed to small differences\npar(mar = c(3.5, 3.5, 2, 2), mgp = c(2, 0.7, 0))\nhist(decDiff,\n breaks = seq(-180, 180, 0.05), xlim = c(-2, 2),\n xlab = \"Declination difference [deg] from version=12 to version=13\",\n main = \"Predictions for year 2020\"\n)\nprint(quantile(decDiff, c(0.025, 0.975)))\n# Note that the large differences are at high latitudes\nimagep(lon, lat, decDiff, zlim = c(-1, 1) * max(abs(decDiff)))\nlines(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]])\n## End(No test)\n\n\n"} {"package":"oce","topic":"makeFilter","snippet":"### Name: makeFilter\n### Title: Make a Digital Filter\n### Aliases: makeFilter\n\n### ** Examples\n\nlibrary(oce)\n\n# 1. Demonstrate step-function response\ny <- c(rep(1, 10), rep(-1, 10))\nx <- seq_along(y)\nplot(x, y, type = \"o\", ylim = c(-1.05, 1.05))\nBH <- makeFilter(\"blackman-harris\", 11, asKernel = FALSE)\nH <- makeFilter(\"hamming\", 11, asKernel = FALSE)\nyBH <- stats::filter(y, BH)\npoints(x, yBH, col = 2, type = \"o\")\nyH <- stats::filter(y, H)\npoints(yH, col = 3, type = \"o\")\nlegend(\"topright\",\n col = 1:3, cex = 2 / 3, pch = 1,\n legend = c(\"input\", \"Blackman Harris\", \"Hamming\")\n)\n\n# 2. 
Show theoretical and practical filter gain, where\n# the latter is based on random white noise, and\n# includes a particular value for the spans\n# argument of spectrum(), etc.\n# A sketch of that comparison (an editorial addition, not from the oce\n# manual; the kernel length, series length, and spans value are\n# assumptions):\nset.seed(1)\nBH2 <- makeFilter(\"blackman-harris\", 11, asKernel = FALSE)\nnoise <- rnorm(8192)\nnoiseBH <- stats::filter(noise, BH2)\nok <- !is.na(noiseBH)\nsRaw <- spectrum(noise[ok], spans = c(13, 7, 5), plot = FALSE)\nsBH <- spectrum(as.numeric(noiseBH[ok]), spans = c(13, 7, 5), plot = FALSE)\n# Empirical power gain: filtered spectrum over raw spectrum\nplot(sRaw$freq, sBH$spec / sRaw$spec,\n type = \"l\", xlab = \"Frequency\", ylab = \"Power gain\"\n)\n# Theoretical squared gain of the same kernel, for comparison\nk <- seq_along(BH2) - (length(BH2) + 1) / 2\ngainTheory <- sapply(sRaw$freq, function(f) abs(sum(BH2 * exp(-2i * pi * f * k)))^2)\nlines(sRaw$freq, gainTheory, col = 2)\nlegend(\"topright\", col = 1:2, lwd = 1, legend = c(\"empirical\", \"theoretical\"))\n\n\n\n"} {"package":"oce","topic":"map2lonlat","snippet":"### Name: map2lonlat\n### Title: Convert X and Y to Longitude and Latitude\n### Aliases: map2lonlat\n\n### ** Examples\n\nlibrary(oce)\n# Cape Split, in the Minas Basin of the Bay of Fundy\ncs <- list(longitude = -64.49657, latitude = 45.33462)\nxy <- lonlat2map(cs, projection = \"+proj=merc\")\nmap2lonlat(xy)\n\n\n\n"} {"package":"oce","topic":"mapArrows","snippet":"### Name: mapArrows\n### Title: Add Arrows to a Map\n### Aliases: mapArrows\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\nmapPlot(coastlineWorld,\n longitudelim = c(-120, -60), latitudelim = c(30, 60),\n col = \"lightgray\", projection = \"+proj=lcc +lat_1=45 +lon_0=-100\"\n)\nlon <- seq(-120, -75, 15)\nn <- length(lon)\nlat <- 45 + rep(0, n)\n# Draw meridional arrows in N America, from 45N to 60N.\nmapArrows(lon, lat, lon, lat + 15, length = 0.05, col = \"blue\")\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapAxis","snippet":"### Name: mapAxis\n### Title: Add Axis Labels to an Existing Map\n### Aliases: mapAxis\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\npar(mar = c(2, 2, 1, 1))\nlonlim <- c(-180, 180)\nlatlim <- c(70, 110)\n# In mapPlot() call, note axes and grid args, to\n# prevent over-plotting of defaults. Some adjustments\n# might be required to the mapGrid() arguments, to\n# get agreement with the axis. This is why both\n# mapGrid() and mapAxis() are best avoided; it is\n# simpler to let mapPlot() handle these things.\nmapPlot(coastlineWorld,\n projection = \"+proj=stere +lat_0=90\",\n longitudelim = lonlim, latitudelim = latlim,\n col = \"tan\", axes = FALSE, grid = FALSE\n)\nmapGrid(15, 15)\nmapAxis(axisStyle = 5)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapGrid","snippet":"### Name: mapGrid\n### Title: Add a Longitude and Latitude Grid to an Existing Map\n### Aliases: mapGrid\n\n### ** Examples\n\n## No test: \nif (utils::packageVersion(\"sf\") != \"0.9.8\") {\n # sf version 0.9-8 has a problem with this projection\n library(oce)\n data(coastlineWorld)\n par(mar = c(2, 2, 1, 1))\n # In mapPlot() call, note axes and grid args, to\n # prevent over-plotting of defaults.\n mapPlot(coastlineWorld,\n type = \"l\", projection = \"+proj=ortho\",\n axes = FALSE, grid = FALSE\n )\n mapGrid(15, 15)\n}\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapLines","snippet":"### Name: mapLines\n### Title: Add Lines to a Map\n### Aliases: mapLines\n\n### ** Examples\n\n## No test: \nif (utils::packageVersion(\"sf\") != \"0.9.8\") {\n # sf version 0.9-8 has a problem with this projection\n library(oce)\n data(coastlineWorld)\n mapPlot(coastlineWorld,\n type = \"l\",\n longitudelim = c(-80, 10), latitudelim = c(0, 120),\n projection = \"+proj=ortho +lon_0=-40\"\n )\n lon <- c(-63.5744, 0.1062) # Halifax CA to London UK\n lat <- c(44.6479, 51.5171)\n mapPoints(lon, lat, col = \"red\")\n mapLines(lon, lat, col = \"red\")\n}\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapLongitudeLatitudeXY","snippet":"### Name: mapLongitudeLatitudeXY\n### Title: Convert From Longitude and Latitude to X and Y\n### Aliases: mapLongitudeLatitudeXY\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\npar(mfrow = c(2, 1), mar = rep(2, 4))\nmapPlot(coastlineWorld, projection = \"+proj=moll\") # sets a projection\nxy <- 
mapLongitudeLatitudeXY(coastlineWorld)\nplot(xy, type = \"l\", asp = 1)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapPlot","snippet":"### Name: mapPlot\n### Title: Draw a Map\n### Aliases: mapPlot\n\n### ** Examples\n\n# NOTE: the map-projection vignette has many more examples.\nlibrary(oce)\ndata(coastlineWorld)\n# Demonstrate a high-latitude view using a built-in \"CRS\" value that is used\n# by the National Snow and Ice Data Center (NSIDC) for representing\n# the northern-hemisphere ice zone. The view is meant to mimic the figure\n# at the top of the document entitled \"A Guide to NSIDC's Polar Stereographic\n# Projection\" at https://nsidc.org/data/user-resources/help-center, with the\n# box indicating the region of the NSIDC grid.\nlibrary(oce)\ndata(coastlineWorld)\nprojection <- sf::st_crs(\"EPSG:3413\")\ncat(projection$proj4string, \"\\n\") # see the projection details\npar(mar = c(2, 2, 1, 1)) # tighten margins\nmapPlot(coastlineWorld,\n projection = projection,\n col = gray(0.9), geographical = 4,\n longitudelim = c(-180, 180), latitudelim = c(10, 90)\n)\n# Coordinates of box from Table 6 of the NSIDC document\nbox <- cbind(\n -360 + c(168.35, 102.34, 350.3, 279.26, 168.35),\n c(30.98, 31.37, 34.35, 33.92, 30.98)\n)\nmapLines(box[, 1], box[, 2], lwd = 2)\n\n\n\n"} {"package":"oce","topic":"mapPoints","snippet":"### Name: mapPoints\n### Title: Add Points to a Map\n### Aliases: mapPoints\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\nmapPlot(coastlineWorld,\n longitudelim = c(-80, 0), latitudelim = c(20, 50),\n col = \"lightgray\", projection = \"+proj=laea +lon_0=-35\"\n)\ndata(section)\nmapPoints(section)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapPolygon","snippet":"### Name: mapPolygon\n### Title: Add a Polygon to a Map\n### Aliases: mapPolygon\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\ndata(topoWorld)\n\n# Bathymetry near southeastern Canada\npar(mfrow = c(1, 1), mar = c(2, 2, 1, 1))\ncm <- colormap(zlim = c(-5000, 0), col = oceColorsGebco)\ndrawPalette(colormap = cm)\nlonlim <- c(-60, -50)\nlatlim <- c(40, 60)\nmapPlot(coastlineWorld,\n longitudelim = lonlim,\n latitudelim = latlim, projection = \"+proj=merc\", grid = FALSE\n)\nmapImage(topoWorld, colormap = cm)\nmapPolygon(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]], col = \"lightgray\")\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapScalebar","snippet":"### Name: mapScalebar\n### Title: Add a Scalebar to a Map\n### Aliases: mapScalebar\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\n# Arctic Ocean\npar(mar = c(2.5, 2.5, 1, 1))\nmapPlot(coastlineWorld,\n latitudelim = c(60, 120), longitudelim = c(-130, -50),\n col = \"lightgray\", projection = \"+proj=stere +lat_0=90\"\n)\nmapScalebar()\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapText","snippet":"### Name: mapText\n### Title: Add Text to a Map\n### Aliases: mapText\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\nlongitude <- coastlineWorld[[\"longitude\"]]\nlatitude <- coastlineWorld[[\"latitude\"]]\nmapPlot(longitude, latitude,\n type = \"l\", grid = 5,\n longitudelim = c(-70, -50), latitudelim = c(45, 50),\n projection = \"+proj=merc\"\n)\nlon <- -63.5744 # Halifax\nlat <- 44.6479\nmapPoints(lon, lat, pch = 20, col = \"red\")\nmapText(lon, lat, \"Halifax\", col = \"red\", pos = 1, offset = 1)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"mapTissot","snippet":"### Name: mapTissot\n### Title: Add Tissot 
Indicatrices to a Map\n### Aliases: mapTissot\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\npar(mfrow = c(1, 1), mar = c(2, 2, 1, 1))\np <- \"+proj=aea +lat_1=10 +lat_2=60 +lon_0=-45\"\nmapPlot(coastlineWorld,\n projection = p, col = \"gray\",\n longitudelim = c(-90, 0), latitudelim = c(0, 50)\n)\nmapTissot(c(15, 15), col = \"red\")\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"matchBytes","snippet":"### Name: matchBytes\n### Title: Locate Byte Sequences in a Raw Vector\n### Aliases: matchBytes\n\n### ** Examples\n\nbuf <- as.raw(c(0xa5, 0x11, 0xaa, 0xa5, 0x11, 0x00))\nmatch <- matchBytes(buf, 0xa5, 0x11)\nprint(buf)\nprint(match)\n\n\n"} {"package":"oce","topic":"matrixSmooth","snippet":"### Name: matrixSmooth\n### Title: Smooth a Matrix\n### Aliases: matrixSmooth\n\n### ** Examples\n\nlibrary(oce)\nopar <- par(no.readonly = TRUE)\nm <- matrix(rep(seq(0, 1, length.out = 5), 5), nrow = 5, byrow = TRUE)\nm[3, 3] <- 2\nm1 <- matrixSmooth(m)\nm2 <- matrixSmooth(m1)\nm3 <- matrixSmooth(m2)\npar(mfrow = c(2, 2))\nimage(m, col = rainbow(100), zlim = c(0, 4), main = \"original image\")\nimage(m1, col = rainbow(100), zlim = c(0, 4), main = \"smoothed 1 time\")\nimage(m2, col = rainbow(100), zlim = c(0, 4), main = \"smoothed 2 times\")\nimage(m3, col = rainbow(100), zlim = c(0, 4), main = \"smoothed 3 times\")\npar(opar)\n\n\n"} {"package":"oce","topic":"moonAngle","snippet":"### Name: moonAngle\n### Title: Lunar Angle as Function of Space and Time\n### Aliases: moonAngle\n\n### ** Examples\n\n\nlibrary(oce)\npar(mfrow = c(3, 2))\ny <- 2012\nm <- 4\ndays <- 1:3\n# Halifax sunrise/sunset (see e.g. https://www.timeanddate.com/worldclock)\nrises <- ISOdatetime(y, m, days, c(13, 15, 16), c(55, 04, 16), 0, tz = \"UTC\") + 3 * 3600 # ADT\nsets <- ISOdatetime(y, m, days, c(3, 4, 4), c(42, 15, 45), 0, tz = \"UTC\") + 3 * 3600\nazrises <- c(69, 75, 82)\nazsets <- c(293, 288, 281)\nlatitude <- 44.65\nlongitude <- -63.6\nfor (i in 1:3) {\n t <- ISOdatetime(y, m, days[i], 0, 0, 0, tz = \"UTC\") + seq(0, 24 * 3600, 3600 / 4)\n ma <- moonAngle(t, longitude, latitude)\n oce.plot.ts(t, ma$altitude, type = \"l\", mar = c(2, 3, 1, 1), cex = 1 / 2, ylab = \"Altitude\")\n abline(h = 0)\n points(rises[i], 0, col = \"red\", pch = 3, lwd = 2, cex = 1.5)\n points(sets[i], 0, col = \"blue\", pch = 3, lwd = 2, cex = 1.5)\n oce.plot.ts(t, ma$azimuth, type = \"l\", mar = c(2, 3, 1, 1), cex = 1 / 2, ylab = \"Azimuth\")\n points(rises[i], -180 + azrises[i], col = \"red\", pch = 3, lwd = 2, cex = 1.5)\n points(sets[i], -180 + azsets[i], col = \"blue\", pch = 3, lwd = 2, cex = 1.5)\n}\n\n\n\n"} {"package":"oce","topic":"numberAsHMS","snippet":"### Name: numberAsHMS\n### Title: Convert a Numeric Time to Hour, Minute, and Second\n### Aliases: numberAsHMS\n\n### ** Examples\n\nt <- c(\"0900\", \"1234\")\nnumberAsHMS(t)\n\n\n\n"} {"package":"oce","topic":"numberAsPOSIXct","snippet":"### Name: numberAsPOSIXct\n### Title: Convert a Numeric Time to a POSIXct Time\n### Aliases: numberAsPOSIXct\n\n### ** Examples\n\n# Example 1. default (unix)\nnumberAsPOSIXct(0)\n\n# Example 2. Matlab\nnumberAsPOSIXct(1, type = \"matlab\")\n\n# Example 3. 
GPS with default week rollover or with no rollover (Canada Day, year 2010)\nnumberAsPOSIXct(cbind(566, 345615), type = \"gps\")\nnumberAsPOSIXct(cbind(566, 345615, 1), type = \"gps\")\nnumberAsPOSIXct(cbind(1024 + 566, 345615, 0), type = \"gps\")\n# Show how to deal with leap seconds (15 of them, in this case)\nsum(as.POSIXct(\"1980-01-01\") < .leap.seconds & .leap.seconds <= as.POSIXct(\"2010-07-01\"))\n-15 + numberAsPOSIXct(cbind(1024 + 566, 345615, 0), type = \"gps\", leap = FALSE)\n\n# Example 4. yearday\nnumberAsPOSIXct(cbind(2013, 1), type = \"yearday\") # start of 2013\n\n# Example 5. Epic time, one hour into Canada Day of year 2018. In computing the\n# Julian day, note that this starts at noon.\njd <- julianDay(as.POSIXct(\"2018-07-01 12:00:00\", tz = \"UTC\"))\nnumberAsPOSIXct(cbind(jd, 1e3 * 1 * 3600), type = \"epic\", tz = \"UTC\")\n\n\n\n"} {"package":"oce","topic":"oce-class","snippet":"### Name: oce-class\n### Title: Base Class for oce Objects\n### Aliases: oce-class\n\n### ** Examples\n\nstr(new(\"oce\"))\n\n\n\n"} {"package":"oce","topic":"oce.as.raw","snippet":"### Name: oce.as.raw\n### Title: Version of as.raw() That Clips Data\n### Aliases: oce.as.raw\n\n### ** Examples\n\nx <- c(-0.1, 0, 1, 255, 255.1)\ndata.frame(x, oce.as.raw(x))\n\n\n"} {"package":"oce","topic":"oce.contour","snippet":"### Name: oce.contour\n### Title: Oce Variant of contour\n### Aliases: oce.contour oceContour\n\n### ** Examples\n\nlibrary(oce)\ndata(topoWorld)\n# coastline now, and in last glacial maximum\nlon <- topoWorld[[\"longitude\"]]\nlat <- topoWorld[[\"latitude\"]]\nz <- topoWorld[[\"z\"]]\noce.contour(lon, lat, z, levels = 0, drawlabels = FALSE)\noce.contour(lon, lat, z, levels = -130, drawlabels = FALSE, col = \"blue\", add = TRUE)\n\n\n"} {"package":"oce","topic":"oce.grid","snippet":"### Name: oce.grid\n### Title: Add a Grid to an Existing Oce Plot\n### Aliases: oce.grid\n\n### ** Examples\n\nlibrary(oce)\ni <- imagep(volcano)\noce.grid(i, lwd = 2)\n\ndata(sealevel)\ni <- oce.plot.ts(sealevel[[\"time\"]], sealevel[[\"elevation\"]])\noce.grid(i, col = \"red\")\n\ndata(ctd)\ni <- plotTS(ctd)\noce.grid(i, col = \"red\")\n\ndata(adp)\ni <- plot(adp, which = 1)\noce.grid(i, col = \"gray\", lty = 1)\n\ndata(echosounder)\ni <- plot(echosounder)\noce.grid(i, col = \"pink\", lty = 1)\n\n\n\n"} {"package":"oce","topic":"oce.plot.ts","snippet":"### Name: oce.plot.ts\n### Title: Oce Variant of plot.ts\n### Aliases: oce.plot.ts\n\n### ** Examples\n\nlibrary(oce)\nt0 <- as.POSIXct(\"2008-01-01\", tz = \"UTC\")\nt <- seq(t0, length.out = 48, by = \"30 min\")\ny <- sin(as.numeric(t - t0) * 2 * pi / (12 * 3600))\noce.plot.ts(t, y, type = \"l\", xaxs = \"i\")\n# Show how col, pch and cex get recycled\noce.plot.ts(t, y,\n type = \"p\", xaxs = \"i\",\n col = 1:3, pch = c(rep(1, 6), rep(20, 6)), cex = sqrt(1:6)\n)\n# Trimming x; note the narrowing of the y view\noce.plot.ts(t, y, type = \"p\", xlim = c(t[6], t[12]))\n# Flip the y axis\noce.plot.ts(t, y, flipy = TRUE)\n\n\n"} {"package":"oce","topic":"oceApprox","snippet":"### Name: oceApprox\n### Title: Interpolate 1D Data with UNESCO or Reiniger-Ross Algorithm\n### Aliases: oceApprox oce.approx\n\n### ** Examples\n\nlibrary(oce)\nif (require(ocedata)) {\n data(RRprofile)\n zz <- seq(0, 2000, 2)\n plot(RRprofile$temperature, RRprofile$depth, ylim = c(500, 0), 
xlim = c(2, 11))\n # Contrast two methods\n a1 <- oce.approx(RRprofile$depth, RRprofile$temperature, zz, \"rr\")\n a2 <- oce.approx(RRprofile$depth, RRprofile$temperature, zz, \"unesco\")\n lines(a1, zz)\n lines(a2, zz, col = \"red\")\n legend(\"bottomright\", lwd = 1, col = 1:2, legend = c(\"rr\", \"unesco\"), cex = 3 / 4)\n}\n\n\n"} {"package":"oce","topic":"oceAxis","snippet":"### Name: oceAxis\n### Title: Draw an Axis, Possibly with Decade-style Logarithmic Scaling\n### Aliases: oceAxis\n\n### ** Examples\n\nlibrary(oce)\nRa <- 10^seq(4, 10, 0.1)\nNu <- 0.085 * Ra^(1 / 3)\nplot(Ra, Nu, log = \"xy\", axes = FALSE)\nbox()\noceAxis(1, logStyle = \"decade\")\noceAxis(2, logStyle = \"decade\")\n\n\n\n"} {"package":"oce","topic":"oceCRS","snippet":"### Name: oceCRS\n### Title: Coordinate Reference System Strings for Some Oceans\n### Aliases: oceCRS\n\n### ** Examples\n\n## No test: \nlibrary(oce)\ndata(coastlineWorld)\npar(mar = c(2, 2, 1, 1))\nplot(coastlineWorld, projection = oceCRS(\"Atlantic\"), span = 12000)\nplot(coastlineWorld, projection = oceCRS(\"North Atlantic\"), span = 8000)\nplot(coastlineWorld, projection = oceCRS(\"South Atlantic\"), span = 8000)\nplot(coastlineWorld, projection = oceCRS(\"Arctic\"), span = 4000)\nplot(coastlineWorld, projection = oceCRS(\"Antarctic\"), span = 10000)\n# Avoid ugly horizontal lines, an artifact of longitude shifting.\n# Note: we cannot fill the land once we shift, either.\npacific <- coastlineCut(coastlineWorld, -180)\nplot(pacific, proj = oceCRS(\"Pacific\"), span = 15000, col = NULL)\nplot(pacific, proj = oceCRS(\"North Pacific\"), span = 12000, col = NULL)\nplot(pacific, proj = oceCRS(\"South Pacific\"), span = 12000, col = NULL)\n## End(No test)\n\n\n"} {"package":"oce","topic":"oceColors9B","snippet":"### Name: oceColors9B\n### Title: Create Colors in a Red-Yellow-Blue Color Scheme\n### Aliases: oceColors9B oce.colors9B\n\n### ** Examples\n\nlibrary(oce)\nimagep(volcano,\n col = oceColors9B(128),\n zlab = \"oceColors9B\"\n)\n\n\n\n"} {"package":"oce","topic":"oceColorsCDOM","snippet":"### Name: oceColorsCDOM\n### Title: Create Colors Suitable for CDOM Fields\n### Aliases: oceColorsCDOM oce.colorsCDOM\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsCDOM(128),\n zlab=\"oceColorsCDOM\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsChlorophyll","snippet":"### Name: oceColorsChlorophyll\n### Title: Create Colors Suitable for chlorophyll Fields\n### Aliases: oceColorsChlorophyll oce.colorsChlorophyll\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsChlorophyll(128),\n zlab=\"oceColorsChlorophyll\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsDensity","snippet":"### Name: oceColorsDensity\n### Title: Create Colors Suitable for density Fields\n### Aliases: oceColorsDensity oce.colorsDensity\n\n### ** 
Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsDensity(128),\n zlab=\"oceColorsDensity\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsFreesurface","snippet":"### Name: oceColorsFreesurface\n### Title: Create Colors Suitable for freesurface Fields\n### Aliases: oceColorsFreesurface oce.colorsFreesurface\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsFreesurface(128),\n zlab=\"oceColorsFreesurface\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsGebco","snippet":"### Name: oceColorsGebco\n### Title: Create Colors in a GEBCO-like Scheme\n### Aliases: oceColorsGebco oce.colorsGebco\n\n### ** Examples\n\nlibrary(oce)\nimagep(volcano, col = oceColorsGebco(128, region = \"both\"))\n\n\n\n"} {"package":"oce","topic":"oceColorsJet","snippet":"### Name: oceColorsJet\n### Title: Create Colors Similar to the Matlab Jet Scheme\n### Aliases: oceColorsJet oce.colorsJet oceColors9A oce.colors9A\n\n### ** Examples\n\nlibrary(oce)\nimagep(volcano, col = oceColorsJet, zlab = \"oceColorsJet\")\n\n\n\n"} {"package":"oce","topic":"oceColorsOxygen","snippet":"### Name: oceColorsOxygen\n### Title: Create Colors Suitable for oxygen Fields\n### Aliases: oceColorsOxygen oce.colorsOxygen\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsOxygen(128),\n zlab=\"oceColorsOxygen\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsPAR","snippet":"### Name: oceColorsPAR\n### Title: Create Colors Suitable for PAR Fields\n### Aliases: oceColorsPAR oce.colorsPAR\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsPAR(128),\n zlab=\"oceColorsPAR\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsPhase","snippet":"### Name: oceColorsPhase\n### Title: Create Colors Suitable for phase Fields\n### Aliases: oceColorsPhase oce.colorsPhase\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsPhase(128),\n zlab=\"oceColorsPhase\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D 
imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsSalinity","snippet":"### Name: oceColorsSalinity\n### Title: Create Colors Suitable for salinity Fields\n### Aliases: oceColorsSalinity oce.colorsSalinity\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsSalinity(128),\n zlab=\"oceColorsSalinity\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsTemperature","snippet":"### Name: oceColorsTemperature\n### Title: Create Colors Suitable for temperature Fields\n### Aliases: oceColorsTemperature oce.colorsTemperature\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsTemperature(128),\n zlab=\"oceColorsTemperature\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsTurbidity","snippet":"### Name: oceColorsTurbidity\n### Title: Create Colors Suitable for turbidity Fields\n### Aliases: oceColorsTurbidity oce.colorsTurbidity\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsTurbidity(128),\n zlab=\"oceColorsTurbidity\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsTurbo","snippet":"### Name: oceColorsTurbo\n### Title: Create Colors Similar to the Google Turbo Scheme\n### Aliases: oceColorsTurbo oce.colorsTurbo\n\n### ** Examples\n\nlibrary(oce)\nimagep(volcano,\n col = oceColorsTurbo(128),\n zlab = \"oceColorsTurbo\"\n)\n\n\n\n"} {"package":"oce","topic":"oceColorsTwo","snippet":"### Name: oceColorsTwo\n### Title: Create Two-Color Palette\n### Aliases: oceColorsTwo oce.colorsTwo\n\n### ** Examples\n\nlibrary(oce)\nimagep(volcano - mean(range(volcano)),\n col = oceColorsTwo(128),\n zlim = \"symmetric\", zlab = \"oceColorsTwo\"\n)\n\n\n"} {"package":"oce","topic":"oceColorsVelocity","snippet":"### Name: oceColorsVelocity\n### Title: Create Colors Suitable for velocity Fields\n### Aliases: oceColorsVelocity oce.colorsVelocity\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsVelocity(128),\n zlab=\"oceColorsVelocity\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceColorsViridis","snippet":"### Name: oceColorsViridis\n### Title: Create Colors Similar to the Matlab Viridis Scheme\n### Aliases: 
oceColorsViridis oce.colorsViridis\n\n### ** Examples\n\nlibrary(oce)\n# Example 1: oceColorsViridis\nimagep(volcano,\n col = oceColorsViridis(128),\n zlab = \"oceColorsViridis\"\n)\n\n\n\n"} {"package":"oce","topic":"oceColorsVorticity","snippet":"### Name: oceColorsVorticity\n### Title: Create Colors Suitable for vorticity Fields\n### Aliases: oceColorsVorticity oce.colorsVorticity\n\n### ** Examples\n\nlibrary(oce)\n\n# Example 1\nimagep(volcano, col=oceColorsVorticity(128),\n zlab=\"oceColorsVorticity\")\n## Not run: \n##D # Example 2 (requires the cmocean package)\n##D imagep(volcano, col=cmocean::cmocean(\"matter\"),\n##D zlab=\"cmocean::cmocean(\\\"matter\\\")\")\n## End(Not run)\n\n## Not run: \n##D # Example 3 (requires the viridis package)\n##D imagep(volcano, col=viridis::inferno,\n##D zlab=\"viridis::inferno\")\n## End(Not run)\n\n\n\n"} {"package":"oce","topic":"oceConvolve","snippet":"### Name: oceConvolve\n### Title: Convolve Two Time Series\n### Aliases: oceConvolve oce.convolve\n\n### ** Examples\n\nlibrary(oce)\nt <- 0:1027\nn <- length(t)\nsignal <- ifelse(sin(t * 2 * pi / 128) > 0, 1, 0)\ntau <- 10\nfilter <- exp(-seq(5 * tau, 0) / tau)\nfilter <- filter / sum(filter)\nobservation <- oce.convolve(signal, filter)\nplot(t, signal, type = \"l\")\nlines(t, observation, lty = \"dotted\")\n\n\n"} {"package":"oce","topic":"oceDebug","snippet":"### Name: oceDebug\n### Title: Print a Debugging Message\n### Aliases: oceDebug oce.debug\n\n### ** Examples\n\noceDebug(debug = 1, \"Example\", 1, \"Plain text\")\noceDebug(debug = 1, \"Example\", 2, \"Bold\", style = \"bold\")\noceDebug(debug = 1, \"Example\", 3, \"Italic\", style = \"italic\")\noceDebug(debug = 1, \"Example\", 4, \"Red\", style = \"red\")\noceDebug(debug = 1, \"Example\", 5, \"Green\", style = \"green\")\noceDebug(debug = 1, \"Example\", 6, \"Blue\", style = \"blue\")\nmycyan <- function(...) 
paste(\"\\033[36m\", paste(..., sep = \" \"), \"\\033[0m\", sep = \"\")\noceDebug(debug = 1, \"Example\", 7, \"User-set cyan\", style = mycyan)\n\n\n"} {"package":"oce","topic":"oceEdit","snippet":"### Name: oceEdit\n### Title: Edit an Oce Object\n### Aliases: oceEdit oce.edit\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\n# Example 1: change latitude\nctd2 <- oceEdit(ctd,\n item = \"latitude\", value = 47.8879,\n reason = \"illustration\", person = \"Dan Kelley\"\n)\n# Example 2: add 0.1 dbar to pressure\nctd3 <- oceEdit(ctd, action = \"x@data$pressure<-x@data$pressure+0.1\")\n\n\n"} {"package":"oce","topic":"oceFilter","snippet":"### Name: oceFilter\n### Title: Filter a Time Series\n### Aliases: oceFilter oce.filter\n\n### ** Examples\n\nlibrary(oce)\npar(mar = c(4, 4, 1, 1))\nb <- rep(1, 5) / 5\na <- 1\nx <- seq(0, 10)\ny <- ifelse(x == 5, 1, 0)\nf1 <- oceFilter(y, a, b)\nplot(x, y, ylim = c(-0, 1.5), pch = \"o\", type = \"b\")\npoints(x, f1, pch = \"x\", col = \"red\")\n\n# remove the phase lag\nf2 <- oceFilter(y, a, b, TRUE)\npoints(x, f2, pch = \"+\", col = \"blue\")\n\nlegend(\"topleft\",\n col = c(\"black\", \"red\", \"blue\"), pch = c(\"o\", \"x\", \"+\"),\n legend = c(\"data\", \"normal filter\", \"zero-phase filter\")\n)\nmtext(\"note that normal filter rolls off at end\")\n\n\n"} {"package":"oce","topic":"ocePmatch","snippet":"### Name: ocePmatch\n### Title: Partial Matching of Strings or Numbers\n### Aliases: ocePmatch oce.pmatch\n\n### ** Examples\n\nlibrary(oce)\noce.pmatch(c(\"s\", \"at\", \"te\"), list(salinity = 1, temperature = 3.1))\n\n\n"} {"package":"oce","topic":"oceRenameData","snippet":"### Name: oceRenameData\n### Title: Rename Something in the data slot of an oce Object\n### Aliases: oceRenameData\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nCTD <- oceRenameData(ctd, \"salinity\", \"SALT\")\nstopifnot(all.equal(ctd[[\"salinity\"]], CTD[[\"SALT\"]]))\nstopifnot(all.equal(ctd[[\"sal00\"]], CTD[[\"SALT\"]]))\n\n\n\n"} {"package":"oce","topic":"oceSetData","snippet":"### Name: oceSetData\n### Title: Set Something in the data Slot of an oce Object\n### Aliases: oceSetData\n\n### ** Examples\n\ndata(ctd)\nTf <- swTFreeze(ctd)\nctd <- oceSetData(ctd, \"freezing\", Tf,\n unit = list(unit = expression(degree * C), scale = \"ITS-90\")\n)\nplotProfile(ctd, \"freezing\")\n\n\n\n"} {"package":"oce","topic":"oceSetMetadata","snippet":"### Name: oceSetMetadata\n### Title: Set Something in the metadata Slot of an oce Object\n### Aliases: oceSetMetadata\n\n### ** Examples\n\n# Add an estimate of MLD (mixed layer depth) to a ctd object\nlibrary(oce)\ndata(ctd)\nctdWithMLD <- oceSetMetadata(ctd, \"MLD\", 3)\nctdWithMLD[[\"MLD\"]] # 3\n\n\n\n"} {"package":"oce","topic":"oceSmooth","snippet":"### Name: oceSmooth\n### Title: Smooth an oce Object\n### Aliases: oceSmooth oce.smooth\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nd <- oce.smooth(ctd)\nplot(d)\n\n\n"} {"package":"oce","topic":"oceSpectrum","snippet":"### Name: oceSpectrum\n### Title: Normalize a Spectrum\n### Aliases: oceSpectrum oce.spectrum\n\n### ** Examples\n\nx <- rnorm(1e3)\ns <- spectrum(x, plot = FALSE)\nss <- oce.spectrum(x, plot = FALSE)\ncat(\"variance of x=\", var(x), \"\\n\")\ncat(\"integral of spectrum=\", sum(s$spec) * diff(s$freq[1:2]), \"\\n\")\ncat(\"integral of oce.spectrum=\", sum(ss$spec) * diff(ss$freq[1:2]), \"\\n\")\n\n\n"} {"package":"oce","topic":"plot,adp-method","snippet":"### Name: plot,adp-method\n### Title: Plot an adp Object\n### Aliases: plot,adp-method plot.adp\n\n### ** 
Examples\n\nlibrary(oce)\ndata(adp)\nplot(adp, which = 1:3)\nplot(adp, which = \"temperature\", tformat = \"%H:%M\")\n\n\n\n"} {"package":"oce","topic":"plot,adv-method","snippet":"### Name: plot,adv-method\n### Title: Plot an adv Object\n### Aliases: plot,adv-method plot.adv\n\n### ** Examples\n\nlibrary(oce)\ndata(adv)\nplot(adv)\n\n\n\n"} {"package":"oce","topic":"plot,amsr-method","snippet":"### Name: plot,amsr-method\n### Title: Plot an amsr Object\n### Aliases: plot,amsr-method plot.amsr\n\n### ** Examples\n\nlibrary(oce)\ndata(coastlineWorld)\ndata(amsr) # see ?amsr for how to read and composite such objects\n\n# Example 1: plot with default color scheme, oceColorsTemperature()\nplot(amsr, \"SST\")\nlines(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]])\n\n# Example 2: 'turbo' color scheme\nplot(amsr, \"SST\", col = oceColorsTurbo)\nlines(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]])\n\n\n\n"} {"package":"oce","topic":"plot,argo-method","snippet":"### Name: plot,argo-method\n### Title: Plot an argo Object\n### Aliases: plot,argo-method plot.argo\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\ntc <- cut(argo[[\"time\"]], \"year\")\n# Example 1: plot map, which reveals float trajectory.\nplot(argo, pch = as.integer(tc))\nyear <- substr(levels(tc), 1, 4)\ndata(topoWorld)\ncontour(topoWorld[[\"longitude\"]], topoWorld[[\"latitude\"]],\n topoWorld[[\"z\"]],\n add = TRUE\n)\nlegend(\"bottomleft\", pch = seq_along(year), legend = year, bg = \"white\", cex = 3 / 4)\n\n# Example 2: plot map, TS, T(z) and S(z). Note the use\n# of handleFlags(), to skip over questionable data.\nplot(handleFlags(argo), which = c(1, 4, 6, 5))\n\n\n\n"} {"package":"oce","topic":"plot,cm-method","snippet":"### Name: plot,cm-method\n### Title: Plot a cm Object\n### Aliases: plot,cm-method plot.cm\n\n### ** Examples\n\nlibrary(oce)\ndata(cm)\nsummary(cm)\nplot(cm)\n\n\n\n"} {"package":"oce","topic":"plot,coastline-method","snippet":"### Name: plot,coastline-method\n### Title: Plot a coastline Object\n### Aliases: plot,coastline-method plot.coastline\n\n### ** Examples\n\n## No test: \nlibrary(oce)\npar(mar = c(2, 2, 1, 1))\ndata(coastlineWorld)\nplot(coastlineWorld)\nplot(coastlineWorld, clongitude = -63.6, clatitude = 44.6, span = 1000)\n\n# Canada in Lambert projection\nplot(coastlineWorld,\n clongitude = -95, clatitude = 65, span = 5500,\n grid = 10, projection = \"+proj=laea +lon_0=-100 +lat_0=55\"\n)\n## End(No test)\n\n\n\n"} {"package":"oce","topic":"plot,ctd-method","snippet":"### Name: plot,ctd-method\n### Title: Plot a ctd Object\n### Aliases: plot,ctd-method plot.ctd\n\n### ** Examples\n\n# 1. simple plot\nlibrary(oce)\ndata(ctd)\nplot(ctd)\n\n# 2. 
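(an editorial aside precedes this example)\n# Hedged sketch, an editorial addition: single panels can be selected\n# with the which argument; the panel names used here also appear in the\n# plotInset() example elsewhere in this manual.\nplot(ctd, which = \"TS\")\nplot(ctd, which = \"map\", coastline = \"coastlineWorld\", span = 5000)\n# 2. 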
how to customize depth contours\npar(mfrow = c(1, 2))\ndata(section)\nstn <- section[[\"station\", 105]]\nplot(stn, which = \"map\", drawIsobaths = TRUE)\nplot(stn, which = \"map\")\ndata(topoWorld)\ntlon <- topoWorld[[\"longitude\"]]\ntlat <- topoWorld[[\"latitude\"]]\ntdep <- -topoWorld[[\"z\"]]\ncontour(tlon, tlat, tdep,\n drawlabels = FALSE,\n levels = seq(1000, 6000, 1000), col = \"lightblue\", add = TRUE\n)\ncontour(tlon, tlat, tdep,\n vfont = c(\"sans serif\", \"bold\"),\n levels = stn[[\"waterDepth\"]], col = \"red\", lwd = 2, add = TRUE\n)\n\n\n\n"} {"package":"oce","topic":"plot,echosounder-method","snippet":"### Name: plot,echosounder-method\n### Title: Plot an echosounder Object\n### Aliases: plot,echosounder-method plot.echosounder\n\n### ** Examples\n\nlibrary(oce)\ndata(echosounder)\nplot(echosounder, drawBottom = TRUE)\n\n\n\n"} {"package":"oce","topic":"plot,lisst-method","snippet":"### Name: plot,lisst-method\n### Title: Plot a lisst Object\n### Aliases: plot,lisst-method plot.lisst\n\n### ** Examples\n\nlibrary(oce)\ndata(lisst)\nplot(lisst)\n\n\n\n"} {"package":"oce","topic":"plot,met-method","snippet":"### Name: plot,met-method\n### Title: Plot a met Object\n### Aliases: plot,met-method plot.met\n\n### ** Examples\n\nlibrary(oce)\ndata(met)\nplot(met, which = 3:4)\n\n# Wind speed and direction during Hurricane Juan\n# Compare with the final figure in a white paper by Chris Fogarty\n# (available at http://www.novaweather.net/Hurricane_Juan_files/McNabs_plot.pdf\n# downloaded 2017-01-02).\nlibrary(oce)\ndata(met)\nt0 <- as.POSIXct(\"2003-09-29 04:00:00\", tz = \"UTC\")\ndt <- 12 * 3600\njuan <- subset(met, t0 - dt <= time & time <= t0 + dt)\npar(mfrow = c(2, 1))\nplot(juan, which = 5)\nabline(v = t0)\nplot(juan, which = 6)\nabline(v = t0)\n\n\n\n"} {"package":"oce","topic":"plot,oce-method","snippet":"### Name: plot,oce-method\n### Title: Plot an oce Object\n### Aliases: plot,oce-method plot.oce\n\n### ** Examples\n\nlibrary(oce)\no <- new(\"oce\")\no <- oceSetData(o, \"x\", rnorm(10))\no <- oceSetData(o, \"y\", rnorm(10))\no <- oceSetData(o, \"z\", rnorm(10))\nplot(o)\n\n\n"} {"package":"oce","topic":"plot,rsk-method","snippet":"### Name: plot,rsk-method\n### Title: Plot a rsk Object\n### Aliases: plot,rsk-method plot.rsk\n\n### ** Examples\n\nlibrary(oce)\ndata(rsk)\n# 1. default timeseries plot of all data fields\nplot(rsk)\n# 2. 
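(an editorial aside precedes this example)\n# Hedged sketch, an editorial addition: if the instrument sampled in\n# air before deployment, rskPatm() (documented elsewhere in this\n# manual) estimates atmospheric pressure, which the conversion can\n# remove; the pressureAtmospheric argument name is an assumption.\npAtm <- rskPatm(rsk)\nctdFromRsk <- as.ctd(rsk, pressureAtmospheric = tail(pAtm, 1))\n# 2. 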
plot in ctd format\nplot(as.ctd(rsk))\n\n\n\n"} {"package":"oce","topic":"plot,sealevel-method","snippet":"### Name: plot,sealevel-method\n### Title: Plot a sealevel Object\n### Aliases: plot,sealevel-method plot.sealevel\n\n### ** Examples\n\nlibrary(oce)\ndata(sealevel)\n# local Halifax time is UTC + 4h\njuan <- as.POSIXct(\"2003-09-29 00:15:00\", tz = \"UTC\") + 4 * 3600\nplot(sealevel, which = 1, xlim = juan + 86400 * c(-7, 7))\nabline(v = juan, col = \"red\")\n\n\n\n"} {"package":"oce","topic":"plot,section-method","snippet":"### Name: plot,section-method\n### Title: Plot a section Object\n### Aliases: plot,section-method plot.section\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nGS <- subset(section, 113 <= stationId & stationId <= 129)\nGSg <- sectionGrid(GS, p = seq(0, 2000, 100))\n\n# Gulf Stream, salinity and temperature contours\nplot(GSg, which = c(\"salinity\", \"temperature\"))\n\n# Gulf Stream, Temperature image\nplot(GSg,\n which = \"temperature\", ztype = \"image\",\n zbreaks = seq(0, 25, 2), zcol = oceColorsTemperature\n)\n\n\n\n"} {"package":"oce","topic":"plot,topo-method","snippet":"### Name: plot,topo-method\n### Title: Plot a topo Object\n### Aliases: plot,topo-method plot.topo\n\n### ** Examples\n\nlibrary(oce)\ndata(topoWorld)\nplot(topoWorld, clongitude = -60, clatitude = 45, span = 10000)\n\n\n\n"} {"package":"oce","topic":"plot,windrose-method","snippet":"### Name: plot,windrose-method\n### Title: Plot a windrose Object\n### Aliases: plot,windrose-method plot.windrose\n\n### ** Examples\n\nlibrary(oce)\nset.seed(1234)\ntheta <- seq(0, 360, 0.25)\nx <- 1 + cos(pi / 180 * theta) + rnorm(theta)\ny <- sin(pi / 180 * theta) + rnorm(theta)\nwr <- as.windrose(x, y)\nplot(wr)\nplot(wr, type = \"fivenum\")\n\n\n\n"} {"package":"oce","topic":"plot,xbt-method","snippet":"### Name: plot,xbt-method\n### Title: Plot an xbt Object\n### Aliases: plot,xbt-method plot.xbt\n\n### ** Examples\n\nlibrary(oce)\ndata(xbt)\nsummary(xbt)\nplot(xbt)\n\n\n\n"} {"package":"oce","topic":"plotInset","snippet":"### Name: plotInset\n### Title: Plot an Inset Diagram\n### Aliases: plotInset\n\n### ** Examples\n\nlibrary(oce)\n# power law in linear and log form\nx <- 1:10\ny <- x^2\nplot(x, y, log = \"xy\", type = \"l\")\nplotInset(3, 1, 10, 8,\n expr = plot(x, y, type = \"l\", cex.axis = 3 / 4, mgp = c(3 / 2, 1 / 2, 0)),\n mar = c(2.5, 2.5, 1, 1)\n)\n\n# CTD data with location\ndata(ctd)\nplot(ctd, which = \"TS\")\nplotInset(29.9, 2.7, 31, 10,\n expr = plot(ctd,\n which = \"map\",\n coastline = \"coastlineWorld\",\n span = 5000, mar = NULL, cex.axis = 3 / 4\n )\n)\n\n\n"} {"package":"oce","topic":"plotPolar","snippet":"### Name: plotPolar\n### Title: Draw a Polar Plot\n### Aliases: plotPolar\n\n### ** Examples\n\nlibrary(oce)\nr <- rnorm(50, mean = 2, sd = 0.1)\ntheta <- runif(50, 0, 360)\nplotPolar(r, theta)\n\n\n"} {"package":"oce","topic":"plotProfile","snippet":"### Name: plotProfile\n### Title: Plot a ctd Profile\n### Aliases: plotProfile\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nplotProfile(ctd, xtype = \"temperature\")\n\n\n\n"} {"package":"oce","topic":"plotScan","snippet":"### Name: plotScan\n### Title: Plot a ctd Object in a Low-Level Fashion\n### Aliases: plotScan\n\n### ** Examples\n\nlibrary(oce)\ndata(ctdRaw)\nplotScan(ctdRaw)\nabline(v = c(130, 350), col = \"red\") # useful for ctdTrim()\n\n\n\n"} {"package":"oce","topic":"plotSticks","snippet":"### Name: plotSticks\n### Title: Draw a Stick Plot\n### Aliases: plotSticks\n\n### ** Examples\n\nlibrary(oce)\n\n# Flow from a 
point source\nn <- 16\nx <- rep(0, n)\ny <- rep(0, n)\ntheta <- seq(0, 2 * pi, length.out = n)\nu <- sin(theta)\nv <- cos(theta)\nplotSticks(x, y, u, v, xlim = c(-2, 2), ylim = c(-2, 2))\nrm(n, x, y, theta, u, v)\n\n# Oceanographic example\ndata(met)\nt <- met[[\"time\"]]\nu <- met[[\"u\"]]\nv <- met[[\"v\"]]\np <- met[[\"pressure\"]]\noce.plot.ts(t, p)\nplotSticks(t, 99, u, v, yscale = 25, add = TRUE)\n\n\n"} {"package":"oce","topic":"plotTS","snippet":"### Name: plotTS\n### Title: Plot Temperature-Salinity Diagram\n### Aliases: plotTS\n\n### ** Examples\n\n# 1. ctd object\nlibrary(oce)\ndata(ctd)\nplotTS(ctd)\n\n# 2. section object (note the outlier!)\ndata(section)\nplotTS(section)\n\n# 3. argo object\ndata(argo)\nplotTS(handleFlags(argo))\n\n# 4. oxygen-based colormap\nmarOrig <- par(\"mar\") # so later plots with palettes have same margins\ncm <- colormap(section[[\"oxygen\"]])\ndrawPalette(colormap = cm, zlab = \"Oxygen\")\nplotTS(section, pch = 19, col = cm$zcol, mar = par(\"mar\")) # the mar adjusts for the palette\n\n# 5. waters near Gulf Stream, colour-coded for longitude.\nsec <- subset(section, abs(longitude + 71.6) < 1)\ncm <- colormap(sec[[\"longitude\", \"byStation\"]], col = oceColors9B)\npar(mar = c(3.3, 3.3, 1, 1.5))\ndrawPalette(colormap = cm, zlab = \"Longitude\")\nplotTS(sec, type = \"n\", xaxs = \"r\", mar = par(\"mar\"))\njnk <- mapply(\n function(s, col) {\n plotTS(s, type = \"o\", col = \"gray\", pt.bg = col, pch = 21, add = TRUE)\n },\n sec[[\"station\"]],\n col = cm$zcol\n)\n\n# 6. with added spiciness contours\ndata(ctd)\nplotTS(ctd, eos = \"gsw\") # MANDATORY so x=SA and y=CT\nusr <- par(\"usr\")\nn <- 100\nSAgrid <- seq(usr[1], usr[2], length.out = n)\nCTgrid <- seq(usr[3], usr[4], length.out = n)\ng <- expand.grid(SA = SAgrid, CT = CTgrid)\nspiciness <- matrix(gsw::gsw_spiciness0(g$SA, g$CT), nrow = n)\ncontour(SAgrid, CTgrid, spiciness, col = 2, labcex = 1, add = TRUE)\n\n\n\n"} {"package":"oce","topic":"plotTaylor","snippet":"### Name: plotTaylor\n### Title: Plot a Model-data Comparison Diagram\n### Aliases: plotTaylor\n\n### ** Examples\n\nlibrary(oce)\ndata(sealevel)\nx <- sealevel[[\"elevation\"]]\nM2 <- predict(tidem(sealevel, constituents = \"M2\"))\nS2 <- predict(tidem(sealevel, constituents = c(\"S2\")))\nplotTaylor(x, cbind(M2, S2))\n\n\n"} {"package":"oce","topic":"predict.tidem","snippet":"### Name: predict.tidem\n### Title: Predict a Tidal Signal\n### Aliases: predict.tidem\n\n### ** Examples\n\n\n# Show non-tidal sealevel signal in Halifax Harbour during\n# the year 2002. 
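(an editorial aside comes first)\n# Hedged sketch, an editorial addition: predict.tidem() should also\n# accept a newdata argument of POSIXct times, so the fit can be\n# evaluated on a finer grid than the observations; treat that argument\n# name as an assumption.\nlibrary(oce)\ndata(sealevel)\nm <- tidem(sealevel)\ntFine <- seq(sealevel[[\"time\"]][1], by = \"15 min\", length.out = 96)\npFine <- predict(m, newdata = tFine)\n# The original example resumes here: it shows the signal during\n# the year 2002. 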
The spike resulted from Hurricane Juan.\nlibrary(oce)\ndata(sealevel)\ntime <- sealevel[[\"time\"]]\nelevation <- sealevel[[\"elevation\"]]\nprediction <- tidem(sealevel) |> predict()\noce.plot.ts(time, elevation - prediction)\n\n\n\n"} {"package":"oce","topic":"preferAdjusted","snippet":"### Name: preferAdjusted\n### Title: Set Preference for Adjusted Values\n### Aliases: preferAdjusted\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\nargoAdjusted <- preferAdjusted(argo)\nall.equal(argo[[\"salinityAdjusted\"]], argoAdjusted[[\"salinity\"]])\nall.equal(argo[[\"salinityFlagsAdjusted\"]], argoAdjusted[[\"salinityFlags\"]])\nall.equal(argo[[\"salinityUnitsAdjusted\"]], argoAdjusted[[\"salinityUnits\"]])\n\n\n\n"} {"package":"oce","topic":"presentTime","snippet":"### Name: presentTime\n### Title: Get the Present Time, in a Stated Timezone\n### Aliases: presentTime\n\n### ** Examples\n\npresentTime() # UTC\npresentTime(\"\") # the local timezone\n\n\n\n"} {"package":"oce","topic":"prettyPosition","snippet":"### Name: prettyPosition\n### Title: Pretty Longitude/Latitude in Degree-Minute-Second Format\n### Aliases: prettyPosition\n\n### ** Examples\n\nlibrary(oce)\nformatPosition(prettyPosition(10 + 1:10 / 60 + 2.8 / 3600))\n\n\n"} {"package":"oce","topic":"processingLog<-","snippet":"### Name: processingLog<-\n### Title: Add an Item to a Processing Log\n### Aliases: processingLog<-\n\n### ** Examples\n\ndata(ctd)\nprocessingLogShow(ctd)\nprocessingLog(ctd) <- \"test\"\nprocessingLogShow(ctd)\n\n\n\n"} {"package":"oce","topic":"pwelch","snippet":"### Name: pwelch\n### Title: Welch Periodogram\n### Aliases: pwelch\n\n### ** Examples\n\nlibrary(oce)\nFs <- 1000\nt <- seq(0, 0.296, 1 / Fs)\nx <- cos(2 * pi * t * 200) + rnorm(n = length(t))\nX <- ts(x, frequency = Fs)\ns <- spectrum(X, spans = c(3, 2), main = \"random + 200 Hz\", log = \"no\")\nw <- pwelch(X, plot = FALSE)\nlines(w$freq, w$spec, col = \"red\")\nw2 <- pwelch(X, nfft = 75, plot = FALSE)\nlines(w2$freq, w2$spec, col = \"green\")\nabline(v = 200, col = \"blue\", lty = \"dotted\")\ncat(\"Checking spectral levels with Parseval's theorem:\\n\")\ncat(\"var(x) = \", var(x), \"\\n\")\ncat(\"2 * sum(s$spec) * diff(s$freq[1:2]) = \", 2 * sum(s$spec) * diff(s$freq[1:2]), \"\\n\")\ncat(\"sum(w$spec) * diff(s$freq[1:2]) = \", sum(w$spec) * diff(w$freq[1:2]), \"\\n\")\ncat(\"sum(w2$spec) * diff(s$freq[1:2]) = \", sum(w2$spec) * diff(w2$freq[1:2]), \"\\n\")\n# co2\npar(mar = c(3, 3, 2, 1), mgp = c(2, 0.7, 0))\ns <- spectrum(co2, plot = FALSE)\nplot(log10(s$freq), s$spec * s$freq,\n xlab = expression(log[10] * Frequency), ylab = \"Power*Frequency\", type = \"l\"\n)\ntitle(\"Variance-preserving spectrum\")\npw <- pwelch(co2, nfft = 256, plot = FALSE)\nlines(log10(pw$freq), pw$spec * pw$freq, col = \"red\")\n\n\n\n"} {"package":"oce","topic":"rangeLimit","snippet":"### Name: rangeLimit\n### Title: Substitute NA for Data Outside a Range\n### Aliases: rangeLimit\n\n### ** Examples\n\n\nten.to.twenty <- rangeLimit(1:100, 10, 20)\n\n\n"} {"package":"oce","topic":"read.adp.ad2cp","snippet":"### Name: read.adp.ad2cp\n### Title: Read an adp File in Nortek AD2CP Format\n### Aliases: read.adp.ad2cp\n\n### ** Examples\n\nlibrary(oce)\n# You can run this within the oce directory, if you clone from github.\nfile <- \"tests/testthat/local_data/ad2cp/S102791A002_Barrow_v2.ad2cp\"\nif (file.exists(file)) {\n library(oce)\n d <- read.oce(file)\n}\n\n\n\n"} {"package":"oce","topic":"read.adp.rdi","snippet":"### Name: read.adp.rdi\n### Title: Read an adp File in 
Teledyne/RDI Format\n### Aliases: read.adp.rdi\n\n### ** Examples\n\nadp <- read.adp.rdi(system.file(\"extdata\", \"adp_rdi.000\", package = \"oce\"))\nsummary(adp)\n\n\n\n"} {"package":"oce","topic":"read.adv","snippet":"### Name: read.adv\n### Title: Read an adv File\n### Aliases: read.adv\n\n### ** Examples\n\n## Not run: \n##D library(oce)\n##D # A nortek Vector file\n##D d <- read.oce(\"/data/archive/sleiwex/2008/moorings/m05/adv/nortek_1943/raw/adv_nortek_1943.vec\",\n##D from=as.POSIXct(\"2008-06-26 00:00:00\", tz=\"UTC\"),\n##D to=as.POSIXct(\"2008-06-26 00:00:10\", tz=\"UTC\"))\n##D plot(d, which=c(1:3,15))\n## End(Not run)\n\n\n"} {"package":"oce","topic":"read.adv.nortek","snippet":"### Name: read.adv.nortek\n### Title: Read an adv File\n### Aliases: read.adv.nortek\n\n### ** Examples\n\n## Not run: \n##D library(oce)\n##D # A nortek Vector file\n##D d <- read.oce(\"/data/archive/sleiwex/2008/moorings/m05/adv/nortek_1943/raw/adv_nortek_1943.vec\",\n##D from=as.POSIXct(\"2008-06-26 00:00:00\", tz=\"UTC\"),\n##D to=as.POSIXct(\"2008-06-26 00:00:10\", tz=\"UTC\"))\n##D plot(d, which=c(1:3,15))\n## End(Not run)\n\n\n"} {"package":"oce","topic":"read.adv.sontek.adr","snippet":"### Name: read.adv.sontek.adr\n### Title: Read an adv File\n### Aliases: read.adv.sontek.adr\n\n### ** Examples\n\n## Not run: \n##D library(oce)\n##D # A nortek Vector file\n##D d <- read.oce(\"/data/archive/sleiwex/2008/moorings/m05/adv/nortek_1943/raw/adv_nortek_1943.vec\",\n##D from=as.POSIXct(\"2008-06-26 00:00:00\", tz=\"UTC\"),\n##D to=as.POSIXct(\"2008-06-26 00:00:10\", tz=\"UTC\"))\n##D plot(d, which=c(1:3,15))\n## End(Not run)\n\n\n"} {"package":"oce","topic":"read.adv.sontek.serial","snippet":"### Name: read.adv.sontek.serial\n### Title: Read an adv File\n### Aliases: read.adv.sontek.serial\n\n### ** Examples\n\n## Not run: \n##D library(oce)\n##D # A nortek Vector file\n##D d <- read.oce(\"/data/archive/sleiwex/2008/moorings/m05/adv/nortek_1943/raw/adv_nortek_1943.vec\",\n##D from=as.POSIXct(\"2008-06-26 00:00:00\", tz=\"UTC\"),\n##D to=as.POSIXct(\"2008-06-26 00:00:10\", tz=\"UTC\"))\n##D plot(d, which=c(1:3,15))\n## End(Not run)\n\n\n"} {"package":"oce","topic":"read.adv.sontek.text","snippet":"### Name: read.adv.sontek.text\n### Title: Read an adv File\n### Aliases: read.adv.sontek.text\n\n### ** Examples\n\n## Not run: \n##D library(oce)\n##D # A nortek Vector file\n##D d <- read.oce(\"/data/archive/sleiwex/2008/moorings/m05/adv/nortek_1943/raw/adv_nortek_1943.vec\",\n##D from=as.POSIXct(\"2008-06-26 00:00:00\", tz=\"UTC\"),\n##D to=as.POSIXct(\"2008-06-26 00:00:10\", tz=\"UTC\"))\n##D plot(d, which=c(1:3,15))\n## End(Not run)\n\n\n"} {"package":"oce","topic":"read.ctd.aml","snippet":"### Name: read.ctd.aml\n### Title: Read a ctd File in AML Format\n### Aliases: read.ctd.aml\n\n### ** Examples\n\nlibrary(oce)\nf <- system.file(\"extdata\", \"ctd_aml.csv.gz\", package = \"oce\")\nd <- read.ctd.aml(f)\nsummary(d)\n\n\n\n"} {"package":"oce","topic":"read.ctd.sbe","snippet":"### Name: read.ctd.sbe\n### Title: Read a ctd File in Seabird Format\n### Aliases: read.ctd.sbe\n\n### ** Examples\n\nf <- system.file(\"extdata\", \"ctd.cnv.gz\", package = \"oce\")\nd <- read.ctd(f)\n\n\n\n"} {"package":"oce","topic":"read.oce","snippet":"### Name: read.oce\n### Title: Read an Oceanographic Data File\n### Aliases: read.oce\n\n### ** Examples\n\nlibrary(oce)\nx <- read.oce(system.file(\"extdata\", \"ctd.cnv.gz\", package = \"oce\"))\nplot(x) # summary with TS and profiles\nplotTS(x) # just the TS\n\n\n"} 
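[Editorial note, not from the oce manual: read.oce() infers a file's type and dispatches to a matching specialized reader, so for the Seabird file above it should agree with a direct read.ctd() call; oceMagic() is oce's type-detection helper, and its exact return value is left unstated here.]\nlibrary(oce)\nf <- system.file(\"extdata\", \"ctd.cnv.gz\", package = \"oce\")\noceMagic(f) # report the detected file type\nd1 <- read.oce(f) # dispatches on the detected type\nd2 <- read.ctd(f) # direct reader, as in the read.ctd.sbe example above\nstopifnot(all.equal(d1[[\"salinity\"]], d2[[\"salinity\"]]))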
{"package":"oce","topic":"read.odf","snippet":"### Name: read.odf\n### Title: Read an odf File\n### Aliases: read.odf\n\n### ** Examples\n\nlibrary(oce)\n#\n# 1. Read a CTD cast made on the Scotian Shelf. Note that the file's metadata\n# states that conductivity is in S/m, but it is really conductivity ratio,\n# so we must alter the unit before converting to a CTD object. Note that\n# read.odf() on this data file produces a warning suggesting that the user\n# repair the unit, using the method outlined here.\nodf <- read.odf(system.file(\"extdata\", \"CTD_BCD2014666_008_1_DN.ODF.gz\", package = \"oce\"))\nctd <- as.ctd(odf) # so we can e.g. extract potential temperature\nctd[[\"conductivityUnit\"]] <- list(unit = expression(), scale = \"\")\n#\n# 2. Make a CTD, and plot (with span to show NS)\nplot(ctd, span = 500)\n#\n# 3. Highlight bad data on TS diagram. (Note that the eos\n# is specified, because we will extract practical-salinity and\n# UNESCO-defined potential temperatures for the added points.)\nplotTS(ctd, type = \"o\", eos = \"unesco\") # use a line to show loops\nbad <- ctd[[\"QCFlag\"]] != 0\npoints(ctd[[\"salinity\"]][bad], ctd[[\"theta\"]][bad], col = \"red\", pch = 20)\n\n\n\n"} {"package":"oce","topic":"read.xbt","snippet":"### Name: read.xbt\n### Title: Read an xbt file\n### Aliases: read.xbt\n\n### ** Examples\n\nlibrary(oce)\nxbt <- read.oce(system.file(\"extdata\", \"xbt.edf\", package = \"oce\"))\nsummary(xbt)\nplot(xbt)\n\n\n\n"} {"package":"oce","topic":"read.xbt.edf","snippet":"### Name: read.xbt.edf\n### Title: Read an xbt File in Sippican Format\n### Aliases: read.xbt.edf\n\n### ** Examples\n\nlibrary(oce)\nxbt <- read.oce(system.file(\"extdata\", \"xbt.edf\", package = \"oce\"))\nsummary(xbt)\nplot(xbt)\n\n\n\n"} {"package":"oce","topic":"rescale","snippet":"### Name: rescale\n### Title: Rescale Values to lie in a Given Range\n### Aliases: rescale\n\n### ** Examples\n\nlibrary(oce)\n# Fake tow-yow data\nt <- seq(0, 600, 5)\nx <- 0.5 * t\nz <- 50 * (-1 + sin(2 * pi * t / 360))\nT <- 5 + 10 * exp(z / 100)\npalette <- oce.colorsViridis(100)\nzlim <- range(T)\ndrawPalette(zlim = zlim, col = palette)\nplot(x, z,\n type = \"p\", pch = 20, cex = 3,\n col = palette[rescale(T, xlow = zlim[1], xhigh = zlim[2], rlow = 1, rhigh = 100)]\n)\n\n\n"} {"package":"oce","topic":"retime","snippet":"### Name: retime\n### Title: Adjust The Time Within an oce Object\n### Aliases: retime\n\n### ** Examples\n\nlibrary(oce)\ndata(adv)\nadv2 <- retime(adv, 0, 1e-4, as.POSIXct(\"2008-07-01 00:00:00\", tz = \"UTC\"))\nplot(adv[[\"time\"]], adv2[[\"time\"]] - adv[[\"time\"]], type = \"l\")\n\n\n"} {"package":"oce","topic":"rotateAboutZ","snippet":"### Name: rotateAboutZ\n### Title: Rotate Velocity Components Within an oce Object\n### Aliases: rotateAboutZ\n\n### ** Examples\n\nlibrary(oce)\npar(mfcol = c(2, 3))\n# adp (acoustic Doppler profiler)\ndata(adp)\nplot(adp, which = \"uv\")\nmtext(\"adp\", side = 3, line = 0, adj = 1, cex = 0.7)\nadpRotated <- rotateAboutZ(adp, 30)\nplot(adpRotated, which = \"uv\")\nmtext(\"adp rotated 30 deg\", side = 3, line = 0, adj = 1, cex = 0.7)\n# adv (acoustic Doppler velocimeter)\ndata(adv)\nplot(adv, which = \"uv\")\nmtext(\"adv\", side = 3, line = 0, adj = 1, cex = 0.7)\nadvRotated <- rotateAboutZ(adv, 125)\nplot(advRotated, which = \"uv\")\nmtext(\"adv rotated 125 deg\", side = 3, line = 0, adj = 1, cex = 0.7)\n# cm (current meter)\ndata(cm)\nplot(cm, which = \"uv\")\nmtext(\"cm\", side = 3, line = 0, adj = 1, cex = 0.7)\ncmRotated <- rotateAboutZ(cm, 
30)\nplot(cmRotated, which = \"uv\")\nmtext(\"cm rotated 30 deg\", side = 3, line = 0, adj = 1, cex = 0.7)\n\n\n\n"} {"package":"oce","topic":"rsk","snippet":"### Name: rsk\n### Title: Sample rsk Data\n### Aliases: rsk\n\n### ** Examples\n\nlibrary(oce)\ndata(rsk)\n# The object doesn't \"know\" it is CTD until told so\nplot(rsk)\nplot(as.ctd(rsk))\n\n\n\n"} {"package":"oce","topic":"rskPatm","snippet":"### Name: rskPatm\n### Title: Estimate Atmospheric Pressure in an rsk Object\n### Aliases: rskPatm\n\n### ** Examples\n\nlibrary(oce)\ndata(rsk)\nprint(rskPatm(rsk))\n\n\n\n"} {"package":"oce","topic":"runlm","snippet":"### Name: runlm\n### Title: Calculate Running Linear Models\n### Aliases: runlm\n\n### ** Examples\n\n\nlibrary(oce)\n\n# Case 1: smooth a noisy signal\nx <- 1:100\ny <- 1 + x / 100 + sin(x / 5)\nyn <- y + rnorm(100, sd = 0.1)\nL <- 4\ncalc <- runlm(x, yn, L = L) # fit the noisy series, not the clean one\nplot(x, y, type = \"l\", lwd = 7, col = \"gray\")\npoints(x, yn, pch = 20, col = \"blue\")\nlines(x, calc$y, lwd = 2, col = \"red\")\n\n# Case 2: square of buoyancy frequency\ndata(ctd)\npar(mfrow = c(1, 1))\nplot(ctd, which = \"N2\")\nrho <- swRho(ctd)\nz <- swZ(ctd)\nzz <- seq(min(z), max(z), 0.1)\nN2 <- -9.8 / mean(rho) * runlm(z, rho, zz, deriv = 1)\nlines(N2, -zz, col = \"red\")\nlegend(\"bottomright\",\n lwd = 2, bg = \"white\",\n col = c(\"black\", \"red\"),\n legend = c(\"swN2()\", \"using runlm()\")\n)\n\n\n"} {"package":"oce","topic":"secondsToCtime","snippet":"### Name: secondsToCtime\n### Title: Express Time Interval as Colon-Separated String\n### Aliases: secondsToCtime\n\n### ** Examples\n\nlibrary(oce)\ncat(\" 10 s = \", secondsToCtime(10), \"\\n\", sep = \"\")\ncat(\" 61 s = \", secondsToCtime(61), \"\\n\", sep = \"\")\ncat(\"86400 s = \", secondsToCtime(86400), \"\\n\", sep = \"\")\n\n\n"} {"package":"oce","topic":"section-class","snippet":"### Name: section-class\n### Title: Class to Store Hydrographic Section Data\n### Aliases: section-class\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nplot(section[[\"station\", 1]])\npairs(cbind(z = -section[[\"pressure\"]], T = section[[\"temperature\"]], S = section[[\"salinity\"]]))\n# T profiles for first few stations in section, at common scale\npar(mfrow = c(3, 3))\nTlim <- range(section[[\"temperature\"]])\nylim <- rev(range(section[[\"pressure\"]]))\nfor (stn in section[[\"station\", 1:9]]) {\n plotProfile(stn, xtype = \"potential temperature\", ylim = ylim, Tlim = Tlim)\n}\n\n\n\n"} {"package":"oce","topic":"section","snippet":"### Name: section\n### Title: Sample section Data\n### Aliases: section\n\n### ** Examples\n\nlibrary(oce)\n# Gulf Stream\ndata(section)\nGS <- subset(section, 113 <= stationId & stationId <= 129)\nGSg <- sectionGrid(GS, p = seq(0, 5000, 100))\nplot(GSg, span = 1500) # increase span to show more coastline\n\n\n\n"} {"package":"oce","topic":"sectionAddStation","snippet":"### Name: sectionAddStation\n### Title: Add a ctd Profile to a section Object\n### Aliases: sectionAddStation sectionAddCtd\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nctd2 <- ctd\nctd2[[\"temperature\"]] <- ctd2[[\"temperature\"]] + 0.5\nctd2[[\"latitude\"]] <- ctd2[[\"latitude\"]] + 0.1\nsection <- as.section(c(\"ctd\", \"ctd2\"))\nctd3 <- ctd\nctd3[[\"temperature\"]] <- ctd[[\"temperature\"]] + 1\nctd3[[\"latitude\"]] <- ctd[[\"latitude\"]] + 0.1\nctd3[[\"station\"]] <- \"Stn 3\"\nsectionAddStation(section, ctd3)\n\n\n\n"} {"package":"oce","topic":"sectionGrid","snippet":"### Name: sectionGrid\n### Title: Grid a Section in Pressure Space\n### Aliases: 
sectionGrid\n\n### ** Examples\n\n# Gulf Stream\nlibrary(oce)\ndata(section)\nGS <- subset(section, 113 <= stationId & stationId <= 129)\nGSg <- sectionGrid(GS, p = seq(0, 5000, 100))\nplot(GSg, which = \"temperature\")\n## Show effects of various depth schemes\n\n\n\n"} {"package":"oce","topic":"sectionSmooth","snippet":"### Name: sectionSmooth\n### Title: Smooth a Section\n### Aliases: sectionSmooth\n\n### ** Examples\n\n# Unsmoothed (Gulf Stream)\nlibrary(oce)\ndata(section)\ngs <- subset(section, 115 <= stationId & stationId <= 125)\npar(mfrow = c(2, 2))\n\nplot(gs, which = \"temperature\")\nmtext(\"Original data, without smoothing\", line = 0.5)\n\n# Spline\ngsg <- sectionGrid(gs, p = seq(0, 5000, 100))\ngsSpline <- sectionSmooth(gsg, \"spline\")\nplot(gsSpline, which = \"temperature\")\nmtext(\"sectionSmooth(..., method=\\\"spline\\\")\", line = 0.5)\n\n# Barnes\ngsBarnes <- sectionSmooth(gs, \"barnes\", xr = 50, yr = 200)\nplot(gsBarnes, which = \"temperature\")\nmtext(\"sectionSmooth(..., method=\\\"barnes\\\")\", line = 0.5)\n\n\n\n"} {"package":"oce","topic":"sectionSort","snippet":"### Name: sectionSort\n### Title: Sort a Section\n### Aliases: sectionSort\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nsectionByLongitude <- sectionSort(section, by = \"longitude\")\nhead(section)\nhead(sectionByLongitude)\n\n\n\n"} {"package":"oce","topic":"setFlags,adp-method","snippet":"### Name: setFlags,adp-method\n### Title: Set Data-Quality Flags within a adp Object\n### Aliases: setFlags,adp-method\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\n\n# Example 1: flag first 10 samples in a mid-depth bin of beam 1\ni1 <- data.frame(1:20, 40, 1)\nadpQC <- initializeFlags(adp, \"v\", 2)\nadpQC <- setFlags(adpQC, \"v\", i1, 3)\nadpClean1 <- handleFlags(adpQC, flags = list(3), actions = list(\"NA\"))\npar(mfrow = c(2, 1))\n# Top: original, bottom: altered\nplot(adp, which = \"u1\")\nplot(adpClean1, which = \"u1\")\n\n# Example 2: percent-good and error-beam scheme\nv <- adp[[\"v\"]]\ni2 <- array(FALSE, dim = dim(v))\ng <- adp[[\"g\", \"numeric\"]]\n# Thresholds on percent \"goodness\" and error \"velocity\"\nG <- 25\nV4 <- 0.45\nfor (k in 1:3) {\n i2[, , k] <- ((g[, , k] + g[, , 4]) < G) | (v[, , 4] > V4)\n}\nadpQC2 <- initializeFlags(adp, \"v\", 2)\nadpQC2 <- setFlags(adpQC2, \"v\", i2, 3)\nadpClean2 <- handleFlags(adpQC2, flags = list(3), actions = list(\"NA\"))\n# Top: original, bottom: altered\nplot(adp, which = \"u1\")\nplot(adpClean2, which = \"u1\") # differs at 8h and 20h\n\n\n\n"} {"package":"oce","topic":"setFlags,ctd-method","snippet":"### Name: setFlags,ctd-method\n### Title: Set Data-Quality Flags within a ctd Object\n### Aliases: setFlags,ctd-method\n\n### ** Examples\n\nlibrary(oce)\n# Example 1: Range-check salinity\ndata(ctdRaw)\n# Salinity and temperature range checks\nqc <- ctdRaw\n# Initialize flags to 2, meaning good data in the default\n# scheme for handleFlags(ctd).\nqc <- initializeFlags(qc, \"salinity\", 2)\nqc <- initializeFlags(qc, \"temperature\", 2)\n# Flag bad salinities as 4\noddS <- with(qc[[\"data\"]], salinity < 25 | 40 < salinity)\nqc <- setFlags(qc, name = \"salinity\", i = oddS, value = 4)\n# Flag bad temperatures as 4\noddT <- with(qc[[\"data\"]], temperature < -2 | 40 < temperature)\nqc <- setFlags(qc, name = \"temperature\", i = oddT, value = 4)\n# Compare results in TS space\npar(mfrow = c(2, 1))\nplotTS(ctdRaw)\nplotTS(handleFlags(qc, flags = c(1, 3:9)))\n\n\n\n"} {"package":"oce","topic":"showMetadataItem","snippet":"### Name: showMetadataItem\n### 
Title: Show an Item in the metadata Slot of an oce Object\n### Aliases: showMetadataItem\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nshowMetadataItem(ctd, \"ship\", \"ship\")\n\n\n"} {"package":"oce","topic":"siderealTime","snippet":"### Name: siderealTime\n### Title: Convert From POSIXt Time to Sidereal Time\n### Aliases: siderealTime\n\n### ** Examples\n\n\nt <- ISOdatetime(1978, 11, 13, 0, 0, 0, tz = \"UTC\")\nprint(siderealTime(t))\n\n\n\n"} {"package":"oce","topic":"snakeToCamel","snippet":"### Name: snakeToCamel\n### Title: Convert From Snake-Case to Camel-Case Notation\n### Aliases: snakeToCamel\n\n### ** Examples\n\nlibrary(oce)\nsnakeToCamel(\"PARAMETER_DATA_MODE\") # \"parameterDataMode\"\nsnakeToCamel(\"PARAMETER\") # \"parameter\"\nsnakeToCamel(\"HISTORY_QCTEST\") # \"historyQctest\"\nsnakeToCamel(\"HISTORY_QCTEST\", \"QC\") # \"historyQCTest\"\nsnakeToCamel(\"PROFILE_DOXY_QC\") # \"profileDoxyQc\"\nsnakeToCamel(\"PROFILE_DOXY_QC\", \"QC\") # \"profileDoxyQC\"\n\n\n"} {"package":"oce","topic":"standardDepths","snippet":"### Name: standardDepths\n### Title: Standard Oceanographic Depths\n### Aliases: standardDepths\n\n### ** Examples\n\ndepth <- standardDepths()\ndepth1 <- standardDepths(1)\nplot(depth, depth)\npoints(depth1, depth1, col = 2, pch = 20, cex = 1 / 2)\n\n\n\n"} {"package":"oce","topic":"[[,adp-method","snippet":"### Name: [[,adp-method\n### Title: Extract Something From an adp Object\n### Aliases: [[,adp-method\n\n### ** Examples\n\ndata(adp)\n# Tests for beam 1, distance bin 1, first 5 observation times\nadp[[\"v\"]][1:5, 1, 1]\nadp[[\"a\"]][1:5, 1, 1]\nadp[[\"a\", \"numeric\"]][1:5, 1, 1]\nas.numeric(adp[[\"a\"]][1:5, 1, 1]) # same as above\n\n\n\n"} {"package":"oce","topic":"[[,adv-method","snippet":"### Name: [[,adv-method\n### Title: Extract Something from an adv Object\n### Aliases: [[,adv-method\n\n### ** Examples\n\ndata(adv)\nhead(adv[[\"q\"]]) # in raw form\nhead(adv[[\"q\", \"numeric\"]]) # in numeric form\n\n\n\n"} {"package":"oce","topic":"[[,amsr-method","snippet":"### Name: [[,amsr-method\n### Title: Extract Something From an amsr Object\n### Aliases: [[,amsr-method\n\n### ** Examples\n\n# Histogram of SST values (for an old-format dataset)\nlibrary(oce)\ndata(amsr)\nhist(amsr[[\"SST\"]])\n\n\n\n"} {"package":"oce","topic":"[[,argo-method","snippet":"### Name: [[,argo-method\n### Title: Extract Something From an argo Object\n### Aliases: [[,argo-method\n\n### ** Examples\n\ndata(argo)\n# 1. show that dataset has 223 profiles, each with 56 levels\ndim(argo[[\"temperature\"]])\n\n# 2. 
show importance of focussing on data flagged 'good'\nfivenum(argo[[\"salinity\"]], na.rm = TRUE)\nfivenum(argo[[\"salinity\"]][argo[[\"salinityFlag\"]] == 1], na.rm = TRUE)\n\n\n\n"} {"package":"oce","topic":"[[,ctd-method","snippet":"### Name: [[,ctd-method\n### Title: Extract Something From a ctd Object\n### Aliases: [[,ctd-method\n\n### ** Examples\n\ndata(ctd)\nhead(ctd[[\"temperature\"]])\n\n\n\n"} {"package":"oce","topic":"[[,section-method","snippet":"### Name: [[,section-method\n### Title: Extract Something From a section Object\n### Aliases: [[,section-method\n\n### ** Examples\n\ndata(section)\nlength(section[[\"latitude\"]])\nlength(section[[\"latitude\", \"byStation\"]])\n# Vector of all salinities, for all stations\nSv <- section[[\"salinity\"]]\n# List of salinities, grouped by station\nSl <- section[[\"salinity\", \"byStation\"]]\n# First station salinities\nSl[[1]]\n\n\n\n"} {"package":"oce","topic":"[[,topo-method","snippet":"### Name: [[,topo-method\n### Title: Extract Something From a topo Object\n### Aliases: [[,topo-method\n\n### ** Examples\n\ndata(topoWorld)\ndim(topoWorld[[\"z\"]])\n\n\n\n"} {"package":"oce","topic":"[[<-,ctd-method","snippet":"### Name: [[<-,ctd-method\n### Title: Replace Parts of a ctd Object\n### Aliases: [[<-,ctd-method\n\n### ** Examples\n\ndata(ctd)\nsummary(ctd)\n# Move the CTD profile a nautical mile north.\nctd[[\"latitude\"]] <- 1 / 60 + ctd[[\"latitude\"]] # acts in metadata\n# Increase the salinity by 0.01.\nctd[[\"salinity\"]] <- 0.01 + ctd[[\"salinity\"]] # acts in data\nsummary(ctd)\n\n\n\n"} {"package":"oce","topic":"[[<-,section-method","snippet":"### Name: [[<-,section-method\n### Title: Replace Parts of a section Object\n### Aliases: [[<-,section-method\n\n### ** Examples\n\n# 1. Change section ID from a03 to A03\ndata(section)\nsection[[\"sectionId\"]]\nsection[[\"sectionId\"]] <- toupper(section[[\"sectionId\"]])\nsection[[\"sectionId\"]]\n# 2. Add a millidegree to temperatures at station 10\nsection[[\"station\", 10]][[\"temperature\"]] <-\n 1e-3 + section[[\"station\", 10]][[\"temperature\"]]\n\n\n\n"} {"package":"oce","topic":"subset,adp-method","snippet":"### Name: subset,adp-method\n### Title: Subset an adp Object\n### Aliases: subset,adp-method\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\n# 1. Look at first part of time series, organized by time\nearlyTime <- subset(adp, time < mean(range(adp[[\"time\"]])))\nplot(earlyTime)\n\n# 2. 
Look at first ten ensembles (AKA profiles)\nen <- adp[[\"ensembleNumber\"]]\nfirstTen <- subset(adp, ensembleNumber < en[11])\nplot(firstTen)\n\n\n\n"} {"package":"oce","topic":"subset,adv-method","snippet":"### Name: subset,adv-method\n### Title: Subset an adv Object\n### Aliases: subset,adv-method\n\n### ** Examples\n\nlibrary(oce)\ndata(adv)\nplot(adv)\nplot(subset(adv, time < mean(range(adv[[\"time\"]]))))\n\n\n\n"} {"package":"oce","topic":"subset,amsr-method","snippet":"### Name: subset,amsr-method\n### Title: Subset an amsr Object\n### Aliases: subset,amsr-method\n\n### ** Examples\n\nlibrary(oce)\ndata(amsr) # see ?amsr for how to read and composite such objects\nsub <- subset(amsr, -75 < longitude & longitude < -45)\nsub <- subset(sub, 40 < latitude & latitude < 50)\nplot(sub)\ndata(coastlineWorld)\nlines(coastlineWorld[[\"longitude\"]], coastlineWorld[[\"latitude\"]])\n\n\n\n"} {"package":"oce","topic":"subset,argo-method","snippet":"### Name: subset,argo-method\n### Title: Subset an argo Object\n### Aliases: subset,argo-method subset.argo\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\n\n# Example 1: subset by time, longitude, and pressure\npar(mfrow = c(2, 2))\nplot(argo)\nplot(subset(argo, time > mean(time)))\nplot(subset(argo, longitude > mean(longitude)))\nplot(subset(argoGrid(argo), pressure > 500 & pressure < 1000), which = 5)\n\n\n\n"} {"package":"oce","topic":"subset,cm-method","snippet":"### Name: subset,cm-method\n### Title: Subset a cm Object\n### Aliases: subset,cm-method\n\n### ** Examples\n\nlibrary(oce)\ndata(cm)\nplot(cm)\nplot(subset(cm, time < mean(range(cm[[\"time\"]]))))\n\n\n\n"} {"package":"oce","topic":"subset,coastline-method","snippet":"### Name: subset,coastline-method\n### Title: Subset a coastline Object\n### Aliases: subset,coastline-method subset.coastline\n\n### ** Examples\n\nlibrary(oce)\ndata(coastlineWorld)\n# Subset to a box centred on Nova Scotia, Canada\nif (requireNamespace(\"sf\")) {\n cl <- subset(coastlineWorld, -80 < lon & lon < -50 & 30 < lat & lat < 60)\n # The plot demonstrates that the trimming is as requested.\n plot(cl, clon = -65, clat = 45, span = 6000)\n rect(-80, 30, -50, 60, bg = \"transparent\", border = \"red\")\n}\n\n\n"} {"package":"oce","topic":"subset,ctd-method","snippet":"### Name: subset,ctd-method\n### Title: Subset a ctd Object\n### Aliases: subset,ctd-method\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nplot(ctd)\n# Example 1\nplot(subset(ctd, pressure < 10))\n# Example 2\nplot(subset(ctd, indices = 1:10))\n\n\n\n"} {"package":"oce","topic":"subset,echosounder-method","snippet":"### Name: subset,echosounder-method\n### Title: Subset an echosounder Object\n### Aliases: subset,echosounder-method\n\n### ** Examples\n\nlibrary(oce)\ndata(echosounder)\nplot(echosounder)\nplot(subset(echosounder, depth < 10))\nplot(subset(echosounder, time < mean(range(echosounder[[\"time\"]]))))\n\n\n\n"} {"package":"oce","topic":"subset,met-method","snippet":"### Name: subset,met-method\n### Title: Subset a met Object\n### Aliases: subset,met-method\n\n### ** Examples\n\nlibrary(oce)\ndata(met)\n# Few days surrounding Hurricane Juan\nplot(subset(met, time > as.POSIXct(\"2003-09-27\", tz = \"UTC\")))\n\n\n\n"} {"package":"oce","topic":"subset,oce-method","snippet":"### Name: subset,oce-method\n### Title: Subset an oce Object\n### Aliases: subset,oce-method\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\n# Select just the top 10 metres (pressure less than 10 dbar)\ntop10 <- subset(ctd, pressure < 10)\npar(mfrow = c(1, 
2))\nplotProfile(ctd)\nplotProfile(top10)\n\n\n"} {"package":"oce","topic":"subset,rsk-method","snippet":"### Name: subset,rsk-method\n### Title: Subset a rsk Object\n### Aliases: subset,rsk-method\n\n### ** Examples\n\nlibrary(oce)\ndata(rsk)\nplot(rsk)\nplot(subset(rsk, time < mean(range(rsk[[\"time\"]]))))\n\n\n\n"} {"package":"oce","topic":"subset,sealevel-method","snippet":"### Name: subset,sealevel-method\n### Title: Subset a sealevel Object\n### Aliases: subset,sealevel-method\n\n### ** Examples\n\nlibrary(oce)\ndata(sealevel)\nplot(sealevel)\nplot(subset(sealevel, time < mean(range(sealevel[[\"time\"]]))))\n\n\n\n"} {"package":"oce","topic":"subset,section-method","snippet":"### Name: subset,section-method\n### Title: Subset a section Object\n### Aliases: subset,section-method subset.section\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\n\n# Example 1. Stations within 500 km of the first station\nstarting <- subset(section, distance < 500)\n\n# Example 2. Stations east of 50W\neast <- subset(section, longitude > (-50))\n\n# Example 3. Gulf Stream\nGS <- subset(section, 113 <= stationId & stationId <= 129)\n\n# Example 4. Only stations with more than 5 pressure levels\nlong <- subset(section, length(pressure) > 5)\n\n# Example 5. Only stations that have some data in top 50 dbar\nsurfacing <- subset(section, min(pressure) < 50)\n\n# Example 6. Similar to #4, but done in more detailed way\nlong <- subset(section,\n indices = unlist(lapply(\n section[[\"station\"]],\n function(s) 5 < length(s[[\"pressure\"]])\n ))\n)\n\n\n\n"} {"package":"oce","topic":"subset,topo-method","snippet":"### Name: subset,topo-method\n### Title: Subset a topo Object\n### Aliases: subset,topo-method\n\n### ** Examples\n\n# northern hemisphere\nlibrary(oce)\ndata(topoWorld)\nplot(subset(topoWorld, latitude > 0))\n\n\n\n"} {"package":"oce","topic":"subset,xbt-method","snippet":"### Name: subset,xbt-method\n### Title: Subset an xbt Object\n### Aliases: subset,xbt-method\n\n### ** Examples\n\nlibrary(oce)\ndata(xbt)\nplot(xbt)\nplot(subset(xbt, depth < mean(range(xbt[[\"depth\"]]))))\n\n\n\n"} {"package":"oce","topic":"summary,adv-method","snippet":"### Name: summary,adv-method\n### Title: Summarize an adv Object\n### Aliases: summary,adv-method\n\n### ** Examples\n\nlibrary(oce)\ndata(adv)\nsummary(adv)\n\n\n\n"} {"package":"oce","topic":"summary,argo-method","snippet":"### Name: summary,argo-method\n### Title: Summarize an argo Object\n### Aliases: summary,argo-method summary.argo\n\n### ** Examples\n\nlibrary(oce)\ndata(argo)\nsummary(argo)\n\n\n\n"} {"package":"oce","topic":"summary,cm-method","snippet":"### Name: summary,cm-method\n### Title: Summarize a cm Object\n### Aliases: summary,cm-method\n\n### ** Examples\n\nlibrary(oce)\ndata(cm)\nsummary(cm)\n\n\n\n"} {"package":"oce","topic":"summary,ctd-method","snippet":"### Name: summary,ctd-method\n### Title: Summarize a ctd Object\n### Aliases: summary,ctd-method summary.ctd\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nsummary(ctd)\n\n\n\n"} {"package":"oce","topic":"summary,lisst-method","snippet":"### Name: summary,lisst-method\n### Title: Summarize a lisst Object\n### Aliases: summary,lisst-method\n\n### ** Examples\n\nlibrary(oce)\ndata(lisst)\nsummary(lisst)\n\n\n\n"} {"package":"oce","topic":"summary,lobo-method","snippet":"### Name: summary,lobo-method\n### Title: Summarize a lobo Object\n### Aliases: summary,lobo-method\n\n### ** Examples\n\n\nlibrary(oce)\ndata(lobo)\nsummary(lobo)\n\n\n"} 
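A note on the subset() examples above: each call returns a new object of the same class, so the condition form (Examples 1-5 for section objects) and the indices form (Example 6) can be chained. A minimal sketch, assuming only the sample section dataset shipped with oce; the chained call itself is not taken from the oce documentation:

library(oce)
data(section)
# Condition form: Gulf Stream stations, as in Example 3 above
GS <- subset(section, 113 <= stationId & stationId <= 129)
# Indices form, as in Example 6 above: keep only the stations
# that sample deeper than 1000 dbar
deepGS <- subset(GS, indices = unlist(lapply(
    GS[["station"]],
    function(s) 1000 < max(s[["pressure"]])
)))
summary(deepGS)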
{"package":"oce","topic":"summary,oce-method","snippet":"### Name: summary,oce-method\n### Title: Summarize an oce Object\n### Aliases: summary,oce-method\n\n### ** Examples\n\no <- new(\"oce\")\nsummary(o)\n\n\n"} {"package":"oce","topic":"summary,rsk-method","snippet":"### Name: summary,rsk-method\n### Title: Summarize a rsk Object\n### Aliases: summary,rsk-method\n\n### ** Examples\n\nlibrary(oce)\ndata(rsk)\nsummary(rsk)\n\n\n\n"} {"package":"oce","topic":"summary,sealevel-method","snippet":"### Name: summary,sealevel-method\n### Title: Summarize a sealevel Object\n### Aliases: summary,sealevel-method\n\n### ** Examples\n\nlibrary(oce)\ndata(sealevel)\nsummary(sealevel)\n\n\n\n"} {"package":"oce","topic":"summary,section-method","snippet":"### Name: summary,section-method\n### Title: Summarize a section Object\n### Aliases: summary,section-method summary.section\n\n### ** Examples\n\nlibrary(oce)\ndata(section)\nsummary(section)\n\n\n\n"} {"package":"oce","topic":"summary,topo-method","snippet":"### Name: summary,topo-method\n### Title: Summarize a topo Object\n### Aliases: summary,topo-method\n\n### ** Examples\n\nlibrary(oce)\ndata(topoWorld)\nsummary(topoWorld)\n\n\n\n"} {"package":"oce","topic":"sunAngle","snippet":"### Name: sunAngle\n### Title: Solar Angle as Function of Space and Time\n### Aliases: sunAngle\n\n### ** Examples\n\n\nrise <- as.POSIXct(\"2011-03-03 06:49:00\", tz = \"UTC\") + 4 * 3600\nset <- as.POSIXct(\"2011-03-03 18:04:00\", tz = \"UTC\") + 4 * 3600\nmismatch <- function(lonlat) {\n sunAngle(rise, lonlat[1], lonlat[2])$altitude^2 + sunAngle(set, lonlat[1], lonlat[2])$altitude^2\n}\nresult <- optim(c(1, 1), mismatch)\nlonHfx <- (-63.55274)\nlatHfx <- 44.65\ndist <- geodDist(result$par[1], result$par[2], lonHfx, latHfx)\ncat(sprintf(\n \"Infer Halifax latitude %.2f and longitude %.2f; distance mismatch %.0f km\",\n result$par[2], result$par[1], dist\n))\n\n\n\n"} {"package":"oce","topic":"sunDeclinationRightAscension","snippet":"### Name: sunDeclinationRightAscension\n### Title: Sun Declination and Right Ascension\n### Aliases: sunDeclinationRightAscension\n\n### ** Examples\n\n# Example 24.a in Meeus (1991) (page 158 PDF, 153 print)\ntime <- as.POSIXct(\"1992-10-13 00:00:00\", tz = \"UTC\")\na <- sunDeclinationRightAscension(time, apparent = TRUE)\nstopifnot(abs(a$declination - (-7.78507)) < 0.00004)\nstopifnot(abs(a$rightAscension - (-161.61919)) < 0.00003)\nb <- sunDeclinationRightAscension(time)\n# check against previous results, to protect aginst code-drift errors\nstopifnot(abs(b$declination - (-7.785464443)) < 0.000000001)\nstopifnot(abs(b$rightAscension - (-161.6183305)) < 0.0000001)\n\n\n\n"} {"package":"oce","topic":"swAbsoluteSalinity","snippet":"### Name: swAbsoluteSalinity\n### Title: Seawater Absolute Salinity (GSW Formulation)\n### Aliases: swAbsoluteSalinity\n\n### ** Examples\n\nswAbsoluteSalinity(35.5, 300, 260, 16) # 35.67136\n\n\n\n"} {"package":"oce","topic":"swAlphaOverBeta","snippet":"### Name: swAlphaOverBeta\n### Title: Ratio of Seawater Thermal Expansion Coefficient to Haline\n### Contraction Coefficient\n### Aliases: swAlphaOverBeta\n\n### ** Examples\n\nswAlphaOverBeta(40, 10, 4000, eos = \"unesco\") # 0.3476\n\n\n\n"} {"package":"oce","topic":"swCSTp","snippet":"### Name: swCSTp\n### Title: Electrical Conductivity Ratio From Salinity, Temperature and\n### Pressure\n### Aliases: swCSTp\n\n### ** Examples\n\nstopifnot(abs(1.0 - swCSTp(35, T90fromT68(15), 0, eos = \"unesco\")) < 1e-7)\nstopifnot(abs(1.0 - swCSTp(34.25045, T90fromT68(15), 
2000, eos = \"unesco\")) < 1e-7)\nstopifnot(abs(1.0 - swCSTp(34.25045, T90fromT68(15), 2000, eos = \"gsw\")) < 1e-7)\n\n\n\n"} {"package":"oce","topic":"swConservativeTemperature","snippet":"### Name: swConservativeTemperature\n### Title: Seawater Conservative Temperature (GSW Formulation)\n### Aliases: swConservativeTemperature\n\n### ** Examples\n\nswConservativeTemperature(35, 10, 1000, 188, 4) # 9.86883\n\n\n\n"} {"package":"oce","topic":"swDepth","snippet":"### Name: swDepth\n### Title: Water Depth\n### Aliases: swDepth\n\n### ** Examples\n\nd <- swDepth(10, 45)\n\n\n\n"} {"package":"oce","topic":"swLapseRate","snippet":"### Name: swLapseRate\n### Title: Seawater Lapse Rate\n### Aliases: swLapseRate\n\n### ** Examples\n\nlr <- swLapseRate(40, 40, 10000) # 3.255976e-4\n\n\n\n"} {"package":"oce","topic":"swN2","snippet":"### Name: swN2\n### Title: Squared Buoyancy Frequency for Seawater\n### Aliases: swN2\n\n### ** Examples\n\n\nlibrary(oce)\ndata(ctd)\n# Left panel: density\np <- ctd[[\"pressure\"]]\nylim <- rev(range(p))\npar(mfrow = c(1, 2), mar = c(3, 3, 1, 1), mgp = c(2, 0.7, 0))\nplot(ctd[[\"sigmaTheta\"]], p, ylim = ylim, type = \"l\", xlab = expression(sigma[theta]))\n# Right panel: N2, with default settings (black) and with df=2 (red)\nN2 <- swN2(ctd)\nplot(N2, p, ylim = ylim, xlab = \"N2 [1/s^2]\", ylab = \"p\", type = \"l\")\nlines(swN2(ctd, df = 3), p, col = 2)\n\n\n\n"} {"package":"oce","topic":"swPressure","snippet":"### Name: swPressure\n### Title: Water Pressure\n### Aliases: swPressure\n\n### ** Examples\n\nswPressure(9712.653, 30, eos = \"unesco\") # 10000\nswPressure(9712.653, 30, eos = \"gsw\") # 9998.863\n\n\n\n"} {"package":"oce","topic":"swRho","snippet":"### Name: swRho\n### Title: Seawater Density\n### Aliases: swRho\n\n### ** Examples\n\nlibrary(oce)\n# The numbers in the comments are the check values listed in reference 1;\n# note that temperature in that reference was on the T68 scale, but that\n# the present function works with the ITS-90 scale, so a conversion\n# is required.\nswRho(35, T90fromT68(5), 0, eos = \"unesco\") # 1027.67547\nswRho(35, T90fromT68(5), 10000, eos = \"unesco\") # 1069.48914\nswRho(35, T90fromT68(25), 0, eos = \"unesco\") # 1023.34306\nswRho(35, T90fromT68(25), 10000, eos = \"unesco\") # 1062.53817\n\n\n\n"} {"package":"oce","topic":"swRrho","snippet":"### Name: swRrho\n### Title: Density Ratio\n### Aliases: swRrho\n\n### ** Examples\n\nlibrary(oce)\ndata(ctd)\nu <- swRrho(ctd, eos = \"unesco\")\ng <- swRrho(ctd, eos = \"gsw\")\np <- ctd[[\"p\"]]\nplot(u, p, ylim = rev(range(p)), type = \"l\", xlab = expression(R[rho]))\nlines(g, p, lty = 2, col = \"red\")\nlegend(\"topright\", lty = 1:2, legend = c(\"unesco\", \"gsw\"), col = c(\"black\", \"red\"))\n\n\n\n"} {"package":"oce","topic":"swSCTp","snippet":"### Name: swSCTp\n### Title: Practical Salinity From Electrical Conductivity, Temperature and\n### Pressure\n### Aliases: swSCTp\n\n### ** Examples\n\n# 1. Demonstrate agreement with test value in UNESCO documents\nswSCTp(1, T90fromT68(15), 0, eos = \"unesco\") # expect 35\n# 2. Demonstrate agreement of gsw and unesco, S>2 case\nswSCTp(1, T90fromT68(15), 0, eos = \"gsw\") # again, expect 35\n# 3. 
Demonstrate close values even in very brackish water\nswSCTp(0.02, 10, 100, eos = \"gsw\") # 0.6013981\nswSCTp(0.02, 10, 100, eos = \"unesco\") # 0.6011721\n\n\n\n"} {"package":"oce","topic":"swSR","snippet":"### Name: swSR\n### Title: Seawater Reference Salinity (GSW Formulation)\n### Aliases: swSR\n\n### ** Examples\n\nSR <- swSR(35.0) # 35.16504\n\n\n\n"} {"package":"oce","topic":"swSTrho","snippet":"### Name: swSTrho\n### Title: Seawater Salinity From Temperature and Density\n### Aliases: swSTrho\n\n### ** Examples\n\nswSTrho(10, 22, 0, eos = \"gsw\") # 28.76285\nswSTrho(10, 22, 0, eos = \"unesco\") # 28.651625\n\n\n\n"} {"package":"oce","topic":"swSigma","snippet":"### Name: swSigma\n### Title: Seawater Density Anomaly\n### Aliases: swSigma\n\n### ** Examples\n\nlibrary(oce)\nswSigma(35, 13, 1000, longitude = 300, latitude = 30, eos = \"gsw\") # 30.82374\nswSigma(35, T90fromT68(13), 1000, eos = \"unesco\") # 30.8183\n\n\n\n"} {"package":"oce","topic":"swSigmaT","snippet":"### Name: swSigmaT\n### Title: Seawater Quasi-Potential Density Anomaly\n### Aliases: swSigmaT\n\n### ** Examples\n\nswSigmaT(35, 13, 1000, longitude = 300, latitude = 30, eos = \"gsw\") # 26.39623\nswSigmaT(35, T90fromT68(13), 1000, eos = \"unesco\") # 26.39354\n\n\n\n"} {"package":"oce","topic":"swSigmaTheta","snippet":"### Name: swSigmaTheta\n### Title: Seawater Potential Density Anomaly\n### Aliases: swSigmaTheta\n\n### ** Examples\n\nstopifnot(abs(26.4212790994 - swSigmaTheta(35, 13, 1000, eos = \"unesco\")) < 1e-7)\n\n\n\n"} {"package":"oce","topic":"swSoundAbsorption","snippet":"### Name: swSoundAbsorption\n### Title: Seawater Sound Absorption\n### Aliases: swSoundAbsorption\n\n### ** Examples\n\n# Fisher & Simmons (1977 table IV) gives 0.52 dB/km for 35 PSU, 5 degC, 500 atm\n# (4990 dbar of water) and 10 kHz\nalpha <- swSoundAbsorption(35, 4, 4990, 10e3)\n\n# reproduce part of Fig 8 of Francois and Garrison (1982)\nf <- 1e3 * 10^(seq(-1, 3, 0.1)) # in Hz (0.1 to 1000 kHz)\nplot(f / 1000, 1e3 * swSoundAbsorption(f, 35, 10, 0, formulation = \"fr\"),\n xlab = \" Freq [kHz]\", ylab = \" dB/km\", type = \"l\", log = \"xy\"\n)\nlines(f / 1000, 1e3 * swSoundAbsorption(f, 0, 10, 0, formulation = \"fr\"), lty = \"dashed\")\nlegend(\"topleft\", lty = c(\"solid\", \"dashed\"), legend = c(\"S=35\", \"S=0\"))\n\n\n\n"} {"package":"oce","topic":"swSoundSpeed","snippet":"### Name: swSoundSpeed\n### Title: Seawater Sound Speed\n### Aliases: swSoundSpeed\n\n### ** Examples\n\nswSoundSpeed(40, T90fromT68(40), 10000) # 1731.995 (p48 of Fofonoff + Millard 1983)\n\n\n\n"} {"package":"oce","topic":"swSpecificHeat","snippet":"### Name: swSpecificHeat\n### Title: Seawater Specific Heat\n### Aliases: swSpecificHeat\n\n### ** Examples\n\nswSpecificHeat(40, T90fromT68(40), 10000, eos = \"unesco\") # 3949.499\n\n\n\n"} {"package":"oce","topic":"swSpice","snippet":"### Name: swSpice\n### Title: Seawater Spiciness\n### Aliases: swSpice\n\n### ** Examples\n\n# Contrast the two formulations.\nlibrary(oce)\ndata(ctd)\np <- ctd[[\"pressure\"]]\nplot(swSpice(ctd, eos = \"unesco\"), p,\n xlim = c(-2.7, -1.5), ylim = rev(range(p)),\n xlab = \"Spice\", ylab = \"Pressure (dbar)\"\n)\npoints(swSpice(ctd, eos = \"gsw\"), p, col = 2)\nmtext(\"black=unesco, red=gsw\")\n\n\n\n"} {"package":"oce","topic":"swSstar","snippet":"### Name: swSstar\n### Title: Seawater Preformed Salinity (GSW Formulation)\n### Aliases: swSstar\n\n### ** Examples\n\nswSstar(35.5, 300, 260, 16) # 35.66601\n\n\n\n"} {"package":"oce","topic":"swTFreeze","snippet":"### Name: swTFreeze\n### 
Title: Seawater Freezing Temperature\n### Aliases: swTFreeze\n\n### ** Examples\n\n# 1. Test for a check-value given in reference 1. This value, -2.588567 degC,\n# is in the 1968 temperature scale (IPTS-68), but swTFreeze reports\n# in the newer ITS-90 scale, so we must convert before checking.\nTcheck <- -2.588567 # IPTS-68\nT <- swTFreeze(salinity = 40, pressure = 500, eos = \"unesco\")\nstopifnot(abs(Tcheck - T68fromT90(T)) < 1e-6)\n\n# 2. Compare unesco and gsw formulations.\ndata(ctd)\np <- ctd[[\"pressure\"]]\npar(mfrow = c(1, 2), mar = c(3, 3, 1, 2), mgp = c(2, 0.7, 0))\nplot(swTFreeze(ctd, eos = \"unesco\"),\n p,\n xlab = \"unesco\", ylim = rev(range(p))\n)\nplot(swTFreeze(ctd, eos = \"unesco\") - swTFreeze(ctd, eos = \"gsw\"),\n p,\n xlab = \"unesco-gsw\", ylim = rev(range(p))\n)\n\n\n\n"} {"package":"oce","topic":"swTSrho","snippet":"### Name: swTSrho\n### Title: Seawater Temperature from Salinity and Density\n### Aliases: swTSrho\n\n### ** Examples\n\nswTSrho(35, 23, 0, eos = \"unesco\") # 26.11301\n\n\n\n"} {"package":"oce","topic":"swThermalConductivity","snippet":"### Name: swThermalConductivity\n### Title: Seawater Thermal Conductivity\n### Aliases: swThermalConductivity\n\n### ** Examples\n\nlibrary(oce)\n# Values in m^2/s, a unit that is often used instead of W/(m*degC).\nswThermalConductivity(35, 10, 100) / (swRho(35, 10, 100) * swSpecificHeat(35, 10, 100)) # ocean\nswThermalConductivity(0, 20, 0) / (swRho(0, 20, 0) * swSpecificHeat(0, 20, 0)) # lab\n# Caldwell Table 1 gives 1478e-6 cal/(cm*sec*degC) at 31.5 o/oo, 10degC, 1kbar\njoulePerCalorie <- 4.18400\ncmPerM <- 100\nswThermalConductivity(31.5, 10, 1000) / joulePerCalorie / cmPerM\n\n\n\n"} {"package":"oce","topic":"swTheta","snippet":"### Name: swTheta\n### Title: Seawater Potential Temperature (UNESCO Version)\n### Aliases: swTheta\n\n### ** Examples\n\nlibrary(oce)\n# Example 1: test value from Fofonoff et al., 1983\nstopifnot(abs(36.8818748026 - swTheta(40, T90fromT68(40), 10000, 0, eos = \"unesco\")) < 0.0000000001)\n\n# Example 2: a deep-water station. 
Note that theta and CT are\n# visually identical on this scale.\ndata(section)\nstn <- section[[\"station\", 70]]\nplotProfile(stn, \"temperature\", ylim = c(6000, 1000))\nlines(stn[[\"theta\"]], stn[[\"pressure\"]], col = 2)\nlines(stn[[\"CT\"]], stn[[\"pressure\"]], col = 4, lty = 2)\nlegend(\"bottomright\",\n lwd = 1, col = c(1, 2, 4), lty = c(1, 1, 2),\n legend = c(\"in-situ\", \"theta\", \"CT\"),\n title = sprintf(\"MAD(theta-CT)=%.4f\", mean(abs(stn[[\"theta\"]] - stn[[\"CT\"]])))\n)\n\n\n\n"} {"package":"oce","topic":"swViscosity","snippet":"### Name: swViscosity\n### Title: Seawater Viscosity\n### Aliases: swViscosity\n\n### ** Examples\n\nswViscosity(30, 10) # 0.001383779\n\n\n\n"} {"package":"oce","topic":"threenum","snippet":"### Name: threenum\n### Title: Calculate Minimum, Mean, and Maximum Values\n### Aliases: threenum\n\n### ** Examples\n\nlibrary(oce)\nthreenum(1:10)\n\n\n"} {"package":"oce","topic":"tidalCurrent","snippet":"### Name: tidalCurrent\n### Title: Tidal Current Dataset\n### Aliases: tidalCurrent\n\n### ** Examples\n\nlibrary(oce)\ndata(tidalCurrent)\npar(mfrow = c(2, 1))\noce.plot.ts(tidalCurrent$time, tidalCurrent$u, ylab = \"u [m/s]\")\nabline(h = 0, col = 2)\noce.plot.ts(tidalCurrent$time, tidalCurrent$v, ylab = \"v [m/s]\")\nabline(h = 0, col = 2)\n\n\n\n"} {"package":"oce","topic":"tidem","snippet":"### Name: tidem\n### Title: Fit a Tidal Model to a Timeseries\n### Aliases: tidem\n\n### ** Examples\n\nlibrary(oce)\n# The demonstration time series from Foreman (1978),\n# also used in T_TIDE (Pawlowicz, 2002).\ndata(sealevelTuktoyaktuk)\ntide <- tidem(sealevelTuktoyaktuk)\nsummary(tide)\n\n# AIC analysis\nextractAIC(tide[[\"model\"]])\n\n# Fake data at M2\nlibrary(oce)\ndata(\"tidedata\")\nM2 <- with(tidedata$const, freq[name == \"M2\"])\nt <- seq(0, 10 * 86400, 3600)\neta <- sin(M2 * t * 2 * pi / 3600)\nsl <- as.sealevel(eta)\nm <- tidem(sl)\nsummary(m)\n\n\n\n"} {"package":"oce","topic":"tidemAstron","snippet":"### Name: tidemAstron\n### Title: Astronomical Calculations for tidem\n### Aliases: tidemAstron\n\n### ** Examples\n\ntidemAstron(as.POSIXct(\"2008-01-22 18:50:24\"))\n\n\n\n"} {"package":"oce","topic":"tidemVuf","snippet":"### Name: tidemVuf\n### Title: Nodal Modulation Calculations for Tidal Analyses\n### Aliases: tidemVuf\n\n### ** Examples\n\n# Look up values for the M2 constituent in Halifax Harbour, Canada.\nlibrary(oce)\ndata(\"tidedata\")\nj <- with(tidedata$const, which(name == \"M2\"))\ntidemVuf(t = as.POSIXct(\"2008-01-22 18:50:24\"), j = j, lat = 44.63)\n\n\n\n"} {"package":"oce","topic":"timeToArgoJuld","snippet":"### Name: timeToArgoJuld\n### Title: Convert Time to Argo Julian Day (juld)\n### Aliases: timeToArgoJuld\n\n### ** Examples\n\ntimeToArgoJuld(\"2020-07-01\")\n\n\n\n"} {"package":"oce","topic":"topoInterpolate","snippet":"### Name: topoInterpolate\n### Title: Interpolate Within a topo Object\n### Aliases: topoInterpolate\n\n### ** Examples\n\nlibrary(oce)\ndata(topoWorld)\n# \"The Gully\", approx. 
400m deep, connects Gulf of St Lawrence with North Atlantic\ntopoInterpolate(45, -57, topoWorld)\n\n\n\n"} {"package":"oce","topic":"unabbreviateYear","snippet":"### Name: unabbreviateYear\n### Title: Determine Year From Various Abbreviations\n### Aliases: unabbreviateYear\n\n### ** Examples\n\nfullYear <- unabbreviateYear(c(99, 8, 108))\n\n\n"} {"package":"oce","topic":"unduplicateNames","snippet":"### Name: unduplicateNames\n### Title: Rename Duplicated Character Strings\n### Aliases: unduplicateNames\n\n### ** Examples\n\nunduplicateNames(c(\"a\", \"b\", \"a\", \"c\", \"b\"))\nunduplicateNames(c(\"a\", \"b\", \"a\", \"c\", \"b\"), style = 2)\n\n\n"} {"package":"oce","topic":"ungrid","snippet":"### Name: ungrid\n### Title: Extract (x, y, z) From (x, y, grid)\n### Aliases: ungrid\n\n### ** Examples\n\nlibrary(oce)\ndata(wind)\nu <- interpBarnes(wind$x, wind$y, wind$z)\ncontour(u$xg, u$yg, u$zg)\nU <- ungrid(u$xg, u$yg, u$zg)\npoints(U$x, U$y, col = oce.colorsViridis(100)[rescale(U$grid, rlow = 1, rhigh = 100)], pch = 20)\n\n\n"} {"package":"oce","topic":"unitFromString","snippet":"### Name: unitFromString\n### Title: Decode Units From Strings\n### Aliases: unitFromString\n\n### ** Examples\n\nunitFromString(\"dbar\") # dbar (no scale)\nunitFromString(\"deg c\") # modern temperature (ITS-90 scale)\n\n\n"} {"package":"oce","topic":"unwrapAngle","snippet":"### Name: unwrapAngle\n### Title: Unwrap an Angle That Suffers Modulo-360 Problems\n### Aliases: unwrapAngle\n\n### ** Examples\n\nlibrary(oce)\ntrue <- 355\na <- true + rnorm(100, sd = 10)\na <- ifelse(a > 360, a - 360, a)\na2 <- unwrapAngle(a)\npar(mar = c(3, 3, 5, 3))\nhist(a, breaks = 360)\nabline(v = a2$mean, col = \"blue\", lty = \"dashed\")\nabline(v = true, col = \"blue\")\nmtext(\"true (solid)\\n estimate (dashed)\", at = true, side = 3, col = \"blue\")\nabline(v = mean(a), col = \"red\")\nmtext(\"mean\", at = mean(a), side = 3, col = \"red\")\n\n\n"} {"package":"oce","topic":"utm2lonlat","snippet":"### Name: utm2lonlat\n### Title: Convert UTM to Longitude and Latitude\n### Aliases: utm2lonlat\n\n### ** Examples\n\nlibrary(oce)\n# Cape Split, in the Minas Basin of the Bay of Fundy\nutm2lonlat(852863, 5029997, 19)\n\n\n\n"} {"package":"oce","topic":"vectorShow","snippet":"### Name: vectorShow\n### Title: Show Some Values From a List, Vector or Matrix\n### Aliases: vectorShow\n\n### ** Examples\n\n# List\nlimits <- list(low = 0, high = 1)\nvectorShow(limits)\n\n# Vector of named items\nplanktonCount <- c(phytoplankton = 100, zooplankton = 20)\nvectorShow(planktonCount)\n\n# Vector\nvectorShow(pi)\n\n# Matrix\nvectorShow(volcano)\n\n# Other arguments\nknot2mps <- 0.5144444\nvectorShow(knot2mps, postscript = \"knots per m/s\")\nvectorShow(\"January\", msg = \"The first month is\")\n\n\n\n"} {"package":"oce","topic":"velocityStatistics","snippet":"### Name: velocityStatistics\n### Title: Report Statistics of adp or adv Velocities\n### Aliases: velocityStatistics\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\na <- velocityStatistics(adp)\nprint(a)\nt <- seq(0, 2 * pi, length.out = 100)\ntheta <- a$ellipseAngle * pi / 180\ny <- a$ellipseMajor * cos(t) * sin(theta) + a$ellipseMinor * sin(t) * cos(theta)\nx <- a$ellipseMajor * cos(t) * cos(theta) - a$ellipseMinor * sin(t) * sin(theta)\nplot(adp, which = \"uv+ellipse+arrow\")\nlines(x, y, col = \"blue\", lty = \"dashed\", lwd = 5)\narrows(0, 0, a$uMean, a$vMean, lwd = 5, length = 1 / 10, col = \"blue\", lty = \"dashed\")\n\n\n\n"} {"package":"oce","topic":"window.oce","snippet":"### Name: 
window.oce\n### Title: Window an oce Object by Time or Distance\n### Aliases: window.oce\n\n### ** Examples\n\nlibrary(oce)\ndata(adp)\nplot(adp)\nearly <- window(adp, start = \"2008-06-26 00:00:00\", end = \"2008-06-26 12:00:00\")\nplot(early)\nbottom <- window(adp, start = 0, end = 20, which = \"distance\")\nplot(bottom)\n\n\n"} {"package":"oce","topic":"xbt","snippet":"### Name: xbt\n### Title: Sample xbt Data\n### Aliases: xbt\n\n### ** Examples\n\nlibrary(oce)\ndata(xbt)\nsummary(xbt)\nplot(xbt)\n\n\n\n"} {"package":"oce","topic":"xbt.edf","snippet":"### Name: xbt.edf\n### Title: Sample xbt File in .edf Format\n### Aliases: xbt.edf\n\n### ** Examples\n\nxbt <- read.oce(system.file(\"extdata\", \"xbt.edf\", package=\"oce\"))\n\n\n\n"} {"package":"rbioapi","topic":"rba_connection_test","snippet":"### Name: rba_connection_test\n### Title: Test if the Supported Services Are Responding\n### Aliases: rba_connection_test\n### Keywords: Helper\n\n### ** Examples\n\n## No test: \nrba_connection_test()\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr","snippet":"### Name: rba_enrichr\n### Title: A One-step Wrapper for Gene-list Enrichment Using Enrichr\n### Aliases: rba_enrichr\n\n### ** Examples\n\n## Not run: \n##D rba_enrichr(gene_list = c(\"TP53\", \"TNF\", \"EGFR\"))\n## End(Not run)\n## No test: \nrba_enrichr(gene_list = c(\"TP53\", \"TNF\", \"EGFR\"),\n gene_set_library = \"GO_Molecular_Function_2017\",\n regex_library_name = FALSE)\n## End(No test)\n## No test: \nrba_enrichr(gene_list = c(\"TP53\", \"TNF\", \"EGFR\"),\n gene_set_library = \"go\",\n regex_library_name = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr_add_list","snippet":"### Name: rba_enrichr_add_list\n### Title: Upload Your Gene-List to Enrichr\n### Aliases: rba_enrichr_add_list\n\n### ** Examples\n\n## No test: \nrba_enrichr_add_list(gene_list = c(\"TP53\", \"TNF\", \"EGFR\"),\n description = \"tumoral genes\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr_enrich","snippet":"### Name: rba_enrichr_enrich\n### Title: Get Enrichr Enrichment Results\n### Aliases: rba_enrichr_enrich\n\n### ** Examples\n\n## Not run: \n##D rba_enrichr_enrich(user_list_id = \"11111\")\n## End(Not run)\n## Not run: \n##D rba_enrichr_enrich(user_list_id = \"11111\",\n##D gene_set_library = \"GO_Molecular_Function_2017\",\n##D regex_library_name = FALSE)\n## End(Not run)\n## Not run: \n##D rba_enrichr_enrich(user_list_id = \"11111\",\n##D gene_set_library = \"go\",\n##D regex_library_name = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr_gene_map","snippet":"### Name: rba_enrichr_gene_map\n### Title: Find Enrichr Terms That Contain a Given Gene\n### Aliases: rba_enrichr_gene_map\n\n### ** Examples\n\n## No test: \nrba_enrichr_gene_map(gene = \"p53\")\n## End(No test)\n## No test: \nrba_enrichr_gene_map(gene = \"p53\", catagorize = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr_libs","snippet":"### Name: rba_enrichr_libs\n### Title: Retrieve a List of available libraries from Enrichr\n### Aliases: rba_enrichr_libs\n\n### ** Examples\n\n## No test: \nrba_enrichr_libs()\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_enrichr_view_list","snippet":"### Name: rba_enrichr_view_list\n### Title: View an Uploaded Gene List\n### Aliases: rba_enrichr_view_list\n\n### ** Examples\n\n## Not run: \n##D rba_enrichr_view_list(user_list_id = 11111)\n## End(Not run)\n\n\n\n"} 
{"package":"rbioapi","topic":"rba_jaspar_collections","snippet":"### Name: rba_jaspar_collections\n### Title: List collections available in JASPAR\n### Aliases: rba_jaspar_collections\n\n### ** Examples\n\n## No test: \nrba_jaspar_collections(release = 2022)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_collections_matrices","snippet":"### Name: rba_jaspar_collections_matrices\n### Title: List matrices available in a JASPAR collection\n### Aliases: rba_jaspar_collections_matrices\n\n### ** Examples\n\n## No test: \nrba_jaspar_collections_matrices(collection = \"CORE\",\n release = 2022,\n page_size = 100,\n page = 2)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_matrix","snippet":"### Name: rba_jaspar_matrix\n### Title: Get a Position Frequency Matrices (PFM) with annotations\n### Aliases: rba_jaspar_matrix\n\n### ** Examples\n\n## No test: \nrba_jaspar_matrix(\"MA0600.2\")\n## End(No test)\n## Not run: \n##D rba_jaspar_matrix(matrix_id = \"MA0600.2\",\n##D file_format = \"meme\",\n##D save_to = \"my_matrix.meme\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_matrix_search","snippet":"### Name: rba_jaspar_matrix_search\n### Title: Search matrix profiles available in JASPAR\n### Aliases: rba_jaspar_matrix_search\n\n### ** Examples\n\n## No test: \nrba_jaspar_matrix_search(term = \"FOX\")\nrba_jaspar_matrix_search(tf_name = \"FOXP3\")\nrba_jaspar_matrix_search(tf_name = \"FOXP3\", only_last_version = TRUE)\nrba_jaspar_matrix_search(tf_class = \"Zipper-Type\")\nrba_jaspar_matrix_search(tax_group = \"insects\")\nrba_jaspar_matrix_search(page_size = 100)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_matrix_versions","snippet":"### Name: rba_jaspar_matrix_versions\n### Title: List matrix profile versions associated with a base ID\n### Aliases: rba_jaspar_matrix_versions\n\n### ** Examples\n\n## No test: \nrba_jaspar_matrix_versions(\"MA0600\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_releases","snippet":"### Name: rba_jaspar_releases\n### Title: Get information about JASPAR database releases\n### Aliases: rba_jaspar_releases\n\n### ** Examples\n\n## No test: \nrba_jaspar_releases()\nrba_jaspar_releases(7)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_sites","snippet":"### Name: rba_jaspar_sites\n### Title: Get binding sites of a matrix profile\n### Aliases: rba_jaspar_sites\n\n### ** Examples\n\n## No test: \nrba_jaspar_sites(\"MA0600.1\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_species","snippet":"### Name: rba_jaspar_species\n### Title: List available species in JASPAR\n### Aliases: rba_jaspar_species\n\n### ** Examples\n\n## No test: \nrba_jaspar_species(release = 2022)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_species_matrices","snippet":"### Name: rba_jaspar_species_matrices\n### Title: List matrices available in JASPAR of a species\n### Aliases: rba_jaspar_species_matrices\n\n### ** Examples\n\n## No test: \nrba_jaspar_species_matrices(tax_id = 9606, page_size = 100)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_taxons","snippet":"### Name: rba_jaspar_taxons\n### Title: List available taxonomic groups in JASPAR\n### Aliases: rba_jaspar_taxons\n\n### ** Examples\n\n## No test: \nrba_jaspar_taxons(release = 2022)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_taxons_matrices","snippet":"### Name: rba_jaspar_taxons_matrices\n### Title: List matrices available in 
JASPAR of a taxonomic group\n### Aliases: rba_jaspar_taxons_matrices\n\n### ** Examples\n\n## No test: \nrba_jaspar_taxons_matrices(tax_group = \"plants\", page_size = 100)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_tffm","snippet":"### Name: rba_jaspar_tffm\n### Title: Get a TF flexible models (TFFMs) information\n### Aliases: rba_jaspar_tffm\n\n### ** Examples\n\n## No test: \nrba_jaspar_tffm(\"TFFM0056.3\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_jaspar_tffm_search","snippet":"### Name: rba_jaspar_tffm_search\n### Title: Search TF flexible models (TFFMs) available in JASPAR\n### Aliases: rba_jaspar_tffm_search\n\n### ** Examples\n\n## No test: \nrba_jaspar_tffm_search(term = \"FOX\")\nrba_jaspar_tffm_search(tax_group = \"insects\")\nrba_jaspar_tffm_search(page_size = 100)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_cats","snippet":"### Name: rba_mieaa_cats\n### Title: Get Supported Enrichment Categories for a Species and miRNA Type\n### Aliases: rba_mieaa_cats\n\n### ** Examples\n\n## No test: \nrba_mieaa_cats(\"mature\", \"Homo sapiens\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_convert_type","snippet":"### Name: rba_mieaa_convert_type\n### Title: Convert Between Mature and precursor miRNA Accession\n### Aliases: rba_mieaa_convert_type\n\n### ** Examples\n\n## No test: \nSys.sleep(1) # to prevent 429 error during R CMD check\nrba_mieaa_convert_type(mirna = c(\"hsa-miR-20b-5p\", \"hsa-miR-144-5p\"),\n input_type = \"mature\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_convert_version","snippet":"### Name: rba_mieaa_convert_version\n### Title: Convert miRNA accession Between Different miRBase Versions\n### Aliases: rba_mieaa_convert_version\n\n### ** Examples\n\n## No test: \nSys.sleep(1) # to prevent 429 error during R CMD check\nrba_mieaa_convert_version(mirna = c(\"hsa-miR-20b-5p\", \"hsa-miR-144-5p\"),\n mirna_type = \"mature\", input_version = 22, output_version = 16)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_enrich","snippet":"### Name: rba_mieaa_enrich\n### Title: A One-step Wrapper for miRNA Enrichment Using miEAA\n### Aliases: rba_mieaa_enrich\n\n### ** Examples\n\n## Not run: \n##D rba_mieaa_enrich(test_set = c(\"hsa-miR-20b-5p\", \"hsa-miR-144-5p\",\n##D \"hsa-miR-17-5p\", \"hsa-miR-20a-5p\"),\n##D mirna_type = \"mature\",\n##D test_type = \"ORA\",\n##D species = 9606,\n##D categories = \"miRPathDB_GO_Biological_process_mature\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_enrich_results","snippet":"### Name: rba_mieaa_enrich_results\n### Title: Retrieve Results of a finished Enrichment Analysis from miEAA\n### Aliases: rba_mieaa_enrich_results\n\n### ** Examples\n\n## Not run: \n##D rba_mieaa_enrich_results(\"f52d1aef-6d3d-4d51-9020-82e68fe99012\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_enrich_status","snippet":"### Name: rba_mieaa_enrich_status\n### Title: Check Status of a Submitted Enrichment Analysis in miEAA\n### Aliases: rba_mieaa_enrich_status\n\n### ** Examples\n\n## Not run: \n##D Sys.sleep(1) # to prevent 429 error during R CMD check\n##D rba_mieaa_enrich_status(\"f52d1aef-6d3d-4d51-9020-82e68fe99012\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_mieaa_enrich_submit","snippet":"### Name: rba_mieaa_enrich_submit\n### Title: Submit miEAA miRNA Enrichment Analysis Request\n### Aliases: rba_mieaa_enrich_submit\n\n### ** Examples\n\n## No test: \nSys.sleep(1) # 
to prevent 429 error during R CMD check\nrba_mieaa_enrich_submit(test_set = c(\"hsa-miR-20b-5p\", \"hsa-miR-144-5p\"),\n mirna_type = \"mature\",\n test_type = \"GSEA\",\n species = 9606,\n categories = NULL)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_options","snippet":"### Name: rba_options\n### Title: Set rbioapi Global Options\n### Aliases: rba_options\n### Keywords: Helper\n\n### ** Examples\n\nrba_options()\n## Not run: \n##D rba_options(verbose = FALSE)\n## End(Not run)\n## Not run: \n##D rba_options(save_file = TRUE)\n## End(Not run)\n## Not run: \n##D rba_options(diagnostics = TRUE, progress = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_pages","snippet":"### Name: rba_pages\n### Title: Get Multiple Pages of a Paginated Resource\n### Aliases: rba_pages\n### Keywords: Helper\n\n### ** Examples\n\n## No test: \nrba_pages(input_call = quote(rba_uniprot_taxonomy(ids = 189831,\n hierarchy = \"siblings\",\n page_size = 50,\n page_number = \"pages:1:5\")))\n## End(No test)\n## No test: \nrba_pages(input_call = quote(rba_uniprot_taxonomy_name(name = \"adenovirus\",\n field = \"scientific\",\n search_type = \"contain\",\n page_size = 200,\n page_number = \"pages:1:5\",\n verbose = FALSE)))\n## End(No test)\n## No test: \nrba_pages(input_call = quote(rba_panther_info(what = \"families\",\n families_page = \"pages:9:11\")))\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_enrich","snippet":"### Name: rba_panther_enrich\n### Title: PANTHER Over-Representation Enrichment Analysis\n### Aliases: rba_panther_enrich\n\n### ** Examples\n\n## No test: \nrba_panther_enrich(genes = c(\"p53\", \"BRCA1\", \"cdk2\", \"Q99835\", \"CDC42\",\n \"CDK1\", \"KIF23\", \"PLK1\", \"RAC2\", \"RACGAP1\"),\n organism = 9606, annot_dataset = \"GO:0008150\",\n cutoff = 0.01)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_family","snippet":"### Name: rba_panther_family\n### Title: Get PANTHER Families and Sub-Families\n### Aliases: rba_panther_family\n\n### ** Examples\n\n## No test: \nrba_panther_family(\"PTHR10000\", what = \"ortholog\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_homolog","snippet":"### Name: rba_panther_homolog\n### Title: Search PANTHER for Homologs of Gene(s)\n### Aliases: rba_panther_homolog\n\n### ** Examples\n\n## No test: \nrba_panther_homolog(\"OR4F5\", organism = 9606, type = \"P\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_info","snippet":"### Name: rba_panther_info\n### Title: Get PANTHER database Information\n### Aliases: rba_panther_info\n\n### ** Examples\n\n## No test: \nrba_panther_info(what = \"organisms\")\n## End(No test)\n## No test: \nrba_panther_info(what = \"families\", families_page = 4)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_mapping","snippet":"### Name: rba_panther_mapping\n### Title: Map A Gene-set to PANTHER Database\n### Aliases: rba_panther_mapping\n\n### ** Examples\n\n## No test: \nrba_panther_mapping(genes = c(\"Cd40\", 7124, \"ENSG00000203747\", \"P33681\"),\n organism = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_ortholog","snippet":"### Name: rba_panther_ortholog\n### Title: Search PANTHER for Orthologs of Gene(s)\n### Aliases: rba_panther_ortholog\n\n### ** Examples\n\n## No test: \nrba_panther_ortholog(\"CD40\", organism = 9606, type = \"LDO\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_panther_tree_grafter","snippet":"### Name: 
rba_panther_tree_grafter\n### Title: PANTHER Tree Grafter\n### Aliases: rba_panther_tree_grafter\n\n### ** Examples\n\n## No test: \nrba_panther_tree_grafter(\"MKVLWAALLVTFLAGCQAKVEQAVETE\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis","snippet":"### Name: rba_reactome_analysis\n### Title: Reactome Over-Representation or Expression Analysis\n### Aliases: rba_reactome_analysis\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis(input = c(\"p53\", \"BRCA1\", \"cdk2\", \"Q99835\", \"CDC42\"))\n## End(Not run)\n## Not run: \n##D rba_reactome_analysis(input = \"c:/rbioapi/genes.txt\")\n## End(Not run)\n## Not run: \n##D rba_reactome_analysis(input = \"https://qazwsx.com/genes.txt\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_download","snippet":"### Name: rba_reactome_analysis_download\n### Title: Download Different Reactome Analysis Results\n### Aliases: rba_reactome_analysis_download\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis_download(token = \"MjAyMDEwMTYwMTI3MTNfMjY1MjM\",\n##D request = \"found_ids\", save_to = \"found_ids.csv\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_import","snippet":"### Name: rba_reactome_analysis_import\n### Title: Import Saved Analysis JSON to Reactome\n### Aliases: rba_reactome_analysis_import\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis_import(\"c:/rbioapi/res.json\")\n## End(Not run)\n## Not run: \n##D rba_reactome_analysis_import(\"https://qaz.com/res.json.gz\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_mapping","snippet":"### Name: rba_reactome_analysis_mapping\n### Title: Maps Molecule Identifiers\n### Aliases: rba_reactome_analysis_mapping\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis_mapping(c(\"Q8SQ34\", \"cd40\"))\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_pdf","snippet":"### Name: rba_reactome_analysis_pdf\n### Title: Generate PDF file with Reactome Analysis Results\n### Aliases: rba_reactome_analysis_pdf\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis_pdf(token = \"MjAyMDEwMTYwMTI3MTNfMjY1MjM%3D\",\n##D species = 9606, save_to = \"my_analysis.pdf\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_species","snippet":"### Name: rba_reactome_analysis_species\n### Title: Compare Human Pathways with Other Species\n### Aliases: rba_reactome_analysis_species\n\n### ** Examples\n\n## No test: \nrba_reactome_analysis_species(species_dbid = 48892)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_analysis_token","snippet":"### Name: rba_reactome_analysis_token\n### Title: Return the Results Associated with a Token\n### Aliases: rba_reactome_analysis_token\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_analysis_token(token = \"MjAyMDEwMTYwMTI3MTNfMjY1MjM\",\n##D species = 9606)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_complex_list","snippet":"### Name: rba_reactome_complex_list\n### Title: Get Complexes That Include a Molecule\n### Aliases: rba_reactome_complex_list\n\n### ** Examples\n\n## No test: \nrba_reactome_complex_list(id = \"3845\", resource = \"NCBI Gene\")\n## End(No test)\n## No test: \nrba_reactome_complex_list(id = \"P00533\", resource = 
\"UniProt\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_complex_subunits","snippet":"### Name: rba_reactome_complex_subunits\n### Title: Get a Complex's Subunits\n### Aliases: rba_reactome_complex_subunits\n\n### ** Examples\n\n## No test: \nrba_reactome_complex_subunits(complex_id = \"R-HSA-5674003\",\n exclude_structures = FALSE)\n## End(No test)\n## No test: \nrba_reactome_complex_subunits(complex_id = \"R-HSA-109783\",\n exclude_structures = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_diseases","snippet":"### Name: rba_reactome_diseases\n### Title: Reactome Diseases\n### Aliases: rba_reactome_diseases\n\n### ** Examples\n\n## No test: \nrba_reactome_diseases()\n## End(No test)\n## No test: \nrba_reactome_diseases(doid = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_entity_other_forms","snippet":"### Name: rba_reactome_entity_other_forms\n### Title: Get Other forms of a Reactome Entity\n### Aliases: rba_reactome_entity_other_forms\n\n### ** Examples\n\n## No test: \nrba_reactome_entity_other_forms(\"R-HSA-199420\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_event_ancestors","snippet":"### Name: rba_reactome_event_ancestors\n### Title: Get Reactome Events Ancestors\n### Aliases: rba_reactome_event_ancestors\n\n### ** Examples\n\n## No test: \nrba_reactome_event_ancestors(\"R-HSA-5673001\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_event_hierarchy","snippet":"### Name: rba_reactome_event_hierarchy\n### Title: Get Full Event Hierarchy of a Species\n### Aliases: rba_reactome_event_hierarchy\n\n### ** Examples\n\n## Not run: \n##D #very large response!\n##D rba_reactome_event_hierarchy(\"Homo sapiens\")\n## End(Not run)\n## Not run: \n##D #very large response!\n##D rba_reactome_event_hierarchy(9606)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_exporter_diagram","snippet":"### Name: rba_reactome_exporter_diagram\n### Title: Get a Reactome Event Diagram\n### Aliases: rba_reactome_exporter_diagram\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-177929\",\n##D create_document = FALSE)\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-6787403\",\n##D create_document = FALSE)\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-177929\",\n##D create_document = TRUE)\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-177929\",\n##D output_format = \"svg\",\n##D save_to = \"reactome_event_diagram.svg\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_exporter_event","snippet":"### Name: rba_reactome_exporter_event\n### Title: Exports A Reactome Event to SBGN or SBML\n### Aliases: rba_reactome_exporter_event\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_exporter_event(event_id = \"R-HSA-177929\",\n##D output_format = \"sbgn\",\n##D save_to = \"R-HSA-177929.sbgn\")\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_event(event_id = \"R-HSA-177929\",\n##D output_format = \"sbgn\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_exporter_overview","snippet":"### Name: rba_reactome_exporter_overview\n### Title: Get a Reactome Pathway Overview\n### Aliases: rba_reactome_exporter_overview\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_exporter_overview(species = 9606,\n##D output_format = \"svg\",\n##D save_to = 
\"human_pathways.svg\")\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_overview(species = 9606,\n##D token = 123456789)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_exporter_reaction","snippet":"### Name: rba_reactome_exporter_reaction\n### Title: Get a Reactome Reaction Event\n### Aliases: rba_reactome_exporter_reaction\n\n### ** Examples\n\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-6787403\",\n##D create_document = FALSE)\n## End(Not run)\n## Not run: \n##D rba_reactome_exporter_diagram(event_id = \"R-HSA-6787403\",\n##D output_format = \"svg\",\n##D save_to = \"reactome_reacion_image.svg\")\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_interactors_psicquic","snippet":"### Name: rba_reactome_interactors_psicquic\n### Title: The interface From Reactome to PSICQUIC\n### Aliases: rba_reactome_interactors_psicquic\n\n### ** Examples\n\n## No test: \nrba_reactome_interactors_psicquic()\n## End(No test)\n## No test: \nrba_reactome_interactors_psicquic(proteins = c(\"TP53\", \"MYC\"),\n resource = \"BioGrid\",\n details = FALSE)\n## End(No test)\n## No test: \nrba_reactome_interactors_psicquic(proteins = c(\"TP53\", \"MYC\"),\n resource = \"BioGrid\",\n details = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_interactors_static","snippet":"### Name: rba_reactome_interactors_static\n### Title: Get Static(IntAct) Interaction Information of a Protein\n### Aliases: rba_reactome_interactors_static\n\n### ** Examples\n\n## No test: \nrba_reactome_interactors_static(proteins = \"Q9BXM7-1\",\n endpoint = \"pathways\", species = \"Homo sapiens\")\n## End(No test)\n## No test: \nrba_reactome_interactors_static(proteins = c(\"Q9BXM7-1\", \"Q13501\"),\n endpoint = \"details\")\n## End(No test)\n## No test: \nrba_reactome_interactors_static(proteins = c(\"Q9BXM7-1\", \"Q13501\"),\n endpoint = \"summary\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_mapping","snippet":"### Name: rba_reactome_mapping\n### Title: Map External ID to Reactome Pathways/Reactions\n### Aliases: rba_reactome_mapping\n\n### ** Examples\n\n## No test: \nrba_reactome_mapping(id = \"PTEN\", resource = \"UniProt\",\n map_to = \"reactions\", species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_orthology","snippet":"### Name: rba_reactome_orthology\n### Title: Get Orthologous (Computationally Inferred) Events\n### Aliases: rba_reactome_orthology\n\n### ** Examples\n\n## No test: \nrba_reactome_orthology(event_ids = c(\"R-HSA-6799198\", \" R-HSA-72764\"),\n species_dbid = 49633)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_participant_of","snippet":"### Name: rba_reactome_participant_of\n### Title: Get Larger Reactome Structures Which Include an Entity\n### Aliases: rba_reactome_participant_of\n\n### ** Examples\n\n## No test: \nrba_reactome_participant_of(entity_id = \"R-HSA-199420\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_participants","snippet":"### Name: rba_reactome_participants\n### Title: Get Participants of a Reactome Event\n### Aliases: rba_reactome_participants\n\n### ** Examples\n\n## No test: \nrba_reactome_participants(\"R-HSA-5682012\")\n## End(No test)\n## No test: \nrba_reactome_participants(\"R-HSA-5682012\", only_physical_entities = TRUE)\n## End(No test)\n## No test: \nrba_reactome_participants(\"R-HSA-5682012\", only_reference_entities = TRUE)\n## End(No test)\n\n\n\n"} 
{"package":"rbioapi","topic":"rba_reactome_pathways_events","snippet":"### Name: rba_reactome_pathways_events\n### Title: Get Events Contained in an Upstream Events\n### Aliases: rba_reactome_pathways_events\n\n### ** Examples\n\n## No test: \nrba_reactome_pathways_events(event_id = \"R-HSA-5673001\")\n## End(No test)\n## No test: \nrba_reactome_pathways_events(event_id = \"R-HSA-5673001\",\n attribute_name = \"displayName\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_pathways_low","snippet":"### Name: rba_reactome_pathways_low\n### Title: Get lower level pathways Containing a 'Physical Entity' or Event\n### Aliases: rba_reactome_pathways_low\n\n### ** Examples\n\n## No test: \nrba_reactome_pathways_low(entity_id = \"R-HSA-199420\")\n## End(No test)\n## No test: \nrba_reactome_pathways_low(entity_id = \"R-HSA-199420\", with_diagram = TRUE)\n## End(No test)\n## No test: \nrba_reactome_pathways_low(entity_id = \"R-HSA-199420\", with_diagram = TRUE,\n all_forms = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_pathways_top","snippet":"### Name: rba_reactome_pathways_top\n### Title: Get Top Level Pathways in a Species\n### Aliases: rba_reactome_pathways_top\n\n### ** Examples\n\n## No test: \nrba_reactome_pathways_top(species = 9606)\n## End(No test)\n## No test: \nrba_reactome_pathways_top(species = \"Saccharomyces cerevisiae\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_people_id","snippet":"### Name: rba_reactome_people_id\n### Title: A person by his identifiers\n### Aliases: rba_reactome_people_id\n\n### ** Examples\n\n## No test: \nrba_reactome_people_id(\"391309\")\n## End(No test)\n## No test: \nrba_reactome_people_id(person_id = \"391309\", authored_pathways = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_people_name","snippet":"### Name: rba_reactome_people_name\n### Title: Get Persons Information by Name\n### Aliases: rba_reactome_people_name\n\n### ** Examples\n\n## No test: \nrba_reactome_people_name(\"Jupe\")\n## End(No test)\n## No test: \nrba_reactome_people_name(\"Steve Jupe\", exact_match = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_query","snippet":"### Name: rba_reactome_query\n### Title: Query and Retrieve any Reactome knowledge-base Object\n### Aliases: rba_reactome_query\n\n### ** Examples\n\n## No test: \nrba_reactome_query(ids = c(\"8953958\", \"11982506\", \"R-ALL-9649879\"))\n## End(No test)\n## No test: \nrba_reactome_query(ids = \"R-HSA-9656256\", enhanced = TRUE)\n## End(No test)\n## No test: \nrba_reactome_query(ids = \"8863054\", attribute_name = \"displayName\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_species","snippet":"### Name: rba_reactome_species\n### Title: Get Reactome Species\n### Aliases: rba_reactome_species\n\n### ** Examples\n\n## No test: \nrba_reactome_species()\n## End(No test)\n## No test: \nrba_reactome_species(only_main = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_version","snippet":"### Name: rba_reactome_version\n### Title: The version number of current database\n### Aliases: rba_reactome_version\n\n### ** Examples\n\n## No test: \nrba_reactome_version()\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_reactome_xref","snippet":"### Name: rba_reactome_xref\n### Title: Map Cross References IDs to Reactome ReferenceEntity\n### Aliases: rba_reactome_xref\n\n### ** Examples\n\n## No test: \nrba_reactome_xref(\"CD40\")\n## End(No 
test)\n## No test: \nrba_reactome_xref(\"ENSP00000361350\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_annotations","snippet":"### Name: rba_string_annotations\n### Title: Retrieving Functional Annotation\n### Aliases: rba_string_annotations\n\n### ** Examples\n\n## No test: \nrba_string_annotations(ids = \"TP53\", species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_enrichment","snippet":"### Name: rba_string_enrichment\n### Title: Getting Functional Enrichment\n### Aliases: rba_string_enrichment\n\n### ** Examples\n\n## No test: \nrba_string_enrichment(ids = c(\"TP53\", \"TNF\", \"EGFR\"), species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_enrichment_ppi","snippet":"### Name: rba_string_enrichment_ppi\n### Title: Get Protein-Protein Interaction Enrichment\n### Aliases: rba_string_enrichment_ppi\n\n### ** Examples\n\n## No test: \nrba_string_enrichment_ppi(ids = c(\"p53\", \"BRCA1\", \"cdk2\", \"Q99835\",\n \"CDC42\", \"CDK1\", \"KIF23\", \"PLK1\", \"RAC2\", \"RACGAP1\"),\n species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_homology_inter","snippet":"### Name: rba_string_homology_inter\n### Title: Get Similarity Score Hits of Proteins in Different Species\n### Aliases: rba_string_homology_inter\n\n### ** Examples\n\n## No test: \nrba_string_homology_inter(ids = \"p53\",\n species = 9606,\n species_b = 7070)\n## End(No test)\n## No test: \nrba_string_homology_inter(ids = \"ENSP00000269305\", species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_homology_intra","snippet":"### Name: rba_string_homology_intra\n### Title: Get Similarity Score Hits of Proteins in a Species\n### Aliases: rba_string_homology_intra\n\n### ** Examples\n\n## No test: \nrba_string_homology_intra(ids = c(\"CDK1\", \"CDK2\"), species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_interaction_partners","snippet":"### Name: rba_string_interaction_partners\n### Title: Get All STRING Interaction Partners\n### Aliases: rba_string_interaction_partners\n\n### ** Examples\n\n## No test: \nrba_string_interaction_partners(ids = c(\"9606.ENSP00000269305\",\n \"9606.ENSP00000398698\",\n \"9606.ENSP00000275493\"),\n network_type = \"functional\")\n## End(No test)\n## No test: \nrba_string_interaction_partners(ids = \"9606.ENSP00000269305\",\n species = 9606,\n required_score = 700)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_interactions_network","snippet":"### Name: rba_string_interactions_network\n### Title: Get STRING Network Interactions\n### Aliases: rba_string_interactions_network\n\n### ** Examples\n\n## No test: \nrba_string_interactions_network(ids = c(\"9606.ENSP00000269305\",\n \"9606.ENSP00000398698\",\n \"9606.ENSP00000275493\"),\n network_type = \"functional\")\n## End(No test)\n## No test: \nrba_string_interactions_network(ids = c(\"9606.ENSP00000269305\",\n \"9606.ENSP00000398698\",\n \"9606.ENSP00000275493\"),\n species = 9606,\n add_nodes = 10)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_map_ids","snippet":"### Name: rba_string_map_ids\n### Title: Map a Set of Identifiers to STRING Identifiers\n### Aliases: rba_string_map_ids\n\n### ** Examples\n\n## No test: \nrba_string_map_ids(ids = c(\"TP53\", \"TNF\", \"EGFR\"), species = 9606)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_network_image","snippet":"### Name: rba_string_network_image\n### Title: Get STRING 
Network Image\n### Aliases: rba_string_network_image\n\n### ** Examples\n\n## Not run: \n##D rba_string_network_image(ids = c(\"9606.ENSP00000269305\",\n##D \"9606.ENSP00000398698\",\n##D \"9606.ENSP00000275493\"),\n##D network_type = \"functional\",\n##D save_image = FALSE)\n## End(Not run)\n## Not run: \n##D rba_string_network_image(ids = c(\"TP53\", \"TNF\", \"EGFR\"),\n##D species = 9606,\n##D save_image = TRUE)\n## End(Not run)\n## Not run: \n##D rba_string_network_image(ids = \"9606.ENSP00000269305\",\n##D image_format = \"highres_image\",\n##D save_image = file.path(getwd(), \"TP53_network.png\"))\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_string_version","snippet":"### Name: rba_string_version\n### Title: Get Current STRING Version\n### Aliases: rba_string_version\n\n### ** Examples\n\n## No test: \nrba_string_version()\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_antigens","snippet":"### Name: rba_uniprot_antigens\n### Title: Get Antigens by UniProt Accession\n### Aliases: rba_uniprot_antigens\n\n### ** Examples\n\n## No test: \nrba_uniprot_antigens(\"P04626\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_antigens_search","snippet":"### Name: rba_uniprot_antigens_search\n### Title: Search Antigens in UniProt\n### Aliases: rba_uniprot_antigens_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_antigens_search(antigen_id = \"HPA001060\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_coordinates","snippet":"### Name: rba_uniprot_coordinates\n### Title: Get Genomic Coordinates of a Protein\n### Aliases: rba_uniprot_coordinates\n\n### ** Examples\n\n## No test: \nrba_uniprot_coordinates(accession = \"P25942\")\n## End(No test)\n## No test: \nrba_uniprot_coordinates(db_type = \"HGNC\", db_id = \"CD40\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_coordinates_location","snippet":"### Name: rba_uniprot_coordinates_location\n### Title: Search UniProt entries by taxonomy and genomic coordinates\n### Aliases: rba_uniprot_coordinates_location\n\n### ** Examples\n\n## No test: \nrba_uniprot_coordinates_location(taxid = 9606,\n locations = \"Y:17100001-19600000\", in_range = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_coordinates_location(taxid = 9606,\n locations = \"20:39000001\", in_range = FALSE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_coordinates_search","snippet":"### Name: rba_uniprot_coordinates_search\n### Title: Search Genomic Coordinates of UniProt entries\n### Aliases: rba_uniprot_coordinates_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_coordinates_search(taxid = 9606, chromosome = \"y\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_coordinates_sequence","snippet":"### Name: rba_uniprot_coordinates_sequence\n### Title: Get Genome coordinate by Protein Sequence position\n### Aliases: rba_uniprot_coordinates_sequence\n\n### ** Examples\n\n## No test: \nrba_uniprot_coordinates_sequence(accession = \"P25942\", p_position = 1)\n## End(No test)\n## No test: \nrba_uniprot_coordinates_sequence(accession = \"P25942\",\n p_start = 1, p_end = 277)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_features","snippet":"### Name: rba_uniprot_features\n### Title: Get UniProt protein sequence features by accession\n### Aliases: rba_uniprot_features\n\n### ** Examples\n\n## No test: \nrba_uniprot_features(\"Q99616\")\n## End(No test)\n## No test: \nrba_uniprot_features(accession = \"Q99616\", 
types = \"DISULFID\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_features_search","snippet":"### Name: rba_uniprot_features_search\n### Title: UniProt maintains sequence annotations (features) that describe\n### regions in the protein sequence. Using this function, you can search\n### and retrieve UniProt proteins' sequence annotations (features). you\n### may also refine your search query with variety of modifiers.\n### Aliases: rba_uniprot_features_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_features_search(accession = \"Q99616\")\n## End(No test)\n## No test: \nrba_uniprot_features_search(gene = \"cd40\")\n## End(No test)\n## No test: \nrba_uniprot_features_search(gene = \"cd40 ligand\")\n## End(No test)\n## No test: \nrba_uniprot_features_search(gene = \"cd40\", reviewed = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_features_search(accession = \"Q99616\",\n categories = c(\"MOLECULE_PROCESSING\", \"TOPOLOGY\"))\n## End(No test)\n## No test: \nrba_uniprot_features_search(accession = \"Q99616\", types = \"DISULFID\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_genecentric","snippet":"### Name: rba_uniprot_genecentric\n### Title: Get Gene-Centric proteins by UniProt Accession\n### Aliases: rba_uniprot_genecentric\n\n### ** Examples\n\n## No test: \nrba_uniprot_genecentric(\"P29965\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_genecentric_search","snippet":"### Name: rba_uniprot_genecentric_search\n### Title: Search Gene-Centric Proteins\n### Aliases: rba_uniprot_genecentric_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_genecentric_search(accession = \"P59594\")\n## End(No test)\n## No test: \nrba_uniprot_genecentric_search(gene = \"Spike\")\n## End(No test)\n## No test: \nrba_uniprot_genecentric_search(upid = \"UP000000354\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_mutagenesis","snippet":"### Name: rba_uniprot_mutagenesis\n### Title: Get Mutagenesis by UniProt Accession\n### Aliases: rba_uniprot_mutagenesis\n\n### ** Examples\n\n## No test: \nrba_uniprot_mutagenesis(accession = \"P0DTC2\", location = \"300-400\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_mutagenesis_search","snippet":"### Name: rba_uniprot_mutagenesis_search\n### Title: Search Mutagenesis in UniProt\n### Aliases: rba_uniprot_mutagenesis_search\n\n### ** Examples\n\n## No test: \n#search all mutations in COVID19 proteins\nrba_uniprot_mutagenesis_search(taxid = 2697049)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteins","snippet":"### Name: rba_uniprot_proteins\n### Title: Get UniProt entry by accession\n### Aliases: rba_uniprot_proteins\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteins(accession = \"P01730\")\n## End(No test)\n## No test: \nrba_uniprot_proteins(accession = \"P01730\", interaction = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_proteins(accession = \"Q29983\", isoforms = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteins_crossref","snippet":"### Name: rba_uniprot_proteins_crossref\n### Title: Get UniProt Entry by UniProt Cross-Reference Database and ID\n### Aliases: rba_uniprot_proteins_crossref\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteins_crossref(\"cd40\", \"hgnc\")\n## End(No test)\n## No test: \nrba_uniprot_proteins_crossref(\"cd40\", \"hgnc\", reviewed = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_proteins_crossref(\"mica\", \"hgnc\", isoform = 0)\n## End(No 
test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteins_search","snippet":"### Name: rba_uniprot_proteins_search\n### Title: Search UniProt entries\n### Aliases: rba_uniprot_proteins_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteins_search(accession = \"Q99616\")\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(gene = \"cd40\")\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(gene = \"cd40 ligand\")\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(gene = \"cd40\", reviewed = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(gene = \"cd40\", reviewed = TRUE, isoform = 1)\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(keyword = \"Inhibition of host chemokines by virus\")\n## End(No test)\n## No test: \nrba_uniprot_proteins_search(keyword = \"chemokines\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteomes","snippet":"### Name: rba_uniprot_proteomes\n### Title: Get proteome by proteome/proteins UPID\n### Aliases: rba_uniprot_proteomes\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteomes(upid = \"UP000000354\")\n## End(No test)\n## No test: \nrba_uniprot_proteomes(upid = \"UP000000354\", get_proteins = TRUE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteomes_search","snippet":"### Name: rba_uniprot_proteomes_search\n### Title: Search Proteomes in UniProt\n### Aliases: rba_uniprot_proteomes_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteomes_search(name = \"SARS-CoV\")\n## End(No test)\n## No test: \nrba_uniprot_proteomes_search(name = \"SARS-CoV\", is_ref_proteome = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_proteomes_search(name = \"SARS-CoV\", is_ref_proteome = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_proteomes_search(genome_acc = \"AY274119\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteomics","snippet":"### Name: rba_uniprot_proteomics\n### Title: Get Proteomics Peptides Mapped to UniProt Protein\n### Aliases: rba_uniprot_proteomics\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteomics(accession = \"P25942\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_proteomics_search","snippet":"### Name: rba_uniprot_proteomics_search\n### Title: Search Proteomics Peptides in UniProt\n### Aliases: rba_uniprot_proteomics_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_proteomics_search(peptide = \"MEDYTKIEK\")\n## End(No test)\n## No test: \nrba_uniprot_proteomics_search(peptide = \"MEDYTKIEK\")\n## End(No test)\n## Not run: \n##D ### this will generate a very large response!\n##D rba_uniprot_proteomics_search(taxid = 9606,\n##D data_source = \"PeptideAtlas\",\n##D progress = TRUE, timeout = 999999, unique = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_ptm","snippet":"### Name: rba_uniprot_ptm\n### Title: Get Post-Translational Modification of UniProt Protein\n### Aliases: rba_uniprot_ptm\n\n### ** Examples\n\n## No test: \nrba_uniprot_ptm(accession = \"P04234\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_ptm_search","snippet":"### Name: rba_uniprot_ptm_search\n### Title: Search Post-Translational Modification in UniProt\n### Aliases: rba_uniprot_ptm_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_ptm_search(peptide = \"NDQVYQPLRDRDDAQYSHLGGNWAR\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy","snippet":"### Name: rba_uniprot_taxonomy\n### Title: Get UniProt Taxonomy 
Nodes\n### Aliases: rba_uniprot_taxonomy\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy(ids = c(9606, 10090))\n## End(No test)\n## No test: \nrba_uniprot_taxonomy(ids = 9989, hierarchy = \"children\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy_lca","snippet":"### Name: rba_uniprot_taxonomy_lca\n### Title: Get Lowest Common Ancestor (LCA) of Two Taxonomy Nodes\n### Aliases: rba_uniprot_taxonomy_lca\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy_lca(c(9606,10090,9823,7712))\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy_lineage","snippet":"### Name: rba_uniprot_taxonomy_lineage\n### Title: Get Taxonomic Lineage\n### Aliases: rba_uniprot_taxonomy_lineage\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy_lineage(id = 9989)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy_name","snippet":"### Name: rba_uniprot_taxonomy_name\n### Title: Search UniProt Taxonomic Names\n### Aliases: rba_uniprot_taxonomy_name\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy_name(name = \"homo\", field = \"scientific\",\n search_type = \"start_with\")\n## End(No test)\n## No test: \nrba_uniprot_taxonomy_name(name = \"adenovirus\", field = \"scientific\",\n search_type = \"contain\", page_size = 200, page_number = 2)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy_path","snippet":"### Name: rba_uniprot_taxonomy_path\n### Title: Traverse UniProt Taxonomic Tree Path\n### Aliases: rba_uniprot_taxonomy_path\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy_path(id = 9606, direction = \"TOP\", depth = 3)\n## End(No test)\n## No test: \nrba_uniprot_taxonomy_path(id = 207598, direction = \"BOTTOM\", depth = 3)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_taxonomy_relationship","snippet":"### Name: rba_uniprot_taxonomy_relationship\n### Title: Get Shortest Path Between Two Taxonomy Nodes\n### Aliases: rba_uniprot_taxonomy_relationship\n\n### ** Examples\n\n## No test: \nrba_uniprot_taxonomy_relationship(from = 9606, to = 10090)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_uniparc","snippet":"### Name: rba_uniprot_uniparc\n### Title: Get UniParc entry\n### Aliases: rba_uniprot_uniparc\n\n### ** Examples\n\n## No test: \nrba_uniprot_uniparc(upi = \"UPI00000000C9\")\n## End(No test)\n## No test: \nrba_uniprot_uniparc(upi = \"UPI00000000C9\", rf_active = FALSE)\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_uniparc_bestguess","snippet":"### Name: rba_uniprot_uniparc_bestguess\n### Title: Get UniParc Longest Sequence for Entries\n### Aliases: rba_uniprot_uniparc_bestguess\n\n### ** Examples\n\n## No test: \nrba_uniprot_uniparc_bestguess(\"UPI00000000C9\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_uniparc_search","snippet":"### Name: rba_uniprot_uniparc_search\n### Title: Search UniParc Entries\n### Aliases: rba_uniprot_uniparc_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_uniparc_search(upi = \"UPI00000000C9\")\n## End(No test)\n## No test: \nrba_uniprot_uniparc_search(accession = \"P30914\")\n## End(No test)\n## No test: \nrba_uniprot_uniparc_search(accession = \"P30914\", rf_active = TRUE)\n## End(No test)\n## No test: \nrba_uniprot_uniparc_search(taxid = \"694009\", protein = \"Nucleoprotein\")\n## End(No test)\n\n\n\n"} 
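{"package":"rbioapi","topic":"rba_uniprot_taxonomy_lca","snippet":"### ** Examples (usage sketch)\n\n## No test: \n# A minimal sketch, not from the package docs: it composes the\n# documented taxonomy helpers above, getting the lowest common\n# ancestor of human and mouse and then cross-checking it by walking\n# the shortest path between the two nodes.\nrba_uniprot_taxonomy_lca(c(9606, 10090))\nrba_uniprot_taxonomy_relationship(from = 9606, to = 10090)\n## End(No test)\n\n\n\n"} 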
{"package":"rbioapi","topic":"rba_uniprot_uniparc_sequence","snippet":"### Name: rba_uniprot_uniparc_sequence\n### Title: Get UniParc Entries by Sequence\n### Aliases: rba_uniprot_uniparc_sequence\n\n### ** Examples\n\n## No test: \nrba_uniprot_uniparc_sequence(\"GMRSCPRGCSQRGRCENGRCVCNPGYTGEDC\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_variation","snippet":"### Name: rba_uniprot_variation\n### Title: Get natural variants in UniProt by NIH-NCBI SNP database\n### identifier\n### Aliases: rba_uniprot_variation\n\n### ** Examples\n\n## No test: \nrba_uniprot_variation(id = \"rs121434451\", id_type = \"dbsnp\")\n## End(No test)\n## No test: \nrba_uniprot_variation(id = \"NC_000008.11:g.22119227C>T\", id_type = \"hgvs\")\n## End(No test)\n## No test: \nrba_uniprot_variation(id = \"O43593\", id_type = \"uniprot\")\n## End(No test)\n\n\n\n"} {"package":"rbioapi","topic":"rba_uniprot_variation_search","snippet":"### Name: rba_uniprot_variation_search\n### Title: Search UniProt Natural Variants\n### Aliases: rba_uniprot_variation_search\n\n### ** Examples\n\n## No test: \nrba_uniprot_variation_search(accession = \"P05067\")\n## End(No test)\n## No test: \nrba_uniprot_variation_search(disease = \"alzheimer disease, 18\")\n## End(No test)\n## No test: \nrba_uniprot_variation_search(disease = \"alzheimer\",\n wild_type = \"A\", alternative_sequence = \"T\")\n## End(No test)\n\n\n\n"} {"package":"TRexSelector","topic":"FDP","snippet":"### Name: FDP\n### Title: False discovery proportion (FDP)\n### Aliases: FDP\n\n### ** Examples\n\ndata(\"Gauss_data\")\nX <- Gauss_data$X\ny <- c(Gauss_data$y)\nbeta <- Gauss_data$beta\n\nset.seed(1234)\nres <- trex(X, y)\nbeta_hat <- res$selected_var\n\nFDP(beta_hat = beta_hat, beta = beta)\n\n\n"} {"package":"TRexSelector","topic":"Gauss_data","snippet":"### Name: Gauss_data\n### Title: Toy data generated from a Gaussian linear model\n### Aliases: Gauss_data\n### Keywords: datasets\n\n### ** Examples\n\n# Generated as follows:\nset.seed(789)\nn <- 50\np <- 100\nX <- matrix(stats::rnorm(n * p), nrow = n, ncol = p)\nbeta <- c(rep(5, times = 3), rep(0, times = 97))\nsupport <- beta > 0\ny <- X %*% beta + stats::rnorm(n)\nGauss_data <- list(\n X = X,\n y = y,\n beta = beta,\n support = support\n)\n\n\n"} {"package":"TRexSelector","topic":"TPP","snippet":"### Name: TPP\n### Title: True positive proportion (TPP)\n### Aliases: TPP\n\n### ** Examples\n\ndata(\"Gauss_data\")\nX <- Gauss_data$X\ny <- c(Gauss_data$y)\nbeta <- Gauss_data$beta\n\nset.seed(1234)\nres <- trex(X, y)\nbeta_hat <- res$selected_var\n\nTPP(beta_hat = beta_hat, beta = beta)\n\n\n"} {"package":"TRexSelector","topic":"add_dummies","snippet":"### Name: add_dummies\n### Title: Add dummy predictors to the original predictor matrix\n### Aliases: add_dummies\n\n### ** Examples\n\nset.seed(123)\nn <- 50\np <- 100\nX <- matrix(stats::rnorm(n * p), nrow = n, ncol = p)\nadd_dummies(X = X, num_dummies = p)\n\n\n"} {"package":"TRexSelector","topic":"add_dummies_GVS","snippet":"### Name: add_dummies_GVS\n### Title: Add dummy predictors to the original predictor matrix, as\n### required by the T-Rex+GVS selector\n### Aliases: add_dummies_GVS\n\n### ** Examples\n\nset.seed(123)\nn <- 50\np <- 100\nX <- matrix(stats::rnorm(n * p), nrow = n, ncol = p)\nadd_dummies_GVS(X = X, num_dummies = p)\n\n\n"} {"package":"TRexSelector","topic":"lm_dummy","snippet":"### Name: lm_dummy\n### Title: Perform one random experiment\n### Aliases: lm_dummy\n\n### ** Examples\n\nset.seed(123)\neps <- 
.Machine$double.eps\nn <- 75\np <- 100\nX <- matrix(stats::rnorm(n * p), nrow = n, ncol = p)\nbeta <- c(rep(3, times = 3), rep(0, times = 97))\ny <- X %*% beta + rnorm(n)\nres <- lm_dummy(X = X, y = y, T_stop = 1, num_dummies = 5 * p)\nbeta_hat <- res$get_beta()[seq(p)]\nsupport <- abs(beta_hat) > eps\nsupport\n\n\n"} {"package":"TRexSelector","topic":"random_experiments","snippet":"### Name: random_experiments\n### Title: Run K random experiments\n### Aliases: random_experiments\n\n### ** Examples\n\nset.seed(123)\ndata(\"Gauss_data\")\nX <- Gauss_data$X\ny <- c(Gauss_data$y)\nres <- random_experiments(X = X, y = y)\nrelative_occurrences_matrix <- res$phi_T_mat\nrelative_occurrences_matrix\n\n\n"} {"package":"TRexSelector","topic":"trex","snippet":"### Name: trex\n### Title: Run the T-Rex selector\n### Aliases: trex\n\n### ** Examples\n\ndata(\"Gauss_data\")\nX <- Gauss_data$X\ny <- c(Gauss_data$y)\nset.seed(1234)\nres <- trex(X = X, y = y)\nselected_var <- res$selected_var\nselected_var\n\n\n"} {"package":"eegkitdata","topic":"eegdata","snippet":"### Name: eegdata\n### Title: EEG Data from Alcoholic and Control Subjects\n### Aliases: eegdata\n### Keywords: datasets\n\n### ** Examples\n\n# see examples for eegtime, eegspace, eegica, and eegsmooth (in package eegkit)\n\n# example code to create eegdata (not run):\n# #(1)# download and untar SMNI_CMI_TRAIN.tar.gz file from UCI:\n# # http://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/\n# #(2)# eegdata=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",nt=5)\n\n\n"} {"package":"eegkitdata","topic":"eegkitdata-package","snippet":"### Name: eegkitdata-package\n### Title: Electroencephalography Toolkit Datasets\n### Aliases: eegkitdata-package eegkitdata\n### Keywords: package\n\n### ** Examples\n\n# See examples for eegcap, eegtime, eegspace, eegica, and eegsmooth (in package eegkit)\n\n\n"} {"package":"eegkitdata","topic":"geteegdata","snippet":"### Name: geteegdata\n### Title: Create Data Matrix from UCI EEG Database\n### Aliases: geteegdata\n\n### ** Examples\n\n########## EXAMPLE 1: UCI TRAIN DATA (not run) ##########\n\n# Note: you need to change 'indir' and 'outdir' in Steps 2-4\n\n# #(1)# download and untar SMNI_CMI_TRAIN.tar.gz file from UCI:\n# # # http://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/\n\n##### for Unix/Mac #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S1\",filename=\"eegtrainS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S2m\",filename=\"eegtrainS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S2n\",filename=\"eegtrainS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n##### for Windows #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S1\",filename=\"eegtrainS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S2m\",filename=\"eegtrainS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TRAIN/\",\n# cond=\"S2n\",filename=\"eegtrainS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n\n########## EXAMPLE 2: UCI TEST 
DATA (not run) ##########\n\n# # Note: you need to change 'indir' and 'outdir' in Steps 2-4\n\n# #(1)# download and untar SMNI_CMI_TEST.tar.gz file from UCI:\n# # # http://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/\n\n##### for Unix/Mac #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S1\",filename=\"eegtestS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S2m\",filename=\"eegtestS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S2n\",filename=\"eegtestS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n##### for Windows #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S1\",filename=\"eegtestS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S2m\",filename=\"eegtestS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"C:/Users/Nate/Downloads/SMNI_CMI_TEST/\",\n# cond=\"S2n\",filename=\"eegtestS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n\n########## EXAMPLE 3: UCI FULL DATA (not run) ##########\n\n# #(1)# download and untar eeg_full.tar file from UCI:\n# # # http://archive.ics.uci.edu/ml/machine-learning-databases/eeg-mld/\n\n##### for Unix/Mac #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S1\",filename=\"eegfullS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S2m\",filename=\"eegfullS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S2n\",filename=\"eegfullS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n##### for Windows #####\n\n# #(2)# extract condition \"S1\" and save as .rda\n# eegS1=geteegdata(indir=\"C:/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S1\",filename=\"eegfullS1\")\n \n# #(3)# extract condition \"S2m\" and save as .rda\n# eegS2m=geteegdata(indir=\"C:/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S2m\",filename=\"eegfullS2m\")\n \n# #(4)# extract condition \"S2n\" and save as .rda\n# eegS2n=geteegdata(indir=\"C:/Users/Nate/Downloads/eeg_full/\",\n# cond=\"S2n\",filename=\"eegfullS2n\")\n\n# #(5)# combine conditions\n# eegdata=rbind(eegS1,eegS2m,eegS2n)\n\n\n"} {"package":"ezmmek","topic":"new_ezmmek_act_calibrate","snippet":"### Name: new_ezmmek_act_calibrate\n### Title: new_ezmmek_act_calibrate\n### Aliases: new_ezmmek_act_calibrate\n\n### ** Examples\n\n## Not run: \n##D new_obj <- new_ezmmek_act_calibrate(\"data/tyson_std_04172020.csv\",\n##D \"data/tyson_sat_steen_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"isc\",\n##D columns = NULL)\n##D new_obj <- new_ezmmek_act_calibrate(\"data/tyson_std_04172020.csv\",\n##D \"data/tyson_sat_german_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"ibc\",\n##D columns = NULL)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"new_ezmmek_act_group","snippet":"### Name: new_ezmmek_act_group\n### Title: new_ezmmek_act_group\n### Aliases: 
new_ezmmek_act_group\n\n### ** Examples\n\n## Not run: \n##D new_obj <- new_ezmmek_act_group(\"data/tyson_sat_steen_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"isc\",\n##D columns = NULL)\n##D new_obj <- new_ezmmek_act_group(\"data/tyson_sat_german_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"ibc\",\n##D columns = NULL)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"new_ezmmek_sat_fit","snippet":"### Name: new_ezmmek_sat_fit\n### Title: new_ezmmek_sat_fit\n### Aliases: new_ezmmek_sat_fit\n\n### ** Examples\n\n## Not run: \n##D new_obj <- new_ezmmek_sat_fit(\"data/tyson_std_04172020.csv\",\n##D \"data/tyson_sat_steen_04172020.csv\",\n##D site_name,\n##D std_type,\n##D km = NULL,\n##D vmax = NULL,\n##D method = \"isc\")\n##D new_obj <- new_ezmmek_sat_fit(\"data/tyson_std_04172020.csv\",\n##D \"data/tyson_sat_german_04172020.csv\",\n##D site_name,\n##D std_type,\n##D km = NULL,\n##D vmax = NULL,\n##D method = \"ibc\")\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"new_ezmmek_std_group","snippet":"### Name: new_ezmmek_std_group\n### Title: new_ezmmek_std_group\n### Aliases: new_ezmmek_std_group\n\n### ** Examples\n\n## Not run: \n##D new_obj <- new_ezmmek_std_group(\"data/tyson_std_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"isc\",\n##D columns = NULL)\n##D new_obj <- new_ezmmek_std_group(\"data/tyson_std_04172020.csv\",\n##D site_name,\n##D std_type,\n##D method = \"ibc\",\n##D columns = NULL)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"plot.new_ezmmek_act_group","snippet":"### Name: plot.new_ezmmek_act_group\n### Title: plot_new_ezmmek_act_group\n### Aliases: plot.new_ezmmek_act_group\n\n### ** Examples\n\n## Not run: \n##D plot.new_ezmmek_act_group(new_ezmmek_act_group_obj,\n##D site_name,\n##D std_type)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"plot.new_ezmmek_calibrate","snippet":"### Name: plot.new_ezmmek_calibrate\n### Title: plot_new_ezmmek_calibrate\n### Aliases: plot.new_ezmmek_calibrate\n\n### ** Examples\n\n## Not run: \n##D plot.new_ezmmek_calibrate(new_ezmmek_calibrate_obj,\n##D site_name,\n##D std_type)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"plot.new_ezmmek_sat_fit","snippet":"### Name: plot.new_ezmmek_sat_fit\n### Title: plot_new_ezmmek_sat_fit\n### Aliases: plot.new_ezmmek_sat_fit\n\n### ** Examples\n\n## Not run: \n##D plot.new_ezmmek_sat_fit(new_ezmmek_sat_fit_obj,\n##D site_name,\n##D std_type)\n## End(Not run)\n\n\n"} {"package":"ezmmek","topic":"plot.new_ezmmek_std_group","snippet":"### Name: plot.new_ezmmek_std_group\n### Title: plot_new_ezmmek_std_group\n### Aliases: plot.new_ezmmek_std_group\n\n### ** Examples\n\n## Not run: \n##D plot.new_ezmmek_std_group(new_ezmmek_std_group_obj,\n##D site_name,\n##D std_type)\n## End(Not run)\n\n\n"} {"package":"WeightedROC","topic":"WeightedAUC","snippet":"### Name: WeightedAUC\n### Title: WeightedAUC\n### Aliases: WeightedAUC\n\n### ** Examples\n\n\nlibrary(WeightedROC)\n## Compute the AUC for this weighted data set.\ny <- c(0, 0, 1, 1, 1)\nw <- c(1, 1, 1, 4, 5)\ny.hat <- c(1, 2, 3, 1, 1)\ntp.fp <- WeightedROC(y.hat, y, w)\n(wauc <- WeightedAUC(tp.fp))\n\n## For the un-weighted ROCR example data set, verify that our AUC is\n## the same as that of ROCR/pROC.\nif(require(microbenchmark) && require(ROCR) && require(pROC)){\n data(ROCR.simple, envir=environment())\n microbenchmark(WeightedROC={\n tp.fp <- with(ROCR.simple, WeightedROC(predictions, labels))\n wroc <- WeightedAUC(tp.fp)\n }, ROCR={\n pred <- with(ROCR.simple, 
prediction(predictions, labels))\n rocr <- performance(pred, \"auc\")@y.values[[1]]\n }, pROC={\n proc <- pROC::auc(labels ~ predictions, ROCR.simple, algorithm=2)\n }, times=10)\n rbind(WeightedROC=wroc, ROCR=rocr, pROC=proc) #same\n}\n\n## For the un-weighted pROC example data set, verify that our AUC is\n## the same as that of ROCR/pROC.\ndata(aSAH, envir=environment())\ntable(aSAH$s100b)\nif(require(microbenchmark)){\n microbenchmark(WeightedROC={\n tp.fp <- with(aSAH, WeightedROC(s100b, outcome))\n wroc <- WeightedAUC(tp.fp)\n }, ROCR={\n pred <- with(aSAH, prediction(s100b, outcome))\n rocr <- performance(pred, \"auc\")@y.values[[1]]\n }, pROC={\n proc <- pROC::auc(outcome ~ s100b, aSAH, algorithm=2)\n }, times=10)\n rbind(WeightedROC=wroc, ROCR=rocr, pROC=proc)\n}\n\n\n\n"} {"package":"WeightedROC","topic":"WeightedROC","snippet":"### Name: WeightedROC\n### Title: WeightedROC\n### Aliases: WeightedROC\n\n### ** Examples\n\n\n## WeightedROC can compute ROC curves for data sets with variable\n## weights.\nlibrary(WeightedROC)\ny <- c(-1, -1, 1, 1, 1)\nw <- c(1, 1, 1, 4, 5)\ny.hat <- c(1, 2, 3, 1, 1)\ntp.fp <- WeightedROC(y.hat, y, w)\nif(require(ggplot2)){\n gg <- ggplot()+\n geom_path(aes(FPR, TPR), data=tp.fp)+\n coord_equal()\n print(gg)\n}else{\n plot(TPR~FPR, tp.fp, type=\"l\")\n}\n\n## The FN/FP columns can be used to plot weighted error as a\n## function of threshold.\nerror.fun.list <- list(\n FN=function(df)df$FN,\n FP=function(df)df$FP,\n errors=function(df)with(df, FP+FN)\n )\nall.error.list <- list()\nfor(error.type in names(error.fun.list)){\n error.fun <- error.fun.list[[error.type]]\n all.error.list[[error.type]] <-\n data.frame(tp.fp, error.type, weighted.error=error.fun(tp.fp))\n}\nall.error <- do.call(rbind, all.error.list)\nfp.fn.colors <- c(FP=\"skyblue\",\n FN=\"#E41A1C\",\n errors=\"black\")\nggplot()+\n scale_color_manual(values=fp.fn.colors)+\n geom_line(aes(threshold, weighted.error, color=error.type),\n data=all.error)\n\nif(require(microbenchmark) && require(ROCR) && require(pROC)){\n \n data(ROCR.simple, envir=environment())\n ## Compare speed and plot ROC curves for the ROCR example data set.\n microbenchmark(WeightedROC={\n tp.fp <- with(ROCR.simple, WeightedROC(predictions, labels))\n }, ROCR={\n pred <- with(ROCR.simple, prediction(predictions, labels))\n perf <- performance(pred, \"tpr\", \"fpr\")\n }, pROC.1={\n proc <- roc(labels ~ predictions, ROCR.simple, algorithm=1)\n }, pROC.2={\n proc <- roc(labels ~ predictions, ROCR.simple, algorithm=2)\n }, pROC.3={\n proc <- roc(labels ~ predictions, ROCR.simple, algorithm=3)\n }, times=10)\n perfDF <- function(p){\n data.frame(FPR=p@x.values[[1]], TPR=p@y.values[[1]], package=\"ROCR\")\n }\n procDF <- function(p){\n data.frame(FPR=1-p$specificities, TPR=p$sensitivities, package=\"pROC\")\n }\n roc.curves <- rbind(\n data.frame(tp.fp[, c(\"FPR\", \"TPR\")], package=\"WeightedROC\"),\n perfDF(perf),\n procDF(proc))\n ggplot()+\n geom_path(aes(FPR, TPR, color=package, linetype=package),\n data=roc.curves, size=1)+\n coord_equal()\n \n ## Compare speed and plot ROC curves for the pROC example data set.\n data(aSAH, envir=environment())\n microbenchmark(WeightedROC={\n tp.fp <- with(aSAH, WeightedROC(s100b, outcome))\n }, ROCR={\n pred <- with(aSAH, prediction(s100b, outcome))\n perf <- performance(pred, \"tpr\", \"fpr\")\n }, pROC.1={\n proc <- roc(outcome ~ s100b, aSAH, algorithm=1)\n }, pROC.2={\n proc <- roc(outcome ~ s100b, aSAH, algorithm=2)\n }, pROC.3={\n proc <- roc(outcome ~ s100b, aSAH, 
algorithm=3)\n }, times=10)\n roc.curves <- rbind(\n data.frame(tp.fp[, c(\"FPR\", \"TPR\")], package=\"WeightedROC\"),\n perfDF(perf),\n procDF(proc))\n ggplot()+\n geom_path(aes(FPR, TPR, color=package, linetype=package),\n data=roc.curves, size=1)+\n coord_equal()\n \n ## Compute a small ROC curve with 1 tie to show the diagonal.\n y <- c(-1, -1, 1, 1)\n y.hat <- c(1, 2, 3, 1)\n microbenchmark(WeightedROC={\n tp.fp <- WeightedROC(y.hat, y)\n }, ROCR={\n pred <- prediction(y.hat, y)\n perf <- performance(pred, \"tpr\", \"fpr\")\n }, pROC.1={\n proc <- roc(y ~ y.hat, algorithm=1)\n }, pROC.2={\n proc <- roc(y ~ y.hat, algorithm=2)\n }, pROC.3={\n proc <- roc(y ~ y.hat, algorithm=3)\n }, times=10)\n roc.curves <- rbind(\n data.frame(tp.fp[, c(\"FPR\", \"TPR\")], package=\"WeightedROC\"),\n perfDF(perf),\n procDF(proc))\n ggplot()+\n geom_path(aes(FPR, TPR, color=package, linetype=package),\n data=roc.curves, size=1)+\n coord_equal()\n\n}\n\n\n\n"} {"package":"rocTree","topic":"plot.rocTree","snippet":"### Name: plot.rocTree\n### Title: Plotting an 'rocTree' object\n### Aliases: plot.rocTree\n\n### ** Examples\n\n## Not run: \n##D data(simDat)\n##D fit <- rocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE)\n##D ## Plot tree\n##D plot(fit)\n##D ## Plot survival estimates at terminal nodes\n##D plot(fit, type = \"survival\")\n##D ## Plot hazard estimates at terminal nodes\n##D plot(fit, type = \"haz\")\n## End(Not run)\n\n\n"} {"package":"rocTree","topic":"predict.rocTree","snippet":"### Name: predict.rocTree\n### Title: Predicting based on a 'rocTree' model.\n### Aliases: predict.rocTree\n\n### ** Examples\n\ndata(simDat)\nfit <- rocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE)\n\n## testing data\nnewdat <- data.frame(Time = sort(unique(simDat$Time)), \n z2 = runif(1))\nnewdat$z1 <- 1 * (newdat$Time < median(newdat$Time))\nhead(newdat)\n\n## Predict survival \npred <- predict(fit, newdat)\nplot(pred)\n\n## Predict hazard\npred <- predict(fit, newdat, type = \"hazard\")\nplot(pred)\n\n\n"} {"package":"rocTree","topic":"print.rocTree","snippet":"### Name: print.rocTree\n### Title: Printing an 'rocTree' object\n### Aliases: print.rocTree\n\n### ** Examples\n\ndata(simDat)\n\n## Fitting a pruned survival tree\nrocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE)\n\n## Fitting an unpruned survival tree\nrocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE,\n control = list(numFold = 0))\n\n## Not run: \n##D ## Fitting the ensemble algorithm (default)\n##D rocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = TRUE)\n## End(Not run)\n\n\n"} {"package":"rocTree","topic":"rocTree","snippet":"### Name: rocTree\n### Title: Roc-guided survival trees\n### Aliases: rocTree\n### Keywords: rocTree\n\n### ** Examples\n\ndata(simDat)\n\n## Fitting a pruned survival tree\nrocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE)\n\n## Fitting an unpruned survival tree\nrocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = FALSE,\n control = list(numFold = 0))\n\n## Not run: \n##D ## Fitting the ensemble algorithm (default)\n##D rocTree(Surv(Time, death) ~ z1 + z2, id = id, data = simDat, ensemble = TRUE)\n## End(Not run)\n\n\n"} {"package":"rocTree","topic":"simu","snippet":"### Name: simu\n### Title: Function to generate simulated data used in the manuscript.\n### Aliases: simu trueHaz trueSurv\n\n### ** Examples\n\nset.seed(1)\nsimu(10, 0.25, 1.2, 
TRUE)\n\nset.seed(1)\nsimu(10, 0.50, 2.2, TRUE)\n\n\n\n"} {"package":"RWgraph","topic":"info_client","snippet":"### Name: info_client\n### Title: Random walk metrics for each client\n### Aliases: info_client\n\n### ** Examples\n\ng <- igraph::graph_from_data_frame(d = transactions_small_example[, 1:3], directed = TRUE)\ninfo_client(g, data = clients_small_example)\n\n\n\n"} {"package":"RWgraph","topic":"mean_rw_client","snippet":"### Name: mean_rw_client\n### Title: Metrics for multiple random walks\n### Aliases: mean_rw_client\n\n### ** Examples\n\ng <- igraph::graph_from_data_frame(d = transactions_small_example[, 1:3], directed = TRUE)\nv <- transactions_small_example[1, 1]\nmean_rw_client(v, g, data = clients_small_example)\n\n\n\n"} {"package":"RWgraph","topic":"rw_client","snippet":"### Name: rw_client\n### Title: Random walk simulation\n### Aliases: rw_client\n\n### ** Examples\n\ng <- igraph::graph_from_data_frame(d = transactions_small_example[, 1:3], directed = TRUE)\nv <- transactions_small_example[1, 1]\nrw_client(v, g, data = clients_small_example)\n\n\n"} {"package":"pleio","topic":"pleio.demo","snippet":"### Name: pleio.demo\n### Title: Demonstration dataset for pleiotropy tests with a mixture of\n### trait types and covariates\n### Aliases: pleio.demo geno y x\n### Keywords: datasets\n\n### ** Examples\n\ndata(pleio.demo)\nstr(y)\ntable(geno)\nx[1:5,]\n\n\n"} {"package":"pleio","topic":"pleio.glm.sequential","snippet":"### Name: pleio.glm.sequential\n### Title: Perform sequential tests of pleiotropy\n### Aliases: pleio.glm.sequential\n\n### ** Examples\n\ndata(pleio.demo)\n\n## test without covars\nfams <- c(\"gaussian\",\"binomial\",\"ordinal\")\nobj <- pleio.glm.fit(y, geno, glm.family=fams)\nstat <- pleio.glm.test(obj, count.nonzero.coef = 0)\nstat$stat\nstat$pval\npseq <- pleio.glm.sequential(obj, pval.threshold=.5)\npseq\n\n\n"} {"package":"pleio","topic":"pleio.glm.test","snippet":"### Name: pleio.glm.test\n### Title: Single test of the number of traits associated with genotype\n### Aliases: pleio.glm.test\n\n### ** Examples\n\ndata(pleio.demo)\nobj <- pleio.glm.fit(y, geno, glm.family=c(\"gaussian\",\"binomial\",\"ordinal\"))\ntest1 <- pleio.glm.test(obj, count.nonzero.coef = 0)\ntest1\ntest2 <- pleio.glm.test(obj, count.nonzero.coef = 1)\ntest2\n\n\n"} {"package":"pleio","topic":"pleio.q.sequential","snippet":"### Name: pleio.q.sequential\n### Title: Perform sequential tests of pleiotropy\n### Aliases: pleio.q.sequential\n\n### ** Examples\n\ndata(pleio.qdemo)\nfit <- pleio.q.fit(y, geno)\ntest.seq <- pleio.q.sequential(fit, pval.threshold=.05)\ntest.seq\n\n\n"} {"package":"pleio","topic":"pleio.q.test","snippet":"### Name: pleio.q.test\n### Title: Single test of the number of traits associated with genotype\n### Aliases: pleio.q.test\n\n### ** Examples\n\ndata(pleio.qdemo)\nfit <- pleio.q.fit(y, geno)\n## usual multivariate test of whether all betas = 0\ntest0 <- pleio.q.test(fit, count.nonzero.beta = 0)\ntest0\n## test whether allowing 2 betas to be non-zero fits data\ntest2 <- pleio.q.test(fit, count.nonzero.beta = 2)\ntest2\n\n\n"} {"package":"pleio","topic":"pleio.qdemo","snippet":"### Name: pleio.qdemo\n### Title: Demonstration dataset for quantitative pleiotropy tests\n### Aliases: pleio.qdemo\n### Keywords: datasets\n\n### ** Examples\n\ndata(pleio.qdemo)\nstr(y)\ntable(geno)\n\n\n"} {"package":"RQuantLib","topic":"AffineSwaption","snippet":"### Name: AffineSwaption\n### Title: Affine swaption valuation using several short-rate models\n### Aliases: 
AffineSwaption AffineSwaption.default\n### summary.G2AnalyticAffineSwaption summary.HWAnalyticAffineSwaption\n### summary.HWTreeAffineSwaption summary.BKTreeAffineSwaption\n### Keywords: models\n\n### ** Examples\n\nif (.Platform$OS.type != \"windows\" && .Platform$r_arch != \"i386\") {\n## Not run: \n##D \n##D # This data was generated to match the original quantlib example for Bermudan Swaption\n##D params <- list(tradeDate=as.Date('2016-2-15'),\n##D settleDate=as.Date('2016-2-17'),\n##D startDate=as.Date('2017-2-17'),\n##D maturity=as.Date('2022-2-17'),\n##D payFixed=TRUE,\n##D european=FALSE,\n##D dt=.25,\n##D strike=.06,\n##D method=\"G2Analytic\",\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D \n##D # Market data used to construct the term structure of interest rates\n##D tsQuotes <- list(d1w =0.0382,\n##D d1m =0.0372,\n##D fut1=96.2875,\n##D fut2=96.7875,\n##D fut3=96.9875,\n##D fut4=96.6875,\n##D fut5=96.4875,\n##D fut6=96.3875,\n##D fut7=96.2875,\n##D fut8=96.0875,\n##D s3y =0.0398,\n##D s5y =0.0443,\n##D s10y =0.05165,\n##D s15y =0.055175)\n##D \n##D \n##D # Swaption volatility matrix with corresponding maturities and tenors\n##D swaptionMaturities <- c(1,2,3,4,5)\n##D \n##D swapTenors <- c(1,2,3,4,5)\n##D \n##D volMatrix <- matrix(\n##D c(0.1490, 0.1340, 0.1228, 0.1189, 0.1148,\n##D 0.1290, 0.1201, 0.1146, 0.1108, 0.1040,\n##D 0.1149, 0.1112, 0.1070, 0.1010, 0.0957,\n##D 0.1047, 0.1021, 0.0980, 0.0951, 0.1270,\n##D 0.1000, 0.0950, 0.0900, 0.1230, 0.1160),\n##D ncol=5, byrow=TRUE)\n##D \n##D legparams=list(dayCounter=\"Thirty360\",\n##D fixFreq=\"Annual\",\n##D floatFreq=\"Semiannual\")\n##D \n##D setEvaluationDate(as.Date(\"2016-2-16\"))\n##D times <- seq(0,14.75,.25)\n##D dcurve <- DiscountCurve(params, tsQuotes, times=times,legparams)\n##D \n##D # Price the Bermudan swaption\n##D pricing <- AffineSwaption(params, dcurve,swaptionMaturities, swapTenors, volMatrix,legparams)\n##D summary(pricing)\n##D \n## End(Not run)\n}\n\n\n"} {"package":"RQuantLib","topic":"AmericanOption","snippet":"### Name: AmericanOption\n### Title: American Option evaluation using Finite Differences\n### Aliases: AmericanOption AmericanOption.default\n### Keywords: misc\n\n### ** Examples\n\n# simple call with unnamed parameters\nAmericanOption(\"call\", 100, 100, 0.02, 0.03, 0.5, 0.4)\n# simple call with some explicit parameters\nAmericanOption(\"put\", strike=100, volatility=0.4, 100, 0.02, 0.03, 0.5)\n# simple call with some explicit parameters, using Crank-Nicolson\nAmericanOption(\"put\", strike=100, volatility=0.4, 100, 0.02, 0.03, 0.5, engine=\"CrankNicolson\")\n\n\n"} {"package":"RQuantLib","topic":"AmericanOptionImpliedVolatility","snippet":"### Name: AmericanOptionImpliedVolatility\n### Title: Implied Volatility calculation for American Option\n### Aliases: AmericanOptionImpliedVolatility\n### AmericanOptionImpliedVolatility.default\n### Keywords: misc\n\n### ** Examples\n\nAmericanOptionImpliedVolatility(type=\"call\", value=11.10, underlying=100,\n\tstrike=100, dividendYield=0.01, riskFreeRate=0.03,\n\tmaturity=0.5, volatility=0.4)\n\n\n"} {"package":"RQuantLib","topic":"AsianOption","snippet":"### Name: AsianOption\n### Title: Asian Option evaluation using Closed-Form solution\n### Aliases: AsianOption AsianOption.default\n### Keywords: misc\n\n### ** Examples\n\n# simple call with some explicit parameters, and slightly increased vol:\nAsianOption(\"geometric\", \"put\", underlying=80, strike=85, div=-0.03,\n riskFree=0.05, maturity=0.25, vol=0.2)\n\n\n"} 
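{"package":"RQuantLib","topic":"AmericanOption","snippet":"### ** Examples (usage sketch)\n\n# A small sketch, not from the package docs: price an American put,\n# then back the implied volatility out of the computed value with the\n# documented AmericanOptionImpliedVolatility(); it should recover the\n# 0.4 input up to solver tolerance. Reading the price via '$value' is\n# an assumption about the returned Option object.\nput <- AmericanOption(\"put\", underlying=100, strike=100, dividendYield=0.02,\n riskFreeRate=0.03, maturity=0.5, volatility=0.4)\nAmericanOptionImpliedVolatility(type=\"put\", value=put$value, underlying=100,\n strike=100, dividendYield=0.02, riskFreeRate=0.03,\n maturity=0.5, volatility=0.3)\n\n\n"} 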
{"package":"RQuantLib","topic":"BarrierOption","snippet":"### Name: BarrierOption\n### Title: Barrier Option evaluation using Closed-Form solution\n### Aliases: BarrierOption BarrierOption.default\n### Keywords: misc\n\n### ** Examples\n\nBarrierOption(barrType=\"downin\", type=\"call\", underlying=100,\n\tstrike=100, dividendYield=0.02, riskFreeRate=0.03,\n\tmaturity=0.5, volatility=0.4, barrier=90)\n\n\n"} {"package":"RQuantLib","topic":"BermudanSwaption","snippet":"### Name: BermudanSwaption\n### Title: Bermudan swaption valuation using several short-rate models\n### Aliases: BermudanSwaption BermudanSwaption.default summary.G2Analytic\n### summary.HWAnalytic summary.HWTree summary.BKTree\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D # This data replicates sample code shipped with QuantLib 0.3.10 results\n##D params <- list(tradeDate=as.Date('2002-2-15'),\n##D settleDate=as.Date('2002-2-19'),\n##D startDate=as.Date('2003-2-19'),\n##D maturity=as.Date('2008-2-19'),\n##D dt=.25,\n##D payFixed=TRUE,\n##D strike=.05,\n##D method=\"G2Analytic\",\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D setEvaluationDate(as.Date('2002-2-15'))\n##D # Market data used to construct the term structure of interest rates\n##D tsQuotes <- list(d1w =0.05,\n##D # d1m =0.0372,\n##D # fut1=96.2875,\n##D # fut2=96.7875,\n##D # fut3=96.9875,\n##D # fut4=96.6875,\n##D # fut5=96.4875,\n##D # fut6=96.3875,\n##D # fut7=96.2875,\n##D # fut8=96.0875,\n##D s3y =0.05,\n##D s5y =0.05,\n##D s10y =0.05,\n##D s15y =0.05)\n##D \n##D times=seq(0,14.75,.25)\n##D swcurve=DiscountCurve(params,tsQuotes,times)\n##D # Use this to compare with the Bermudan swaption example from QuantLib\n##D #tsQuotes <- list(flat=0.04875825)\n##D \n##D # Swaption volatility matrix with corresponding maturities and tenors\n##D swaptionMaturities <- c(1,2,3,4,5)\n##D \n##D swapTenors <- c(1,2,3,4,5)\n##D \n##D volMatrix <- matrix(\n##D c(0.1490, 0.1340, 0.1228, 0.1189, 0.1148,\n##D 0.1290, 0.1201, 0.1146, 0.1108, 0.1040,\n##D 0.1149, 0.1112, 0.1070, 0.1010, 0.0957,\n##D 0.1047, 0.1021, 0.0980, 0.0951, 0.1270,\n##D 0.1000, 0.0950, 0.0900, 0.1230, 0.1160),\n##D ncol=5, byrow=TRUE)\n##D \n##D volMatrix <- matrix(\n##D c(rep(.20,25)),\n##D ncol=5, byrow=TRUE)\n##D # Price the Bermudan swaption\n##D pricing <- BermudanSwaption(params, ts=.05,\n##D swaptionMaturities, swapTenors, volMatrix)\n##D summary(pricing)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"BinaryOption","snippet":"### Name: BinaryOption\n### Title: Binary Option evaluation using Closed-Form solution\n### Aliases: BinaryOption BinaryOption.default\n### Keywords: misc\n\n### ** Examples\n\nBinaryOption(binType=\"asset\", type=\"call\", excType=\"european\",\n underlying=100, strike=100, dividendYield=0.02,\n riskFreeRate=0.03, maturity=0.5, volatility=0.4, cashPayoff=10)\n\n\n"} {"package":"RQuantLib","topic":"BinaryOptionImpliedVolatility","snippet":"### Name: BinaryOptionImpliedVolatility\n### Title: Implied Volatility calculation for Binary Option\n### Aliases: BinaryOptionImpliedVolatility\n### BinaryOptionImpliedVolatility.default\n### Keywords: misc\n\n### ** Examples\n\nBinaryOptionImpliedVolatility(\"call\", value=4.50, strike=100, 100, 0.02, 0.03, 0.5, 0.4, 10)\n\n\n"} {"package":"RQuantLib","topic":"Bond","snippet":"### Name: Bond\n### Title: Base class for Bond price evalution\n### Aliases: Bond plot.Bond print.Bond print.FixedRateBond summary.Bond\n### Keywords: misc\n\n### ** Examples\n\n## Not run: \n##D \n##D ## This data is taken from 
sample code shipped with QuantLib 0.9.7\n##D ## from the file Examples/Swap/swapvaluation\n##D params <- list(tradeDate=as.Date('2004-09-20'),\n##D settleDate=as.Date('2004-09-22'),\n##D dt=.25,\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D setEvaluationDate(as.Date(\"2004-09-20\"))\n##D \n##D ## We got numerical issues for the spline interpolation if we add\n##D ## any one of these three extra futures, at least with QuantLib 0.9.7\n##D ## The curve data comes from QuantLib's Examples/Swap/swapvaluation.cpp\n##D ## Removing s2y helps, as kindly pointed out by Luigi Ballabio\n##D tsQuotes <- list(d1w = 0.0382,\n##D d1m = 0.0372,\n##D fut1=96.2875,\n##D fut2=96.7875,\n##D fut3=96.9875,\n##D fut4=96.6875,\n##D fut5=96.4875,\n##D fut6=96.3875,\n##D fut7=96.2875,\n##D fut8=96.0875,\n##D # s2y = 0.037125, ## s2y perturbs\n##D s3y = 0.0398,\n##D s5y = 0.0443,\n##D s10y = 0.05165,\n##D s15y = 0.055175)\n##D times <- seq(0,10,.1)\n##D \n##D setEvaluationDate(params$tradeDate)\n##D discountCurve <- DiscountCurve(params, tsQuotes, times)\n##D \n##D # price a zero coupon bond\n##D bondparams <- list(faceAmount=100, issueDate=as.Date(\"2004-11-30\"),\n##D maturityDate=as.Date(\"2008-11-30\"), redemption=100 )\n##D dateparams <-list(settlementDays=1,\n##D calendar=\"UnitedStates/GovernmentBond\",\n##D businessDayConvention=4)\n##D ZeroCouponBond(bondparams, discountCurve, dateparams)\n##D \n##D # price a fixed rate coupon bond\n##D \n##D bond <- list(settlementDays=1, issueDate=as.Date(\"2004-11-30\"),\n##D faceAmount=100, dayCounter='Thirty360',\n##D paymentConvention='Unadjusted')\n##D schedule <- list(effectiveDate=as.Date(\"2004-11-30\"),\n##D maturityDate=as.Date(\"2008-11-30\"),\n##D period='Semiannual',\n##D calendar='UnitedStates/GovernmentBond',\n##D businessDayConvention='Unadjusted',\n##D terminationDateConvention='Unadjusted',\n##D dateGeneration='Forward',\n##D endOfMonth=1)\n##D calc=list(dayCounter='Actual360', compounding='Compounded',\n##D freq='Annual', durationType='Modified')\n##D rates <- c(0.02875)\n##D FixedRateBond(bond, rates, schedule, calc, discountCurve=discountCurve)\n##D \n##D # price a fixed rate coupon bond from yield\n##D \n##D yield <- 0.050517\n##D FixedRateBond(bond, rates, schedule, calc, yield=yield)\n##D \n##D # calculate the same bond from the clean price\n##D \n##D price <- 92.167\n##D FixedRateBond(bond, rates, schedule, calc, price=price)\n##D \n##D # price a floating rate bond\n##D bondparams <- list(faceAmount=100, issueDate=as.Date(\"2004-11-30\"),\n##D maturityDate=as.Date(\"2008-11-30\"), redemption=100,\n##D effectiveDate=as.Date(\"2004-12-01\"))\n##D \n##D dateparams <- list(settlementDays=1, calendar=\"UnitedStates/GovernmentBond\",\n##D dayCounter = 1, period=3, businessDayConvention = 1,\n##D terminationDateConvention=1, dateGeneration=0, endOfMonth=0,\n##D fixingDays = 1)\n##D \n##D gearings <- spreads <- caps <- floors <- vector()\n##D \n##D iborCurve <- DiscountCurve(params,list(flat=0.05), times)\n##D ibor <- list(type=\"USDLibor\", length=6, inTermOf=\"Month\",\n##D term=iborCurve)\n##D FloatingRateBond(bondparams, gearings, spreads, caps, floors,\n##D ibor, discountCurve, dateparams)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"isBusinessDay","snippet":"### Name: Calendars\n### Title: Calendar functions from QuantLib\n### Aliases: isBusinessDay businessDay isHoliday isWeekend isEndOfMonth\n### getEndOfMonth endOfMonth getHolidayList holidayList\n### getBusinessDayList businessDayList setCalendarContext adjust 
advance\n### businessDaysBetween dayCount yearFraction setEvaluationDate\n### advanceDate addHolidays removeHolidays calendars\n### Keywords: misc\n\n### ** Examples\n\n dates <- seq(from=as.Date(\"2009-04-07\"), to=as.Date(\"2009-04-14\"), by=1)\n isBusinessDay(\"UnitedStates\", dates)\n isBusinessDay(\"UnitedStates/Settlement\", dates) ## same as previous\n isBusinessDay(\"UnitedStates/NYSE\", dates) ## stocks\n isBusinessDay(\"UnitedStates/GovernmentBond\", dates) ## bonds\n isBusinessDay(\"UnitedStates/NERC\", dates) ## energy\n\n isHoliday(\"UnitedStates\", dates)\n isHoliday(\"UnitedStates/Settlement\", dates) ## same as previous\n isHoliday(\"UnitedStates/NYSE\", dates) ## stocks\n isHoliday(\"UnitedStates/GovernmentBond\", dates) ## bonds\n isHoliday(\"UnitedStates/NERC\", dates) ## energy\n\n isWeekend(\"UnitedStates\", dates)\n isWeekend(\"UnitedStates/Settlement\", dates) ## same as previous\n isWeekend(\"UnitedStates/NYSE\", dates) ## stocks\n isWeekend(\"UnitedStates/GovernmentBond\", dates) ## bonds\n isWeekend(\"UnitedStates/NERC\", dates) ## energy\n\n isEndOfMonth(\"UnitedStates\", dates)\n isEndOfMonth(\"UnitedStates/Settlement\", dates) ## same as previous\n isEndOfMonth(\"UnitedStates/NYSE\", dates) ## stocks\n isEndOfMonth(\"UnitedStates/GovernmentBond\", dates) ## bonds\n isEndOfMonth(\"UnitedStates/NERC\", dates) ## energy\n\n getEndOfMonth(\"UnitedStates\", dates)\n getEndOfMonth(\"UnitedStates/Settlement\", dates) ## same as previous\n getEndOfMonth(\"UnitedStates/NYSE\", dates) ## stocks\n getEndOfMonth(\"UnitedStates/GovernmentBond\", dates) ## bonds\n getEndOfMonth(\"UnitedStates/NERC\", dates) ## energy\n\n from <- as.Date(\"2009-04-07\")\n to<-as.Date(\"2009-04-14\")\n getHolidayList(\"UnitedStates\", from, to)\n to <- as.Date(\"2009-10-7\")\n getHolidayList(\"UnitedStates\", from, to)\n\n dates <- seq(from=as.Date(\"2009-04-07\"), to=as.Date(\"2009-04-14\"), by=1)\n\n adjust(\"UnitedStates\", dates)\n adjust(\"UnitedStates/Settlement\", dates) ## same as previous\n adjust(\"UnitedStates/NYSE\", dates) ## stocks\n adjust(\"UnitedStates/GovernmentBond\", dates) ## bonds\n adjust(\"UnitedStates/NERC\", dates) ## energy\n\n advance(\"UnitedStates\", dates, 10, 0)\n advance(\"UnitedStates/Settlement\", dates, 10, 1) ## same as previous\n advance(\"UnitedStates/NYSE\", dates, 10, 2) ## stocks\n advance(\"UnitedStates/GovernmentBond\", dates, 10, 3) ## bonds\n advance(\"UnitedStates/NERC\", dates, period = 3) ## energy\n\n from <- as.Date(\"2009-04-07\")\n to<-as.Date(\"2009-04-14\")\n businessDaysBetween(\"UnitedStates\", from, to)\n\n startDates <- seq(from=as.Date(\"2009-04-07\"), to=as.Date(\"2009-04-14\"),by=1)\n endDates <- seq(from=as.Date(\"2009-11-07\"), to=as.Date(\"2009-11-14\"), by=1)\n dayCounters <- c(0,1,2,3,4,5,6,1)\n dayCount(startDates, endDates, dayCounters)\n yearFraction(startDates, endDates, dayCounters)\n\n head(calendars, 10)\n\n\n"} {"package":"RQuantLib","topic":"CallableBond","snippet":"### Name: CallableBond\n### Title: CallableBond evaluation\n### Aliases: CallableBond CallableBond.default\n### Keywords: misc\n\n### ** Examples\n\nif (interactive()) { # the example is too computationally expensive for normal checks\n #set-up a HullWhite according to example from QuantLib\n HullWhite <- list(term = 0.055, alpha = 0.03, sigma = 0.01, gridIntervals = 40)\n\n #callability schedule dataframe\n Price <- rep(as.double(100),24)\n Type <- rep(as.character(\"C\"), 24)\n Date <- seq(as.Date(\"2006-09-15\"), by = '3 months', length = 24)\n callSch 
<- data.frame(Price, Type, Date)\n callSch$Type <- as.character(callSch$Type)\n\n bondparams <- list(faceAmount=100, issueDate = as.Date(\"2004-09-16\"),\n maturityDate=as.Date(\"2012-09-16\"), redemption=100,\n callSch = callSch)\n dateparams <- list(settlementDays=3, calendar=\"UnitedStates/GovernmentBond\",\n dayCounter = \"ActualActual\",\n period=\"Quarterly\",\n businessDayConvention = \"Unadjusted\",\n terminationDateConvention= \"Unadjusted\")\n coupon <- c(0.0465)\n setEvaluationDate(as.Date(\"2004-11-22\"))\n\n CallableBond(bondparams, HullWhite, coupon, dateparams)\n #examples using default values\n CallableBond(bondparams, HullWhite, coupon)\n dateparams <- list(period=\"Quarterly\",\n businessDayConvention = \"Unadjusted\",\n terminationDateConvention= \"Unadjusted\")\n CallableBond(bondparams, HullWhite, coupon, dateparams)\n\n bondparams <- list(issueDate = as.Date(\"2004-09-16\"),\n maturityDate=as.Date(\"2012-09-16\"))\n CallableBond(bondparams, HullWhite, coupon, dateparams)\n}\n\n\n"} {"package":"RQuantLib","topic":"ConvertibleFixedCouponBond","snippet":"### Name: ConvertibleBond\n### Title: Convertible Bond evaluation for Fixed, Floating and Zero Coupon\n### Aliases: ConvertibleFixedCouponBond ConvertibleFixedCouponBond.default\n### ConvertibleFloatingCouponBond ConvertibleFloatingCouponBond.default\n### ConvertibleZeroCouponBond ConvertibleZeroCouponBond.default\n\n### ** Examples\n\n# commented-out as it runs longer than CRAN likes\n## Not run: \n##D #this follows an example in test-suite/convertiblebond.cpp\n##D params <- list(tradeDate=Sys.Date()-2,\n##D settleDate=Sys.Date(),\n##D dt=.25,\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D \n##D \n##D dividendYield <- DiscountCurve(params, list(flat=0.02))\n##D riskFreeRate <- DiscountCurve(params, list(flat=0.05))\n##D \n##D dividendSchedule <- data.frame(Type=character(0), Amount=numeric(0),\n##D Rate = numeric(0), Date = as.Date(character(0)))\n##D callabilitySchedule <- data.frame(Price = numeric(0), Type=character(0),\n##D Date = as.Date(character(0)))\n##D \n##D process <- list(underlying=50, divYield = dividendYield,\n##D rff = riskFreeRate, volatility=0.15)\n##D \n##D today <- Sys.Date()\n##D bondparams <- list(exercise=\"am\", faceAmount=100,\n##D divSch = dividendSchedule,\n##D callSch = callabilitySchedule,\n##D redemption=100,\n##D creditSpread=0.005,\n##D conversionRatio = 0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D dateparams <- list(settlementDays=3,\n##D dayCounter=\"ActualActual\",\n##D period = \"Semiannual\", calendar = \"UnitedStates/GovernmentBond\",\n##D businessDayConvention=\"Following\")\n##D \n##D lengths <- c(2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)\n##D coupons <- c( 0.0200, 0.0225, 0.0250, 0.0275, 0.0300,\n##D 0.0325, 0.0350, 0.0375, 0.0400, 0.0425,\n##D 0.0450, 0.0475, 0.0500, 0.0525, 0.0550 )\n##D marketQuotes <- rep(100, length(lengths))\n##D curvedateparams <- list(settlementDays=0, period=\"Annual\",\n##D dayCounter=\"ActualActual\",\n##D businessDayConvention =\"Unadjusted\")\n##D curveparams <- list(method=\"ExponentialSplinesFitting\",\n##D origDate = Sys.Date())\n##D curve <- FittedBondCurve(curveparams, lengths, coupons, marketQuotes, curvedateparams)\n##D iborindex <- list(type=\"USDLibor\", length=6,\n##D inTermOf=\"Month\", term=curve)\n##D spreads <- c()\n##D #ConvertibleFloatingCouponBond(bondparams, iborindex, spreads, process, dateparams)\n##D \n##D \n##D #example using default values\n##D 
#ConvertibleFloatingCouponBond(bondparams, iborindex,spreads, process)\n##D \n##D dateparams <- list(settlementDays=3,\n##D period = \"Semiannual\",\n##D businessDayConvention=\"Unadjusted\")\n##D \n##D bondparams <- list(\n##D creditSpread=0.005, conversionRatio = 0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D #ConvertibleFloatingCouponBond(bondparams, iborindex,\n##D #spreads, process, dateparams)\n##D \n##D \n##D \n##D #this follows an example in test-suite/convertiblebond.cpp\n##D #for ConvertibleFixedCouponBond\n##D \n##D #set up arguments to build a pricing engine.\n##D params <- list(tradeDate=Sys.Date()-2,\n##D settleDate=Sys.Date(),\n##D dt=.25,\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D times <- seq(0,10,.1)\n##D \n##D dividendYield <- DiscountCurve(params, list(flat=0.02), times)\n##D riskFreeRate <- DiscountCurve(params, list(flat=0.05), times)\n##D \n##D dividendSchedule <- data.frame(Type=character(0), Amount=numeric(0),\n##D Rate = numeric(0), Date = as.Date(character(0)))\n##D callabilitySchedule <- data.frame(Price = numeric(0), Type=character(0),\n##D Date = as.Date(character(0)))\n##D \n##D process <- list(underlying=50, divYield = dividendYield,\n##D rff = riskFreeRate, volatility=0.15)\n##D \n##D today <- Sys.Date()\n##D bondparams <- list(exercise=\"am\", faceAmount=100, divSch = dividendSchedule,\n##D callSch = callabilitySchedule, redemption=100,\n##D creditSpread=0.005, conversionRatio = 0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D dateparams <- list(settlementDays=3,\n##D dayCounter=\"Actual360\",\n##D period = \"Once\", calendar = \"UnitedStates/GovernmentBond\",\n##D businessDayConvention=\"Following\"\n##D )\n##D coupon <- c(0.05)\n##D ConvertibleFixedCouponBond(bondparams, coupon, process, dateparams)\n##D \n##D #example with default values\n##D ConvertibleFixedCouponBond(bondparams, coupon, process)\n##D \n##D dateparams <- list(settlementDays=3,\n##D dayCounter=\"Actual360\")\n##D ConvertibleFixedCouponBond(bondparams, coupon, process, dateparams)\n##D \n##D bondparams <- list(creditSpread=0.005, conversionRatio = 0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D ConvertibleFixedCouponBond(bondparams, coupon, process, dateparams)\n##D \n##D \n##D \n##D #this follows an example in test-suite/convertiblebond.cpp\n##D params <- list(tradeDate=Sys.Date()-2,\n##D settleDate=Sys.Date(),\n##D dt=.25,\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D times <- seq(0,10,.1)\n##D \n##D \n##D dividendYield <- DiscountCurve(params, list(flat=0.02), times)\n##D riskFreeRate <- DiscountCurve(params, list(flat=0.05), times)\n##D \n##D dividendSchedule <- data.frame(Type=character(0), Amount=numeric(0),\n##D Rate = numeric(0), Date = as.Date(character(0)))\n##D callabilitySchedule <- data.frame(Price = numeric(0), Type=character(0),\n##D Date = as.Date(character(0)))\n##D \n##D process <- list(underlying=50, divYield = dividendYield,\n##D rff = riskFreeRate, volatility=0.15)\n##D \n##D today <- Sys.Date()\n##D bondparams <- list(exercise=\"am\", faceAmount=100, divSch = dividendSchedule,\n##D callSch = callabilitySchedule, redemption=100,\n##D creditSpread=0.005, conversionRatio = 0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D dateparams <- list(settlementDays=3,\n##D dayCounter=\"Actual360\",\n##D period = \"Once\", calendar = \"UnitedStates/GovernmentBond\",\n##D 
businessDayConvention=\"Following\"\n##D )\n##D \n##D ConvertibleZeroCouponBond(bondparams, process, dateparams)\n##D \n##D #example with default values\n##D ConvertibleZeroCouponBond(bondparams, process)\n##D \n##D \n##D bondparams <- list(creditSpread=0.005,\n##D conversionRatio=0.0000000001,\n##D issueDate=as.Date(today+2),\n##D maturityDate=as.Date(today+3650))\n##D \n##D dateparams <- list(settlementDays=3, dayCounter='Actual360')\n##D ConvertibleZeroCouponBond(bondparams, process, dateparams)\n##D ConvertibleZeroCouponBond(bondparams, process)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"DiscountCurve","snippet":"### Name: DiscountCurve\n### Title: Returns the discount curve (with zero rates and forwards) given\n### times\n### Aliases: DiscountCurve DiscountCurve.default plot.DiscountCurve\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D savepar <- par(mfrow=c(3,3), mar=c(4,4,2,0.5))\n##D \n##D ## This data is taken from sample code shipped with QuantLib 0.9.7\n##D ## from the file Examples/Swap/swapvaluation\n##D params <- list(tradeDate=as.Date('2004-09-20'),\n##D settleDate=as.Date('2004-09-22'),\n##D dt=.25,\n##D interpWhat=\"discount\",\n##D interpHow=\"loglinear\")\n##D setEvaluationDate(as.Date(\"2004-09-20\"))\n##D \n##D ## We get numerical issues for the spline interpolation if we add\n##D ## any one of these three extra futures -- the original example\n##D ## creates different curves based on different deposit, fra, futures\n##D ## and swap data\n##D ## Removing s2y helps, as kindly pointed out by Luigi Ballabio\n##D tsQuotes <- list(d1w = 0.0382,\n##D d1m = 0.0372,\n##D d3m = 0.0363,\n##D d6m = 0.0353,\n##D d9m = 0.0348,\n##D d1y = 0.0345,\n##D fut1=96.2875,\n##D fut2=96.7875,\n##D fut3=96.9875,\n##D fut4=96.6875,\n##D fut5=96.4875,\n##D fut6=96.3875,\n##D fut7=96.2875,\n##D fut8=96.0875,\n##D # s2y = 0.037125,\n##D s3y = 0.0398,\n##D s5y = 0.0443,\n##D s10y = 0.05165,\n##D s15y = 0.055175)\n##D \n##D times <- seq(0,10,.1)\n##D \n##D # Loglinear interpolation of discount factors\n##D curves <- DiscountCurve(params, tsQuotes, times)\n##D plot(curves,setpar=FALSE)\n##D \n##D # Linear interpolation of discount factors\n##D params$interpHow=\"linear\"\n##D curves <- DiscountCurve(params, tsQuotes, times)\n##D plot(curves,setpar=FALSE)\n##D \n##D # Spline interpolation of discount factors\n##D params$interpHow=\"spline\"\n##D curves <- DiscountCurve(params, tsQuotes, times)\n##D plot(curves,setpar=FALSE)\n##D \n##D par(savepar)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"EuropeanOption","snippet":"### Name: EuropeanOption\n### Title: European Option evaluation using Closed-Form solution\n### Aliases: EuropeanOption EuropeanOption.default\n### Keywords: misc\n\n### ** Examples\n\n## simple call with unnamed parameters\nEuropeanOption(\"call\", 100, 100, 0.01, 0.03, 0.5, 0.4)\n## simple call with some explicit parameters, and slightly increased vol:\nEuropeanOption(type=\"call\", underlying=100, strike=100, dividendYield=0.01, \nriskFreeRate=0.03, maturity=0.5, volatility=0.5)\n## simple call with slightly shorter maturity: QuantLib 1.7 compiled with \n## intra-day time calculation support will create slightly changed values\nEuropeanOption(type=\"call\", underlying=100, strike=100, dividendYield=0.01, \nriskFreeRate=0.03, maturity=0.499, volatility=0.5)
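\n\n## The object returned by EuropeanOption() is a list; besides the price it\n## should also carry the Greeks as named components. A quick sketch:\neo <- EuropeanOption(\"call\", 100, 100, 0.01, 0.03, 0.5, 0.4)\neo$value\neo$delta\n\n\n"} {"package":"RQuantLib","topic":"EuropeanOptionArrays","snippet":"### Name: EuropeanOptionArrays\n### Title: European Option evaluation using Closed-Form solution\n### Aliases: 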
EuropeanOptionArrays oldEuropeanOptionArrays plotOptionSurface\n### Keywords: misc\n\n### ** Examples\n\n## Not run: \n##D # define two vectors for the underlying and the volatility\n##D und.seq <- seq(10,180,by=2)\n##D vol.seq <- seq(0.1,0.9,by=0.1)\n##D # evaluate them along with four scalar parameters\n##D EOarr <- EuropeanOptionArrays(\"call\", underlying=und.seq,\n##D strike=100, dividendYield=0.01,\n##D riskFreeRate=0.03,\n##D maturity=1, volatility=vol.seq)\n##D # and look at four of the result arrays: value, delta, gamma, vega\n##D old.par <- par(no.readonly = TRUE)\n##D par(mfrow=c(2,2),oma=c(5,0,0,0),mar=c(2,2,2,1))\n##D plot(EOarr$parameters.underlying, EOarr$value[,1], type='n',\n##D main=\"option value\", xlab=\"\", ylab=\"\")\n##D topocol <- topo.colors(length(vol.seq))\n##D for (i in 1:length(vol.seq))\n##D lines(EOarr$parameters.underlying, EOarr$value[,i], col=topocol[i])\n##D plot(EOarr$parameters.underlying, EOarr$delta[,1],type='n',\n##D main=\"option delta\", xlab=\"\", ylab=\"\")\n##D for (i in 1:length(vol.seq))\n##D lines(EOarr$parameters.underlying, EOarr$delta[,i], col=topocol[i])\n##D plot(EOarr$parameters.underlying, EOarr$gamma[,1],type='n',\n##D main=\"option gamma\", xlab=\"\", ylab=\"\")\n##D for (i in 1:length(vol.seq))\n##D lines(EOarr$parameters.underlying, EOarr$gamma[,i], col=topocol[i])\n##D plot(EOarr$parameters.underlying, EOarr$vega[,1],type='n',\n##D main=\"option vega\", xlab=\"\", ylab=\"\")\n##D for (i in 1:length(vol.seq))\n##D lines(EOarr$parameters.underlying, EOarr$vega[,i], col=topocol[i])\n##D mtext(text=paste(\"Strike is 100, maturity 1 year, riskless rate 0.03\",\n##D \"\\nUnderlying price from\", und.seq[1],\"to\", und.seq[length(und.seq)],\n##D \"\\nVolatility from\",vol.seq[1], \"to\",vol.seq[length(vol.seq)]),\n##D side=1,font=1,outer=TRUE,line=3)\n##D par(old.par)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"EuropeanOptionImpliedVolatility","snippet":"### Name: EuropeanOptionImpliedVolatility\n### Title: Implied Volatility calculation for European Option\n### Aliases: EuropeanOptionImpliedVolatility\n### EuropeanOptionImpliedVolatility.default\n### Keywords: misc\n\n### ** Examples\n\nEuropeanOptionImpliedVolatility(type=\"call\", value=11.10, underlying=100,\n\tstrike=100, dividendYield=0.01, riskFreeRate=0.03,\n\tmaturity=0.5, volatility=0.4)\n\n\n"} {"package":"RQuantLib","topic":"FittedBondCurve","snippet":"### Name: FittedBondCurve\n### Title: Returns the discount curve (with zero rates and forwards) given\n### set of bonds\n### Aliases: FittedBondCurve FittedBondCurve.default plot.FittedBondCurve\n\n### ** Examples\n\n# commented-out as it runs longer than CRAN likes\n## Not run: \n##D lengths <- c(2,4,6,8,10,12,14,16,18,20,22,24,26,28,30)\n##D coupons <- c( 0.0200, 0.0225, 0.0250, 0.0275, 0.0300,\n##D 0.0325, 0.0350, 0.0375, 0.0400, 0.0425,\n##D 0.0450, 0.0475, 0.0500, 0.0525, 0.0550 )\n##D marketQuotes <- rep(100, length(lengths))\n##D dateparams <- list(settlementDays=0, period=\"Annual\",\n##D dayCounter=\"ActualActual\",\n##D businessDayConvention =\"Unadjusted\")\n##D curveparams <- list(method=\"ExponentialSplinesFitting\",\n##D origDate = Sys.Date())\n##D curve <- FittedBondCurve(curveparams, lengths, coupons, marketQuotes, dateparams)\n##D z <- zoo::zoo(curve$table$zeroRates, order.by=curve$table$date)\n##D plot(z)\n## End(Not run)\n\n\n"} {"package":"RQuantLib","topic":"FixedRateBond","snippet":"### Name: FixedRateBond\n### Title: Fixed-Rate bond pricing\n### Aliases: FixedRateBond FixedRateBond.default 
FixedRateBondPriceByYield\n### FixedRateBondPriceByYield.default FixedRateBondYield\n### FixedRateBondYield.default\n### Keywords: misc\n\n### ** Examples\n\n\n#Simple call with a flat curve\nbond <- list(settlementDays=1,\n issueDate=as.Date(\"2004-11-30\"),\n faceAmount=100,\n dayCounter='Thirty360',\n paymentConvention='Unadjusted')\nschedule <- list(effectiveDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"),\n period='Semiannual',\n calendar='UnitedStates/GovernmentBond',\n businessDayConvention='Unadjusted',\n terminationDateConvention='Unadjusted',\n dateGeneration='Forward',\n endOfMonth=1)\ncalc=list(dayCounter='Actual360',\n compounding='Compounded',\n freq='Annual',\n durationType='Modified')\ncoupon.rate <- c(0.02875)\n \nparams <- list(tradeDate=as.Date('2002-2-15'),\n settleDate=as.Date('2002-2-19'),\n dt=.25,\n interpWhat=\"discount\",\n interpHow=\"loglinear\")\nsetEvaluationDate(as.Date(\"2004-11-22\"))\n\ndiscountCurve.flat <- DiscountCurve(params, list(flat=0.05))\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n calc,\n discountCurve=discountCurve.flat)\n\n\n#Same bond with a discount curve constructed from market quotes\ntsQuotes <- list(d1w =0.0382,\n d1m =0.0372,\n fut1=96.2875,\n fut2=96.7875,\n fut3=96.9875,\n fut4=96.6875,\n fut5=96.4875,\n fut6=96.3875,\n fut7=96.2875,\n fut8=96.0875,\n s3y =0.0398,\n s5y =0.0443,\n s10y =0.05165,\n s15y =0.055175)\ntsQuotes <- list(\"flat\" = 0.02)\t\t## While discount curve code is buggy\n\ndiscountCurve <- DiscountCurve(params, tsQuotes)\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n calc,\n discountCurve=discountCurve)\n\n#Same bond calculated from yield rather than from the discount curve\nyield <- 0.02\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n calc,\n yield=yield)\n\n\n#same example with clean price\nprice <- 103.31\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n calc,\n price = price)\n\n#example with default calc parameter\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n discountCurve=discountCurve)\n \n#example with default calc and schedule parameters\nschedule <- list(effectiveDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"))\nFixedRateBond(bond,\n coupon.rate,\n schedule,\n discountCurve=discountCurve)\n\n#example with default calc, schedule and bond parameters\nFixedRateBond(,\n coupon.rate,\n schedule,\n discountCurve=discountCurve)\n\nFixedRateBondPriceByYield(,0.0307, 100000, as.Date(\"2004-11-30\"),\n as.Date(\"2008-11-30\"), 3, , c(0.02875),\n , , , ,as.Date(\"2004-11-30\"))\n\nFixedRateBondYield(,90, 100000, as.Date(\"2004-11-30\"), as.Date(\"2008-11-30\"),\n 3, , c(0.02875), , , , ,as.Date(\"2004-11-30\"))\n\n\n\n"} {"package":"RQuantLib","topic":"FloatingRateBond","snippet":"### Name: FloatingRateBond\n### Title: Floating rate bond pricing\n### Aliases: FloatingRateBond FloatingRateBond.default\n### Keywords: misc\n\n### ** Examples\n\n\nbond <- list(faceAmount=100, issueDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"), redemption=100, \n effectiveDate=as.Date(\"2004-11-30\"))\ndateparams <- list(settlementDays=1, calendar=\"UnitedStates/GovernmentBond\",\n dayCounter = 'ActualActual', period=2, \n businessDayConvention = 1, terminationDateConvention=1,\n dateGeneration=0, endOfMonth=0, fixingDays = 1)\n\ngearings <- spreads <- caps <- floors <- vector()\n\nparams <- list(tradeDate=as.Date('2002-2-15'),\n settleDate=as.Date('2002-2-19'),\n dt=.25,\n interpWhat=\"discount\",\n 
interpHow=\"loglinear\")\nsetEvaluationDate(as.Date(\"2004-11-22\"))\n\ntsQuotes <- list(d1w =0.0382,\n d1m =0.0372,\n fut1=96.2875,\n fut2=96.7875,\n fut3=96.9875,\n fut4=96.6875,\n fut5=96.4875,\n fut6=96.3875,\n fut7=96.2875,\n fut8=96.0875,\n s3y =0.0398,\n s5y =0.0443,\n s10y =0.05165,\n s15y =0.055175)\ntsQuotes <- list(\"flat\" = 0.02)\t\t## While discount curve code is buggy\n\n## when both discount and libor curves are flat.\n\ndiscountCurve.flat <- DiscountCurve(params, list(flat=0.05))\ntermstructure <- DiscountCurve(params, list(flat=0.03))\niborIndex.params <- list(type=\"USDLibor\", length=6, \n inTermOf=\"Month\", term=termstructure) \nFloatingRateBond(bond, gearings, spreads, caps, floors, \n iborIndex.params, discountCurve.flat, dateparams)\n\n\n## discount curve is constructed from market quotes\n## and a flat libor curve\ndiscountCurve <- DiscountCurve(params, tsQuotes)\ntermstructure <- DiscountCurve(params, list(flat=0.03))\niborIndex.params <- list(type=\"USDLibor\", length=6, \n inTermOf=\"Month\", term = termstructure) \nFloatingRateBond(bond, gearings, spreads, caps, floors, \n iborIndex.params, discountCurve, dateparams)\n\n#example using default values\nFloatingRateBond(bond=bond, index=iborIndex.params, curve=discountCurve)\n\n\n\n"} {"package":"RQuantLib","topic":"ImpliedVolatility","snippet":"### Name: ImpliedVolatility\n### Title: Base class for option-price implied volatility evaluation\n### Aliases: ImpliedVolatility print.ImpliedVolatility\n### summary.ImpliedVolatility\n### Keywords: misc\n\n### ** Examples\n\nimpVol<-EuropeanOptionImpliedVolatility(\"call\", value=11.10, strike=100,\n volatility=0.4, 100, 0.01, 0.03, 0.5)\nprint(impVol)\nsummary(impVol)\n\n\n"} {"package":"RQuantLib","topic":"Option","snippet":"### Name: Option\n### Title: Base class for option price evaluation\n### Aliases: Option plot.Option print.Option summary.Option\n### Keywords: misc\n\n### ** Examples\n\nEO<-EuropeanOption(\"call\", strike=100, volatility=0.4, 100, 0.01, 0.03, 0.5)\nprint(EO)\nsummary(EO)\n\n\n"} {"package":"RQuantLib","topic":"SabrSwaption","snippet":"### Name: SabrSwaption\n### Title: SABR swaption using vol cube data with bermudan alternative\n### using markovfunctional\n### Aliases: SabrSwaption SabrSwaption.default\n### Keywords: models\n\n### ** Examples\n\n\nparams <- list(tradeDate=as.Date('2016-2-15'),\n settleDate=as.Date('2016-2-17'),\n startDate=as.Date('2017-2-17'),\n maturity=as.Date('2022-2-17'),\n european=TRUE,\n dt=.25,\n expiryDate=as.Date('2017-2-17'),\n strike=.02,\n interpWhat=\"discount\",\n interpHow=\"loglinear\")\n\n# Set leg parameters for generating discount curve\ndclegparams=list(dayCounter=\"Thirty360\",\n fixFreq=\"Annual\",\n floatFreq=\"Semiannual\")\n\nsetEvaluationDate(as.Date(\"2016-2-15\"))\ntimes <- seq(0,14.75,.25)\n\ndata(tsQuotes)\ndcurve <- DiscountCurve(params, tsQuotes, times=times,dclegparams)\n\n# Price the swaption (European here, since european=TRUE above)\nswaplegparams=list(fixFreq=\"Semiannual\",floatFreq=\"Quarterly\")\n\ndata(vcube)\npricing <- SabrSwaption(params, dcurve,vcube,swaplegparams)\npricing\n\n\n\n"} {"package":"RQuantLib","topic":"Schedule","snippet":"### Name: Schedule\n### Title: Schedule generation\n### Aliases: Schedule Schedule.default\n### Keywords: misc\n\n### ** Examples\n\n\nparams <- list(effectiveDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"),\n period='Semiannual',\n calendar='UnitedStates/GovernmentBond',\n businessDayConvention='Unadjusted',\n terminationDateConvention='Unadjusted',\n 
dateGeneration='Forward',\n endOfMonth=1)\nSchedule(params)\n\n\n"} {"package":"RQuantLib","topic":"ZeroCouponBond","snippet":"### Name: ZeroCouponBond\n### Title: Zero-Coupon bond pricing\n### Aliases: ZeroCouponBond ZeroCouponBond.default ZeroPriceByYield\n### ZeroPriceByYield.default ZeroYield ZeroYield.default\n### Keywords: misc\n\n### ** Examples\n\n\n# Simple call with all parameters and a flat curve\nbond <- list(faceAmount=100,issueDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"), redemption=100 )\n\ndateparams <-list(settlementDays=1, calendar=\"UnitedStates/GovernmentBond\",\n businessDayConvention='Unadjusted')\n\ndiscountCurve.param <- list(tradeDate=as.Date('2002-2-15'),\n settleDate=as.Date('2002-2-15'),\n dt=0.25,\n interpWhat='discount', interpHow='loglinear')\ndiscountCurve.flat <- DiscountCurve(discountCurve.param, list(flat=0.05))\n\nZeroCouponBond(bond, discountCurve.flat, dateparams)\n\n\n# The same bond with a discount curve constructed from market quotes\ntsQuotes <- list(d1w =0.0382,\n d1m =0.0372,\n fut1=96.2875,\n fut2=96.7875,\n fut3=96.9875,\n fut4=96.6875,\n fut5=96.4875,\n fut6=96.3875,\n fut7=96.2875,\n fut8=96.0875,\n s3y =0.0398,\n s5y =0.0443,\n s10y =0.05165,\n s15y =0.055175)\ntsQuotes <- list(\"flat\" = 0.02)\t\t## While discount curve code is buggy\n\ndiscountCurve <- DiscountCurve(discountCurve.param, tsQuotes)\nZeroCouponBond(bond, discountCurve, dateparams)\n\n\n#examples with default arguments\nZeroCouponBond(bond, discountCurve)\n\nbond <- list(issueDate=as.Date(\"2004-11-30\"),\n maturityDate=as.Date(\"2008-11-30\"))\ndateparams <-list(settlementDays=1)\nZeroCouponBond(bond, discountCurve, dateparams)\n\n\nZeroPriceByYield(0.1478, 100, as.Date(\"1993-6-24\"), as.Date(\"1993-11-1\"))\n\nZeroYield(90, 100, as.Date(\"1993-6-24\"), as.Date(\"1993-11-1\"))\n\n\n\n"} {"package":"RQuantLib","topic":"getQuantLibCapabilities","snippet":"### Name: getQuantLibCapabilities\n### Title: Return configuration options of the QuantLib library\n### Aliases: getQuantLibCapabilities\n\n### ** Examples\n\n getQuantLibCapabilities()\n\n\n"} {"package":"RQuantLib","topic":"getQuantLibVersion","snippet":"### Name: getQuantLibVersion\n### Title: Return the QuantLib version number\n### Aliases: getQuantLibVersion\n\n### ** Examples\n\n getQuantLibVersion()\n\n\n"} {"package":"uwedragon","topic":"disguise","snippet":"### Name: disguise\n### Title: Disguise the sample mean and sample deviation\n### Aliases: disguise\n\n### ** Examples\n\n\nusersample<-c(1,1,2,3,4,4,5)\n\ndisguise(usersample,method=1)\ndisguise(usersample,method=2)\ndisguise(usersample,method=3)\ndisguise(usersample,method=4)\n\n\n\n\n"} {"package":"uwedragon","topic":"solutions","snippet":"### Name: solutions\n### Title: Find individual sample values from the sample mean and standard\n### deviation\n### Aliases: solutions\n\n### ** Examples\n\n\n# EXAMPLE 1\n# Seven observations are taken from a five-point Likert scale (coded 1 to 5).\n# The reported mean is 2.857 and the reported standard deviation is 1.574.\n\nsolutions(7,1,5,2.857,1.574)\n\n# For this mean and standard deviation there are two possible distributions:\n# 1 1 2 3 4 4 5\n# 1 2 2 2 3 5 5\n\n# Optionally adding median value of 3.\n\nsolutions(7,1,5,2.857,1.574, usermed=3)\n\n# uniquely reveals the raw sample values:\n# 1 1 2 3 4 4 5\n\n\n# EXAMPLE 2\n# The mean is '4.00'.\n# The standard deviation is '2.00'.\n# Narrower set of solutions found specifying 2dp including trailing 
zeroes.\n\nsolutions(3,-Inf,Inf,4.00,2.00,2,2)\n\n# uniquely reveals the raw sample values:\n# 2 4 6\n\n\n\n"} {"package":"qgisprocess","topic":"as_qgis_argument","snippet":"### Name: as_qgis_argument\n### Title: Type coercion for arguments to QGIS processing algorithms\n### Aliases: as_qgis_argument qgis_clean_argument\n### Keywords: internal\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgisprocess::as_qgis_argument(\n c(\"a\", \"b\"),\n spec = list(qgis_type = \"range\"),\n use_json_input = FALSE\n)\nqgisprocess::as_qgis_argument(\n c(1, 2),\n spec = list(qgis_type = \"range\"),\n use_json_input = FALSE\n)\nqgisprocess::as_qgis_argument(\n c(\"a\", \"b\"),\n spec = list(qgis_type = \"range\"),\n use_json_input = TRUE\n)\nqgisprocess::as_qgis_argument(\n c(1, 2),\n spec = list(qgis_type = \"range\"),\n use_json_input = TRUE\n)\nmat <- matrix(1:12, ncol = 3)\nmat\nqgisprocess::as_qgis_argument(\n mat,\n spec = list(qgis_type = \"matrix\"),\n use_json_input = FALSE\n)\nqgisprocess::as_qgis_argument(\n mat,\n spec = list(qgis_type = \"matrix\"),\n use_json_input = TRUE\n)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"has_qgis","snippet":"### Name: has_qgis\n### Title: Check availability of QGIS, a plugin, a provider or an algorithm\n### Aliases: has_qgis qgis_has_plugin qgis_has_provider qgis_has_algorithm\n\n### ** Examples\n\nhas_qgis()\nif (has_qgis()) qgis_has_algorithm(\"native:filedownloader\")\nif (has_qgis()) qgis_has_provider(\"native\")\nif (has_qgis()) qgis_has_plugin(c(\"grassprovider\", \"processing_saga_nextgen\"))\n\n\n\n"} {"package":"qgisprocess","topic":"qgis_algorithms","snippet":"### Name: qgis_algorithms\n### Title: List algorithms, processing providers or plugins\n### Aliases: qgis_algorithms qgis_providers qgis_plugins\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_algorithms()\nqgis_providers()\nqgis_plugins(quiet = FALSE)\nqgis_plugins(which = \"disabled\")\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_as_raster","snippet":"### Name: qgis_as_raster\n### Title: Convert a qgis_result object or one of its elements to a raster\n### object\n### Aliases: qgis_as_raster qgis_as_brick qgis_as_raster.qgis_outputRaster\n### qgis_as_brick.qgis_outputRaster qgis_as_raster.qgis_outputLayer\n### qgis_as_brick.qgis_outputLayer qgis_as_raster.qgis_result\n### qgis_as_brick.qgis_result\n\n### ** Examples\n\n## Don't show: \nif (has_qgis() && requireNamespace(\"raster\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\n## No test: \n# not running below examples in R CMD check to save time\nresult <- qgis_run_algorithm(\n \"native:slope\",\n INPUT = system.file(\"longlake/longlake_depth.tif\", package = \"qgisprocess\")\n)\n\n# most direct approach, autoselecting a `qgis_outputRaster` type\n# output from the `result` object and reading as RasterLayer:\nqgis_as_raster(result)\n\n# if you need more control, extract the needed output element first:\noutput_raster <- qgis_extract_output(result, \"OUTPUT\")\nqgis_as_raster(output_raster)\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_as_terra","snippet":"### Name: qgis_as_terra\n### Title: Convert a 
qgis_result object or one of its elements to a terra\n### object\n### Aliases: qgis_as_terra qgis_as_terra.qgis_outputRaster\n### qgis_as_terra.qgis_outputLayer qgis_as_terra.qgis_outputVector\n### qgis_as_terra.qgis_result\n\n### ** Examples\n\n## Don't show: \nif (has_qgis() && requireNamespace(\"terra\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\n## No test: \n# not running below examples in R CMD check to save time\nresult <- qgis_run_algorithm(\n \"native:slope\",\n INPUT = system.file(\"longlake/longlake_depth.tif\", package = \"qgisprocess\")\n)\n\n# most direct approach, autoselecting a `qgis_outputRaster` type\n# output from the `result` object and reading as SpatRaster:\nqgis_as_terra(result)\n\n# if you need more control, extract the needed output element first:\noutput_raster <- qgis_extract_output(result, \"OUTPUT\")\nqgis_as_terra(output_raster)\n\n# Same holds for coercion to SpatVector\nresult2 <- qgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 100\n)\n\nqgis_as_terra(result2)\noutput_vector <- qgis_extract_output(result2, \"OUTPUT\")\nqgis_as_terra(output_vector)\n\n# SpatVectorProxy:\nqgis_as_terra(result2, proxy = TRUE)\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_clean_result","snippet":"### Name: qgis_clean_result\n### Title: Clean processing results\n### Aliases: qgis_clean_result\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nresult <- qgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake_depth.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 10\n)\n\nfile.exists(qgis_extract_output(result))\nqgis_clean_result(result)\nfile.exists(qgis_extract_output(result))\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_configure","snippet":"### Name: qgis_configure\n### Title: Configure qgisprocess\n### Aliases: qgis_configure\n\n### ** Examples\n\n## No test: \n# not running in R CMD check to save time\nqgis_configure(use_cached_data = TRUE)\n## End(No test)\n\n## Not run: \n##D # package reconfiguration\n##D # (not run in example() as it rewrites the package cache file)\n##D qgis_configure()\n## End(Not run)\n\n\n\n"} {"package":"qgisprocess","topic":"qgis_detect_paths","snippet":"### Name: qgis_detect_paths\n### Title: Detect QGIS installations with 'qgis_process' on Windows and\n### macOS\n### Aliases: qgis_detect_paths qgis_detect_windows_paths\n### qgis_detect_macos_paths\n\n### ** Examples\n\nif (.Platform$OS.type == \"windows\") {\n qgis_detect_paths()\n identical(qgis_detect_windows_paths(), qgis_detect_paths())\n}\nif (Sys.info()[\"sysname\"] == \"Darwin\") {\n qgis_detect_paths()\n identical(qgis_detect_macos_paths(), qgis_detect_paths())\n}\n\n\n"} {"package":"qgisprocess","topic":"qgis_enable_plugins","snippet":"### Name: qgis_enable_plugins\n### Title: Enable or disable QGIS plugins\n### Aliases: qgis_enable_plugins qgis_disable_plugins\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_enable_plugins(\"name_of_plugin\")\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_extract_output","snippet":"### Name: 
qgis_extract_output\n### Title: Access processing output\n### Aliases: qgis_extract_output qgis_extract_output_by_name\n### qgis_extract_output_by_position qgis_extract_output_by_class\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nresult <- qgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake_depth.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 10\n)\n\n# the print() method of a qgis_result only prints its output elements:\nresult\n\n# nevertheless, more elements are included:\nlength(result)\nnames(result)\n\n# extract the output element 'OUTPUT':\nqgis_extract_output(result)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_function","snippet":"### Name: qgis_function\n### Title: Create a wrapper function that runs one algorithm\n### Aliases: qgis_function\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_buffer <- qgis_function(\"native:buffer\")\nqgis_buffer(\n system.file(\n \"longlake/longlake_depth.gpkg\",\n package = \"qgisprocess\"\n ),\n DISTANCE = 10\n)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_list_input","snippet":"### Name: qgis_list_input\n### Title: Prepare a compound input argument\n### Aliases: qgis_list_input qgis_dict_input\n\n### ** Examples\n\nqgis_list_input(1, 2, 3)\nqgis_dict_input(a = 1, b = 2, c = 3)\n\n\n\n"} {"package":"qgisprocess","topic":"qgis_path","snippet":"### Name: qgis_path\n### Title: Get metadata about the used 'qgis_process' command\n### Aliases: qgis_path qgis_version\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_path()\nqgis_path(quiet = FALSE)\nqgis_version()\nqgis_version(full = FALSE)\nqgis_version(debug = TRUE)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_result_status","snippet":"### Name: qgis_result_status\n### Title: Access processing results: extra tools\n### Aliases: qgis_result_status qgis_result_stdout qgis_result_stderr\n### qgis_result_args\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nresult <- qgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake_depth.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 10\n)\n\nqgis_result_status(result)\nstdout <- qgis_result_stdout(result)\ncat(substr(stdout, 1, 335))\nqgis_result_stderr(result)\nqgis_result_args(result)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_run","snippet":"### Name: qgis_run\n### Title: Call the 'qgis_process' command directly\n### Aliases: qgis_run\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nprocessx_list <- qgis_run(args = \"--help\")\ncat(processx_list$stdout)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_run_algorithm","snippet":"### Name: qgis_run_algorithm\n### Title: Run an algorithm using 'qgis_process'\n### Aliases: qgis_run_algorithm\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") 
withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake_depth.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 10\n)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_run_algorithm_p","snippet":"### Name: qgis_run_algorithm_p\n### Title: Run an algorithm using 'qgis_process': pipe-friendly wrapper\n### Aliases: qgis_run_algorithm_p\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nsystem.file(\n \"longlake/longlake_depth.gpkg\",\n package = \"qgisprocess\"\n) |>\n qgis_run_algorithm_p(\n \"native:buffer\",\n DISTANCE = 10\n )\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_search_algorithms","snippet":"### Name: qgis_search_algorithms\n### Title: Search geoprocessing algorithms\n### Aliases: qgis_search_algorithms\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_search_algorithms(\n algorithm = \"point.*line\",\n provider = \"^native$\"\n)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_show_help","snippet":"### Name: qgis_show_help\n### Title: Get detailed information about one algorithm\n### Aliases: qgis_show_help qgis_get_description qgis_get_argument_specs\n### qgis_get_output_specs\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_get_description(\"native:filedownloader\")\n## No test: \n# not running below examples in R CMD check to save time\nqgis_get_argument_specs(\"native:filedownloader\")\nqgis_get_output_specs(\"native:filedownloader\")\nqgis_show_help(\"native:filedownloader\")\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"qgis_tmp_file","snippet":"### Name: qgis_tmp_file\n### Title: Manage temporary files\n### Aliases: qgis_tmp_file qgis_tmp_folder qgis_tmp_vector qgis_tmp_raster\n### qgis_tmp_base qgis_clean_tmp\n\n### ** Examples\n\nqgis_tmp_base()\nqgis_tmp_file(\".csv\")\nqgis_tmp_vector()\nqgis_tmp_raster()\n\n\n\n"} {"package":"qgisprocess","topic":"qgis_unconfigure","snippet":"### Name: qgis_unconfigure\n### Title: Clean the package cache\n### Aliases: qgis_unconfigure\n\n### ** Examples\n\n## Not run: \n##D # not running this function in example() as it clears the cache environment.\n##D qgis_unconfigure()\n## End(Not run)\n\n# undoing qgis_unconfigure() by repopulating the cache environment from file:\n## No test: \n# not running in R CMD check to save time\nqgis_configure(use_cached_data = TRUE)\n## End(No test)\n\n\n\n"} {"package":"qgisprocess","topic":"qgis_using_json_input","snippet":"### Name: qgis_using_json_input\n### Title: Report if JSON objects are used for input to and output from\n### 'qgis_process'\n### Aliases: qgis_using_json_input qgis_using_json_output\n\n### ** Examples\n\n## Don't show: \nif (has_qgis()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nqgis_using_json_input()\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"st_as_sf","snippet":"### Name: st_as_sf\n### Title: Convert a qgis_result object or one of its elements to an sf\n### object\n### 
Aliases: st_as_sf st_as_sf.qgis_result st_as_sf.qgis_outputVector\n### st_as_sf.qgis_outputLayer\n\n### ** Examples\n\n## Don't show: \nif (has_qgis() && requireNamespace(\"sf\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\n## No test: \n# not running below examples in R CMD check to save time\nresult <- qgis_run_algorithm(\n \"native:buffer\",\n INPUT = system.file(\"longlake/longlake_depth.gpkg\", package = \"qgisprocess\"),\n DISTANCE = 10\n)\n\n# most direct approach, autoselecting a `qgis_outputVector` type\n# output from the `result` object and reading as sf object:\nsf::st_as_sf(result)\n\n# if you need more control, extract the needed output element first:\noutput_vector <- qgis_extract_output(result, \"OUTPUT\")\nsf::st_as_sf(output_vector)\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"qgisprocess","topic":"st_as_stars","snippet":"### Name: st_as_stars\n### Title: Convert a qgis_result object or one of its elements to a stars\n### object\n### Aliases: st_as_stars st_as_stars.qgis_outputRaster\n### st_as_stars.qgis_outputLayer st_as_stars.qgis_result\n\n### ** Examples\n\n## Don't show: \nif (has_qgis() && requireNamespace(\"stars\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\n## No test: \n# not running below examples in R CMD check to save time\nresult <- qgis_run_algorithm(\n \"native:slope\",\n INPUT = system.file(\"longlake/longlake_depth.tif\", package = \"qgisprocess\")\n)\n\n# most direct approach, autoselecting a `qgis_outputRaster` type\n# output from the `result` object and reading as stars or stars_proxy:\nstars::st_as_stars(result)\nstars::st_as_stars(result, proxy = TRUE)\n\n# if you need more control, extract the needed output element first:\noutput_raster <- qgis_extract_output(result, \"OUTPUT\")\nstars::st_as_stars(output_raster)\n## End(No test)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"dfexpand","topic":"expand_column","snippet":"### Name: expand_column\n### Title: Expand a single column containing delimited values into multiple\n### binary columns\n### Aliases: expand_column\n\n### ** Examples\n\n library('dfexpand')\n myDelimiter = \";\"\n\n # Create some fake data with duplicates\n rows = c(\n c(\"a;b\"), c(\"a;b;c\"), c(\"b;c\"), c(\"d\"), c(\"d\")\n )\n\n # Add to a dataframe\n df = data.frame(rows)\n\n colnames(df) <- c(\"myvar\")\n #\n # The default behavior is to trim extra whitespace from the extracted values, \n # but not to alter or change the case of the values. So 'Alpha' is distinct from 'alpha'\n # but ' beta ' is the same as 'beta'. 
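(Each distinct value becomes its own binary indicator column, so four here.)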
You can override this behavior with\n # the trim and ignore case flags.\n #\n expanded_df = expand_column(df, \"myvar\", myDelimiter)\n\n\n"} {"package":"dfexpand","topic":"getDistinctValues","snippet":"### Name: getDistinctValues\n### Title: dfexpand\n### Aliases: getDistinctValues\n\n### ** Examples\n\n values <- getDistinctValues(\"a;b;c\", ';')\n\n\n"} {"package":"rQCC","topic":"HL","snippet":"### Name: Hodges-Lehmann\n### Title: Hodges-Lehmann estimator\n### Aliases: HL\n### Keywords: robust univar\n\n### ** Examples\n\nx = c(0:10, 50)\nHL(x, estimator=\"HL2\")\n\n\n"} {"package":"rQCC","topic":"mad.unbiased","snippet":"### Name: MAD\n### Title: Median absolute deviation (MAD)\n### Aliases: mad.unbiased mad2.unbiased\n### Keywords: robust univar\n\n### ** Examples\n\nx = c(0:10, 50)\n\n# Fisher-consistent MAD, but not unbiased with a finite sample.\nmad(x)\n\n# Unbiased MAD.\nmad.unbiased(x)\n\n# Fisher-consistent squared MAD, but not unbiased.\nmad(x)^2\n\n# Unbiased squared MAD.\nmad2.unbiased(x)\n\n\n"} {"package":"rQCC","topic":"RE","snippet":"### Name: relative.efficiency\n### Title: Relative efficiency (RE)\n### Aliases: RE\n### Keywords: variance\n\n### ** Examples\n\n#################\n# Single sample #\n#################\n\n# RE of the Hodges-Lehmann (HL2) estimator \n# with respect to the sample standard deviation under the normal distribution.\nRE(n=5, estimator=\"HL2\")\n\n# RE of the unbiased Shamos estimator \n# with respect to the unbiased sample standard deviation under the normal distribution.\nRE(n=5, estimator=\"shamos\")\n\n# RE of the original Shamos estimator \n# with respect to the sample standard deviation under the normal distribution.\nRE(n=5, estimator=\"shamos\", correction=FALSE)\n\n# RE of the unbiased range ( (maximum - minimum) / d2 )\n# with respect to the unbiased sample standard deviation under the normal distribution.\nRE(n=6, estimator=\"range\")\n\n# RE of the original range (maximum minus minimum)\n# with respect to the sample standard deviation under the normal distribution.\nRE(n=6, estimator=\"range\", correction=FALSE)\n\n\n####################\n# Multiple samples #\n####################\n# With multiple samples, only the unbiased pooled estimators are considered.\n\n# RE of the pooled median (pooling type A) with respect to the mean (pooling type A)\nRE( n=c(4,5), estimator=\"median\" )\n\n# RE of the pooled median (pooling type A) with respect to the median (pooling type C)\nRE( n=c(4,5), estimator=\"median\", baseEstimator=\"median\", basePoolType=\"C\")\n\n# RE of the pooled mad (pooling type A) with respect to the standard deviation (pooling type A)\nRE( n=c(4,5), estimator=\"mad\")\n\n# RE of the pooled mad (pooling type A) with respect to the standard deviation (pooling type C)\nRE( n=c(4,5), estimator=\"mad\", basePoolType=\"C\")\n\n# RE of the pooled standard deviation (pooling type A) with respect to the sd (pooling type C)\nRE( n=c(4,5), estimator=\"sd\", baseEstimator=\"sd\", basePoolType=\"C\" )\n\n\n"} {"package":"rQCC","topic":"shamos","snippet":"### Name: Shamos\n### Title: Shamos estimator\n### Aliases: shamos shamos.unbiased shamos2.unbiased\n### Keywords: robust univar\n\n### ** Examples\n\nx = c(0:10, 50)\n\n# Fisher-consistent Shamos, but not unbiased with a finite sample. \nshamos(x)\n\n# Unbiased Shamos. \nshamos.unbiased(x)\n\n# Fisher-consistent squared Shamos, but not unbiased with a finite sample. \nshamos(x)^2 \n\n# Unbiased squared Shamos. 
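(The finite-sample unbiasing factors used here are derived under the normal model.)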
\nshamos2.unbiased(x)\n\n\n"} {"package":"rQCC","topic":"acc","snippet":"### Name: attributes.chart.unbalanced\n### Title: Attributes control chart with balanced/unbalanced samples\n### Aliases: acc print.acc summary.acc\n### Keywords: control chart unbalanced\n\n### ** Examples\n\n# ==============================\n# Example 1a: p chart (balanced)\n# ------------------------------\n# Refer to Table 31 of ASTM (2010).\nx = c(1, 3, 0, 7, 2, 0, 1, 0, 8, 5, 2, 0, 1, 0, 3)\nn = 400\n\n# The conventional p chart with the balanced samples.\n# Print LCL, CL, UCL.\nresult = acc(x, n)\nprint(result)\n\n# Summary of a control chart.\nsummary(result)\n\n# Plot of a control chart.\nplot(result, cex.text=0.8)\ntext(15, 0.5, labels=\"p chart (with balanced sample)\" )\n\n# The p chart based on the Wilson confidence interval.\nacc(x, n, pEstimator=\"Wilson\")\n\n\n# ===============================\n# Example 1b: np chart (balanced)\n# -------------------------------\n# The data are the same as in Example 1a.\n# The conventional np chart with the balanced samples.\n# Print LCL, CL, UCL.\nresult = acc(x, n, type=\"np\")\nprint(result)\nsummary(result)\nplot(result, cex.text=0.8)\ntext(15, 25, labels=\"np chart\" )\n\n# The np chart based on the Wilson confidence interval.\nacc(x, n, type=\"np\", pEstimator=\"Wilson\")\n\n\n# ================================\n# Example 2a: p chart (unbalanced)\n# --------------------------------\n# Refer to Table 32 of ASTM (2010).\nx = c( 9, 7, 3, 9,13,14,14,10,12,14, 6,12, 7,11, 5,\n 4, 2, 4, 7, 9, 7,12, 8, 5,18, 7, 8, 8,15, 3, 5)\nn = c( 580, 550, 580, 640, 880, 880, 640, 550, 580, 880,\n 800, 800, 580, 580, 550, 330, 330, 640, 580, 550,\n 510, 640, 300, 330, 880, 880, 800, 580, 880, 880, 330)\n\n# The conventional p chart with the unbalanced samples.\n# Print LCL, CL, UCL.\nresult = acc(x, n, nk=880)\nprint(result)\n\n# Summary of a control chart.\nsummary(result)\n\n# Plot of a control chart.\nplot(result, cex.text=0.8)\ntext(15, 0.2, labels=\"p chart (with unbalanced sample)\" )\n\n\n# ================================\n# Example 2b: p chart (unbalanced)\n# --------------------------------\n# Refer to Table 7.4 of Montgomery (2013).\nx = c(12, 8, 6, 9, 10, 12, 11, 16, 10, 6, 20, 15, 9, 8, 6, 8, 10, 7, 5, 8, 5, 8, 10, 6, 9)\nn = c(100,80,80,100,110,110,100,100,90,90,110,120,120,120,110,80,80,80,90,100,100,100,100,90,90)\n\n# The conventional p chart with the unbalanced samples.\n# Print LCL, CL, UCL.\n# If nk is missing, the average sample size is used.\nresult = acc(x, n)\nprint(result)\n\n# Summary of a control chart.\nsummary(result)\n\n# Plot of a control chart.\n# Refer to Figure 7.8 of Montgomery (2013).\nplot(result, cex.text=0.8)\ntext(15, 0.2, labels=\"p chart (with unbalanced sample)\" )\n\n\n# ================================\n# Example 2c: p chart (unbalanced)\n# p is known \n# --------------------------------\n# Refer to Table 41 of ASTM (2010).\nx = c(2, 2, 1, 1, 5, 2, 0, 3, 0, 15, 7, 2, 5, 2, 0, 3, 0, 4, 8, 4)\nn = c(600,1300,2000,2500,1550,2000,1550,780,260,2000,1550,950,950,950,35,330,200,600,1300,780)\n\n# The fraction nonconforming is known as 0.0020\n# The control limits at the size nk=600.\n# If nk (sample size for Phase II) is unknown, the average of subsample sizes is used.\nresult = acc(x, n, parameter=0.002, nk=600)\nsummary(result)\n\n\n# ===============================\n# Example 3a: u chart (balanced)\n# -------------------------------\n# Refer to Table 33 of ASTM (2010).\nx = c(17, 14, 6, 23, 5, 7, 10, 19, 29, 18, 25, 5,\n 8, 11, 18, 13, 22, 
6, 23, 22, 9, 15, 20, 6, 24)\nn = 10\n\n# The u chart with the balanced samples.\n# Print LCL, CL, UCL.\nresult = acc(x, n, type=\"u\")\nprint(result)\n\n# Summary of a control chart\nsummary(result)\n\n# Plot of a control chart\nplot(result, cex.text=0.8)\ntext(13, 3, labels=\"u chart\" )\n\n\n# ================================\n# Example 3b: u chart (unbalanced)\n# --------------------------------\n# Refer to Table 34 of ASTM (2010).\nx = c(72, 38, 76, 35, 62, 81, 97, 78, 103, 56,\n 47, 55, 49, 62, 71, 47, 41, 52, 128, 84)\nn = c(20, 20, 40, 25, 25, 25, 40, 40, 40, 40,\n 25, 25, 25, 25, 25, 20, 20, 20, 40, 40)\n\n# The u chart with the unbalanced samples.\n# Print LCL, CL, UCL.\nresult = acc(x, n, type=\"u\", nk=20)\nprint(result)\n\n# Summary of a control chart\nsummary(result)\n\n# Plot of a control chart\nplot(result, cex.text=0.8)\ntext(12, 3.5, labels=\"u chart (with unbalanced sample)\" )\n\n\n# ===============================\n# Example 4: c chart \n# -------------------------------\n# Refer to Table 35 of ASTM (2010).\nx = c(0, 1, 1, 0, 2, 1, 3, 4, 5, 3, 0, 1, 1, 1, 2, 4, 0, 1, 1, 0,\n 6, 4, 3, 2, 0, 0, 9,10, 8, 8, 6,14, 0, 1, 2, 4, 5, 7, 1, 3,\n 3, 2, 0, 1, 5, 3, 4, 3, 5, 4, 2, 0, 1, 2, 5, 9, 4, 2, 5, 3)\n\n# Print LCL, CL, UCL.\nresult = acc(x, type=\"c\")\nprint(result)\n\n# Summary of a control chart\nsummary(result)\n\n# Plot of a control chart\nplot(result, cex.text=0.8)\ntext(40, 14, labels=\"c chart\" )\n\n\n# ===============================\n# Example 5: g and h charts\n# -------------------------------\n# Refer to Kaminsky et al. (1992).\ntmp = c(\n11, 2, 8, 2, 4, 1, 1, 11, 2, 1, 1, 7, 1, 1, 9, \n 5, 1, 3, 6, 5, 13, 2, 3, 3, 4, 3, 2, 6, 1, 5, \n 2, 2, 8, 3, 1, 1, 3, 4, 6, 5, 2, 8, 1, 1, 4, \n13, 10, 15, 5, 2, 3, 6, 1, 5, 8, 9, 1, 18, 3, 1, \n 3, 7, 14, 3, 1, 7, 7, 1, 8, 1, 4, 1, 6, 1, 1, \n 1, 14, 2, 3, 7, 19, 9, 7, 1, 8, 5, 1, 1, 6, 1, \n 9, 5, 6, 2, 2, 8, 15, 2, 3, 3, 4, 7, 11, 4, 6, \n 7, 5, 1, 14, 8, 3, 3, 5, 21,10, 11, 1, 6, 1, 2, \n 4, 1, 2, 11, 5, 3, 5, 4, 10, 3, 1, 4, 7, 3, 2, \n 3, 5, 4, 2, 3, 5, 1, 4, 11,17, 1, 13, 13, 2, 1) \ndata = matrix(tmp, byrow=TRUE, ncol=5)\n\n# g chart with ML method.\n# Print LCL, CL, UCL.\nresult = acc(data, type=\"g\", location=1)\nprint(result)\n\n# Summary of a control chart\nsummary(result)\n\nplot(result, cex.text=0.8)\n\n# h chart with MVU method.\nacc(data, type=\"h\", location=1, gEstimator=\"MVU\")\n\n\n# ===============================\n# Example 6: g and h charts (unbalanced data)\n# -------------------------------\nx1 = c(11, 2, 8, 2, 4)\nx2 = c(1, 1, 11, 2, 1)\nx3 = c(1, 7, 1)\nx4 = c(5, 1, 3, 6, 5)\nx5 = c(13, 2, 3, 3)\nx6 = c(3, 2, 6, 1, 5)\nx7 = c(2, 2, 8, 3, 1)\nx8 = c(1, 3, 4, 6, 5)\nx9 = c(2, 8, 1, 1, 4)\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9)\n\nresult = acc(data, type=\"g\", location=1, gEstimator=\"MVU\", nk=5)\nsummary(result)\nplot(result)\n\n\n# ===============================\n# Example 7: t charts \n# -------------------------------\nx = c(0.35, 0.92, 0.59, 4.28, 0.21, 0.79, 1.75, 0.07, 3.3, \n1.7, 0.33, 0.97, 0.96, 2.23, 0.88, 0.37, 1.3, 0.4, 0.19, 1.59)\n\n# Exponential t chart\nresult = acc(x, type=\"t\", tModel=\"E\")\nsummary(result)\n\nplot(result, cex.text=0.8)\ntext(10, 6, labels=\"Exponential t chart\" )\n\n\n# Weibull t chart\nresult = acc(x, type=\"t\", tModel=\"W\")\nsummary(result)\n\nplot(result, cex.text=0.8)\ntext(10, 6, labels=\"Weibull t chart\" )\n\n\n\n"} {"package":"rQCC","topic":"evar","snippet":"### Name: empirical.variance\n### Title: Empirical variances of robust 
estimators\n### Aliases: evar\n### Keywords: variance unbalanced\n\n### ** Examples\n\n# Empirical variance of the Hodges-Lehmann estimator (HL2) under the standard normal distribution.\nevar (n=10, estimator=\"HL2\")\n\n# Multiple samples\nevar (n=c(4,5), estimator=\"mad\", poolType=\"C\")\n\n\n"} {"package":"rQCC","topic":"factors.cc","snippet":"### Name: factors.for.chart\n### Title: Factors for constructing control charts\n### Aliases: factors.cc\n### Keywords: factor control chart\n\n### ** Examples\n\n## A3 is used for constructing the conventional X-bar chart \n# with the sample standard deviation.\nfactors.cc(n=10, factor=\"A3\")\n\n\n## Unbiasing factor for the standard deviation \n# using the sample standard deviation.\nfactors.cc(n=10, factor=\"c4\")\n# The above is the same as below:\nc4.factor(n=10, estimator=\"sd\")\n\n\n## Unbiasing factor for the standard deviation \n# using the sample range. \nfactors.cc(n=10, factor=\"d2\")\n# The above is the same as below:\nc4.factor(n=10, estimator=\"range\") \n\n\n## Table B2 in Supplement B of ASTM (1951).\nchar = c(\"A\",\"A1\",\"A2\",\"c2\", \"B1\",\"B2\",\"B3\",\"B4\", \"d2\",\"d3\",\"D1\",\"D2\",\"D3\",\"D4\")\nnn = 2L:25L\n\nres=NULL\nfor(n in nn){tmp=NULL;for(ch in char) tmp=c(tmp,factors.cc(n,ch));res=rbind(res,tmp)}\nrownames(res) = paste0(\"n=\",nn)\nround(res,4)\n\n\n## Table 49 in Chapter 3 of ASTM (2010).\nchar = c(\"A\",\"A2\",\"A3\",\"c4\", \"B3\",\"B4\",\"B5\",\"B6\", \"d2\",\"d3\",\"D1\",\"D2\",\"D3\",\"D4\")\nnn = 2L:25L\n\nres=NULL\nfor(n in nn){tmp=NULL;for(ch in char) tmp=c(tmp,factors.cc(n,ch));res=rbind(res,tmp)}\nrownames(res) = paste0(\"n=\",nn)\nround(res,4)\n\n\n## Table 50 in Chapter 3 of ASTM (2010).\nchar = c(\"E2\", \"E3\")\nnn = 2L:25L\n\nres=NULL\nfor(n in nn){tmp=NULL;for(ch in char) tmp=c(tmp,factors.cc(n,ch));res=rbind(res,tmp)}\nrownames(res) = paste0(\"n=\",nn)\nround(res,3)\n\n\n"} {"package":"rQCC","topic":"finite.breakdown","snippet":"### Name: finite.sample.breakdown\n### Title: Finite-sample breakdown point\n### Aliases: finite.breakdown\n### Keywords: breakdown robust\n\n### ** Examples\n\n# finite-sample breakdown point of the Hodges-Lehmann (HL2) with size n=10.\nfinite.breakdown(n=10, estimator=\"HL2\")\n\n# finite-sample breakdown points of the median with sizes n=4,5,6 \nfinite.breakdown(n=4:6, estimator=\"median\")\n\n\n"} {"package":"rQCC","topic":"pooledEstimator","snippet":"### Name: pooled.Estimator\n### Title: Pooled Estimator\n### Aliases: pooledEstimator\n### Keywords: univar unbalanced\n\n### ** Examples\n\nx1 = c(1,2,3,4,5)\nx2 = c(6,7)\nx = list(x1,x2)\n\n# Pooled sample mean (default) by type \"A\" pooling\npooledEstimator(x) \npooledEstimator(x, \"mean\", \"A\") # same as the above \n\n# Pooled sample mean by type \"B\" pooling\npooledEstimator(x, \"mean\", \"B\")\n\n\n# Pooled sample sd by type \"B\" pooling\npooledEstimator(x, estimator=\"sd\", pool=\"B\")\n\n\n"} {"package":"rQCC","topic":"racc","snippet":"### Name: robust.attributes.chart.unbalanced\n### Title: Robust attributes control charts with balanced/unbalanced\n### samples\n### Aliases: racc print.racc summary.racc\n### Keywords: control chart robust unbalanced\n\n### ** Examples\n\n# ===============================\n# Example 1: g and h charts\n# -------------------------------\n# Refer to Kaminsky et al. (1992) and Table 2 of Park et al. 
(2021).\ntmp = c(\n11, 2, 8, 2, 4, 1, 1, 11, 2, 1, 1, 7, 1, 1, 9, \n 5, 1, 3, 6, 5, 13, 2, 3, 3, 4, 3, 2, 6, 1, 5, \n 2, 2, 8, 3, 1, 1, 3, 4, 6, 5, 2, 8, 1, 1, 4, \n13, 10, 15, 5, 2, 3, 6, 1, 5, 8, 9, 1, 18, 3, 1, \n 3, 7, 14, 3, 1, 7, 7, 1, 8, 1, 4, 1, 6, 1, 1, \n 1, 14, 2, 3, 7, 19, 9, 7, 1, 8, 5, 1, 1, 6, 1, \n 9, 5, 6, 2, 2, 8, 15, 2, 3, 3, 4, 7, 11, 4, 6, \n 7, 5, 1, 14, 8, 3, 3, 5, 21,10, 11, 1, 6, 1, 2, \n 4, 1, 2, 11, 5, 3, 5, 4, 10, 3, 1, 4, 7, 3, 2, \n 3, 5, 4, 2, 3, 5, 1, 4, 11,17, 1, 13, 13, 2, 1) \ndata = matrix(tmp, byrow=TRUE, ncol=5)\n\n# g chart with cdf (trimming) method.\n# Print LCL, CL, UCL.\nresult = racc(data, gamma=0.9, type=\"g\", location=1)\nprint(result)\n\n# Summary of a control chart\nsummary(result)\n\nplot(result, cex.text=0.8)\n\n# h chart with MM (truncated geometric) method.\nracc(data, gamma=0.9, type=\"h\", location=1, gEstimator=\"MM\")\n\n\n# ===============================\n# Example 2: g and h charts (unbalanced data)\n# -------------------------------\nx1 = c(11, 2, 8, 2, 4)\nx2 = c(1, 1, 11, 2, 1)\nx3 = c(1, 7, 1)\nx4 = c(5, 1, 3, 6, 5)\nx5 = c(13, 2, 3, 3)\nx6 = c(3, 2, 6, 1, 5)\nx7 = c(2, 2, 8, 3, 1)\nx8 = c(1, 3, 4, 6, 5)\nx9 = c(2, 8, 1, 1, 4)\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9)\n\nresult = racc(data, gamma=0.9, type=\"g\", location=1, gEstimator=\"cdf\", nk=5)\nsummary(result)\nplot(result)\n\n\n# ===============================\n# Example 3: t charts \n# -------------------------------\nx = c(0.35, 0.92, 0.59, 4.28, 0.21, 0.79, 1.75, 0.07, 3.3,\n1.7, 0.33, 0.97, 0.96, 2.23, 0.88, 0.37, 1.3, 0.4, 0.19, 1.59)\n\n# Exponential t chart\nresult = racc(x, type=\"t\", tModel=\"E\")\nsummary(result)\n\nplot(result, cex.text=0.8)\ntext(10, 6, labels=\"Robust exponential t chart\" )\n\n\n# Weibull t chart\nresult = racc(x, type=\"t\", tModel=\"W\")\nsummary(result)\n\nplot(result, cex.text=0.8)\ntext(10, 5.5, labels=\"Robust Weibull t chart\" )\n\n\n\n"} {"package":"rQCC","topic":"rQCC","snippet":"### Name: robust.chart.unbalanced\n### Title: Robust quality control chart with balanced/unbalanced samples\n### Aliases: rQCC rcc print.rcc summary.rcc\n### Keywords: control chart robust unbalanced\n\n### ** Examples\n\n###############\n# X-bar chart #\n###############\n\n# ========== \n# Example 1a \n# ---------- \n# The conventional X-bar chart with the standard deviation. \n# Refer to Example 3 in Section 3.31 of ASTM (2010). \n\n# The data below are from Table 29 in Section 3.31 of ASTM (2010). \n# Each subgroup has a sample of size n=6. 
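(The control-chart factors used below are therefore based on n=6.)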
There are m=10 subgroups.\nx1 = c(0.5005, 0.5000, 0.5008, 0.5000, 0.5005, 0.5000)\nx2 = c(0.4998, 0.4997, 0.4998, 0.4994, 0.4999, 0.4998)\nx3 = c(0.4995, 0.4995, 0.4995, 0.4995, 0.4995, 0.4996)\nx4 = c(0.4998, 0.5005, 0.5005, 0.5002, 0.5003, 0.5004)\nx5 = c(0.5000, 0.5005, 0.5008, 0.5007, 0.5008, 0.5010)\nx6 = c(0.5008, 0.5009, 0.5010, 0.5005, 0.5006, 0.5009)\nx7 = c(0.5000, 0.5001, 0.5002, 0.4995, 0.4996, 0.4997)\nx8 = c(0.4993, 0.4994, 0.4999, 0.4996, 0.4996, 0.4997)\nx9 = c(0.4995, 0.4995, 0.4997, 0.4992, 0.4995, 0.4992)\nx10= c(0.4994, 0.4998, 0.5000, 0.4990, 0.5000, 0.5000)\ndata1 = rbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)\n\n# Print LCL, CL, UCL.\n# The mean and standard deviation are used.\nresult = rcc(data1, loc=\"mean\", scale=\"sd\", type=\"Xbar\") \nprint(result)\n\n# Note: X-bar chart is a default with the mean and sd\n# so the below is the same as the above.\nrcc(data1) \n\n# Summary of a control chart\nsummary(result)\nRE(n=6, estimator=\"sd\", correction=TRUE)\n\n# The above limits are also calculated as \nA3 = factors.cc(n=6, \"A3\")\n\nxbarbar = mean(data1)\n # xbarbar = mean(unlist(data1)) # for list\n\nsbar = mean( apply(data1, 1, sd) )\nc(xbarbar-A3*sbar, xbarbar, xbarbar+A3*sbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.5005, 0.5005), labels=c(\"Group 1\", \"Group 2\") )\n\n# ==========\n# Example 1b \n# ----------\n# The conventional X-bar chart with the range.\n# Refer to Example 5 in Section 3.31 of ASTM (2010).\n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The range is used for the scale estimator.\nresult = rcc(data1, loc=\"mean\", scale=\"range\")\nprint(result)\n\n# Summary of a control chart\n# Note: the RE is calculated based on the unbiased estimators.\nsummary(result)\nRE(n=6, estimator=\"range\", correction=TRUE)\n\n# The above limits are also calculated as \nA2 = factors.cc(n=6, \"A2\")\n\nxbarbar = mean(data1)\n # xbarbar = mean(unlist(data1)) # for list\n\nRbar = mean( apply(data1, 1, function(x) {diff(range(x))}) )\n # Rbar = mean( apply(sapply(data1,range),2,diff) ) # for list\n\nc(xbarbar-A2*Rbar, xbarbar, xbarbar+A2*Rbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.5005, 0.5005), labels=c(\"Group 1\", \"Group 2\") )\n\n# ==========\n# Example 1c \n# ----------\n# The median-MAD chart.\n# Refer to Table 4.2 in Section 4.7 of Ryan (2000).\n# Data: 20 subgroups with 4 observations each.\ntmp = c(\n72, 84, 79, 49, 56, 87, 33, 42, 55, 73, 22, 60, 44, 80, 54, 74,\n97, 26, 48, 58, 83, 89, 91, 62, 47, 66, 53, 58, 88, 50, 84, 69,\n57, 47, 41, 46, 13, 10, 30, 32, 26, 39, 52, 48, 46, 27, 63, 34,\n49, 62, 78, 87, 71, 63, 82, 55, 71, 58, 69, 70, 67, 69, 70, 94,\n55, 63, 72, 49, 49, 51, 55, 76, 72, 80, 61, 59, 61, 74, 62, 57 )\ndata2 = matrix(tmp, ncol=4, byrow=TRUE)\n\n# Print LCL, CL, UCL.\n# The median (location) and MAD (scale) are used.\nrcc(data2, loc=\"median\", scale=\"mad\")\n\n# Note: the RE is calculated based on the unbiased estimators.\nRE(n=4, estimator=\"median\", correction=TRUE)\n\n# ==========\n# Example 1d \n# ----------\n# The HL2-Shamos chart.\n# The data are the same as in Example 1c.\n\n# Print LCL, CL, UCL.\n# The HL2 (location) and Shamos (scale) are used.\nrcc(data2, loc=\"HL2\", scale=\"shamos\")\n\n# Note: the RE is calculated based on the unbiased estimators.\nRE(n=4, estimator=\"HL2\", correction=TRUE)\n\n\n############\n# S chart #\n############\n\n# 
==========\n# Example 2a \n# ----------\n# The conventional S chart with the standard deviation.\n# Refer to Example 3 in Section 3.31 of ASTM (2010). \n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The standard deviation (default) is used for the scale estimator.\nresult = rcc(data1, type=\"S\")\nprint(result)\n\n# The above limits are also calculated as \nB3 = factors.cc(n=6, \"B3\")\nB4 = factors.cc(n=6, \"B4\")\nsbar = mean( apply(data1, 1, sd) )\nc(B3*sbar, sbar, B4*sbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.0005, 0.0005), labels=c(\"Group 1\", \"Group 2\") )\n\n\n# ==========\n# Example 2b\n# ----------\n# The S-type chart with the MAD.\n# The data are the same as in Example 2a.\n\n# Print LCL, CL, UCL.\n# The MAD (scale) is used.\nresult = rcc(data1, scale=\"mad\", type=\"S\")\nprint(result)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00045, 0.00045), labels=c(\"Group 1\", \"Group 2\") )\n\n\n############\n# R chart #\n############\n\n# ==========\n# Example 3a \n# ----------\n# The conventional R chart with the range.\n# Refer to Example 5 in Section 3.31 of ASTM (2010). \n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The range is used for the scale estimator.\n# Unlike the S chart, scale=\"range\" is not a default. \n# Thus, for the conventional R chart, use the option (scale=\"range\") as below.\nresult = rcc(data1, scale=\"range\", type=\"R\")\nprint(result)\n\n# The above limits are also calculated as \nD3 = factors.cc(n=6, \"D3\")\nD4 = factors.cc(n=6, \"D4\")\n\nRbar = mean( apply(data1, 1, function(x) {diff(range(x))}) )\n # Rbar = mean( apply(sapply(data1,range),2,diff) ) # for list\n\nc(D3*Rbar, Rbar, D4*Rbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00135, 0.00135), labels=c(\"Group 1\", \"Group 2\") )\n\n\n# ==========\n# Example 3b\n# ----------\n# The R-type chart with the Shamos.\n# Refer to Example 5 in Section 3.31 of ASTM (2010). 
\n# The data are the same as in Example 3a.\n\n# Print LCL, CL, UCL.\n# The Shamos (scale) is used.\nresult = rcc(data1, scale=\"shamos\", type=\"R\")\nprint(result)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00135, 0.00135), labels=c(\"Group 1\", \"Group 2\") )\n\n\n###################\n# Unbalanced Data #\n###################\n\n# ==========\n# Example 4\n# ----------\n# Refer to Example 4 in Section 3.31 of ASTM (2010).\n# Data set is from Table 30 in Section 3.31 of ASTM (2010).\n x1 = c( 73, 73, 73, 75, 75)\n x2 = c( 70, 71, 71, 71, 72)\n x3 = c( 74, 74, 74, 74, 75)\n x4 = c( 70, 70, 70, 72, 73)\n x5 = c( 70, 70, 70, 70, 70)\n x6 = c( 65, 65, 66, 69, 70)\n x7 = c( 72, 72, 74, 76)\n x8 = c( 69, 70, 71, 73, 73)\n x9 = c( 71, 71, 71, 71, 72)\nx10 = c( 71, 71, 71, 71, 72)\nx11 = c( 71, 71, 72, 72, 72)\nx12 = c( 70, 71, 71, 72, 72)\nx13 = c( 73, 74, 74, 75, 75)\nx14 = c( 74, 74, 75, 75, 75)\nx15 = c( 72, 72, 72, 73, 73)\nx16 = c( 75, 75, 75, 76)\nx17 = c( 68, 69, 69, 69, 70)\nx18 = c( 71, 71, 72, 72, 73)\nx19 = c( 72, 73, 73, 73, 73)\nx20 = c( 68, 69, 70, 71, 71)\nx21 = c( 69, 69, 69, 69, 69)\n\n# For unbalanced data set, use list.\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,\n x11,x12,x13,x14, x15, x16, x17, x18, x19, x20, x21)\n\n# Xbar chart (with nk=5)\nrcc(data, nk=5)\n\n# S chart (with nk=4)\nrcc(data, type=\"S\", nk=4)\n\n# ==========\n# Example 5a\n# ----------\n# Data set is from Example 6.4 of Montgomery (2013)\n# Statistical Quality Control (7th ed), Wiley.\n# Data set for Phase I\n x1 = c(74.030, 74.002, 74.019, 73.992, 74.008)\n x2 = c(73.995, 73.992, 74.001)\n x3 = c(73.988, 74.024, 74.021, 74.005, 74.002)\n x4 = c(74.002, 73.996, 73.993, 74.015, 74.009)\n x5 = c(73.992, 74.007, 74.015, 73.989, 74.014)\n x6 = c(74.009, 73.994, 73.997, 73.985)\n x7 = c(73.995, 74.006, 73.994, 74.000)\n x8 = c(73.985, 74.003, 73.993, 74.015, 73.988)\n x9 = c(74.008, 73.995, 74.009, 74.005)\nx10 = c(73.998, 74.000, 73.990, 74.007, 73.995)\nx11 = c(73.994, 73.998, 73.994, 73.995, 73.990)\nx12 = c(74.004, 74.000, 74.007, 74.000, 73.996)\nx13 = c(73.983, 74.002, 73.998) \nx14 = c(74.006, 73.967, 73.994, 74.000, 73.984)\nx15 = c(74.012, 74.014, 73.998) \nx16 = c(74.000, 73.984, 74.005, 73.998, 73.996)\nx17 = c(73.994, 74.012, 73.986, 74.005) \nx18 = c(74.006, 74.010, 74.018, 74.003, 74.000)\nx19 = c(73.984, 74.002, 74.003, 74.005, 73.997)\nx20 = c(74.000, 74.010, 74.013) \nx21 = c(73.982, 74.001, 74.015, 74.005, 73.996)\nx22 = c(74.004, 73.999, 73.990, 74.006, 74.009)\nx23 = c(74.010, 73.989, 73.990, 74.009, 74.014)\nx24 = c(74.015, 74.008, 73.993, 74.000, 74.010)\nx25 = c(73.982, 73.984, 73.995, 74.017, 74.013) \n\n# For unbalanced data set, use list.\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, \n x16, x17, x18, x19, x20, x21, x22, x23, x24, x25)\n\n# Xbar chart (with nk=5)\nrcc(data, nk=5)\n\n# Xbar chart (with nk=5) with different pooling methods\nrcc(data, nk=5, poolLoc=\"C\", poolScale=\"C\")\n\n# S chart (with nk=5)\nrcc(data, type=\"S\", nk=5)\n\n# S chart (with nk=5) with pooling method C.\nrcc(data, type=\"S\", nk=5, poolScale=\"C\")\n\n# ==========\n# Example 5b\n# ----------\n# With a contaminated data set. 
\n# Two contaminated observations are added \n# in the first subgroup (70.5, 77.0) of Example 5a.\ndatan = data\ndatan[[1]] = c(data[[1]], 70.5, 77.0)\n\n# Xbar chart with non-robust estimators \nrcc(datan, nk=5)\n\n# robust Xbar chart (median and mad estimates)\nrcc(datan, loc=\"median\", sc=\"mad\", nk=5)\n\n# robust Xbar chart (median and mad estimates) with different pooling methods\nrcc(datan, loc=\"median\", sc=\"mad\", nk=5, poolLoc=\"C\", poolScale=\"C\")\n\n# robust S chart (mad estimate) with different pooling methods\nrcc(datan, type=\"S\", sc=\"mad\", nk=5, poolScale=\"B\")\nrcc(datan, type=\"S\", sc=\"mad\", nk=5, poolScale=\"C\")\n\n\n"} {"package":"rQCC","topic":"sd.unbiased","snippet":"### Name: sd.unbiased\n### Title: Unbiased standard deviation\n### Aliases: sd.unbiased\n### Keywords: univar\n\n### ** Examples\n\nsd.unbiased(1:2)\n\n\n"} {"package":"rQCC","topic":"c4.factor","snippet":"### Name: unbiasing.factor\n### Title: Finite-sample unbiasing factor\n### Aliases: c4.factor w4.factor\n### Keywords: factor variance\n\n### ** Examples\n\n# unbiasing factor for estimating the standard deviation\nc4.factor(n=10, estimator=\"sd\")\nc4.factor(n=10, estimator=\"mad\")\nc4.factor(n=10, estimator=\"shamos\")\n\n# Note: d2 notation is widely used for the bias-correction of the range.\nd2 = c4.factor(n=10, estimator=\"range\") \nd2\n\n# unbiasing factor for estimating the variance\nw4.factor(n=10, \"mad2\")\nw4.factor(n=10, \"shamos2\")\n\n\n"} {"package":"rQCC","topic":"rcc","snippet":"### Name: robust.chart.unbalanced\n### Title: Robust quality control chart with balanced/unbalanced samples\n### Aliases: rQCC rcc print.rcc summary.rcc\n### Keywords: control chart robust unbalanced\n\n### ** Examples\n\n###############\n# X-bar chart #\n###############\n\n# ========== \n# Example 1a \n# ---------- \n# The conventional X-bar chart with the standard deviation. \n# Refer to Example 3 in Section 3.31 of ASTM (2010). \n\n# The data below are from Table 29 in Section 3.31 of ASTM (2010). \n# Each subgroup has a sample of size n=6. 
There are m=10 subgroups.\nx1 = c(0.5005, 0.5000, 0.5008, 0.5000, 0.5005, 0.5000)\nx2 = c(0.4998, 0.4997, 0.4998, 0.4994, 0.4999, 0.4998)\nx3 = c(0.4995, 0.4995, 0.4995, 0.4995, 0.4995, 0.4996)\nx4 = c(0.4998, 0.5005, 0.5005, 0.5002, 0.5003, 0.5004)\nx5 = c(0.5000, 0.5005, 0.5008, 0.5007, 0.5008, 0.5010)\nx6 = c(0.5008, 0.5009, 0.5010, 0.5005, 0.5006, 0.5009)\nx7 = c(0.5000, 0.5001, 0.5002, 0.4995, 0.4996, 0.4997)\nx8 = c(0.4993, 0.4994, 0.4999, 0.4996, 0.4996, 0.4997)\nx9 = c(0.4995, 0.4995, 0.4997, 0.4992, 0.4995, 0.4992)\nx10= c(0.4994, 0.4998, 0.5000, 0.4990, 0.5000, 0.5000)\ndata1 = rbind(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10)\n\n# Print LCL, CL, UCL.\n# The mean and standard deviation are used.\nresult = rcc(data1, loc=\"mean\", scale=\"sd\", type=\"Xbar\") \nprint(result)\n\n# Note: X-bar chart is a default with the mean and sd\n# so the below is the same as the above.\nrcc(data1) \n\n# Summary of a control chart\nsummary(result)\nRE(n=6, estimator=\"sd\", correction=TRUE)\n\n# The above limits are also calculated as \nA3 = factors.cc(n=6, \"A3\")\n\nxbarbar = mean(data1)\n # xbarbar = mean(unlist(data1)) # for list\n\nsbar = mean( apply(data1, 1, sd) )\nc(xbarbar-A3*sbar, xbarbar, xbarbar+A3*sbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.5005, 0.5005), labels=c(\"Group 1\", \"Group 2\") )\n\n# ==========\n# Example 1b \n# ----------\n# The conventional X-bar chart with the range.\n# Refer to Example 5 in Section 3.31 of ASTM (2010).\n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The range is used for the scale estimator.\nresult = rcc(data1, loc=\"mean\", scale=\"range\")\nprint(result)\n\n# Summary of a control chart\n# Note: the RE is calculated based on the unbiased estimators.\nsummary(result)\nRE(n=6, estimator=\"range\", correction=TRUE)\n\n# The above limits are also calculated as \nA2 = factors.cc(n=6, \"A2\")\n\nxbarbar = mean(data1)\n # xbarbar = mean(unlist(data1)) # for list\n\nRbar = mean( apply(data1, 1, function(x) {diff(range(x))}) )\n # Rbar = mean( apply(sapply(data1,range),2,diff) ) # for list\n\nc(xbarbar-A2*Rbar, xbarbar, xbarbar+A2*Rbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.5005, 0.5005), labels=c(\"Group 1\", \"Group 2\") )\n\n# ==========\n# Example 1c \n# ----------\n# The median-MAD chart.\n# Refer to Table 4.2 in Section 4.7 of Ryan (2000).\n# Data: 20 subgroups with 4 observations each.\ntmp = c(\n72, 84, 79, 49, 56, 87, 33, 42, 55, 73, 22, 60, 44, 80, 54, 74,\n97, 26, 48, 58, 83, 89, 91, 62, 47, 66, 53, 58, 88, 50, 84, 69,\n57, 47, 41, 46, 13, 10, 30, 32, 26, 39, 52, 48, 46, 27, 63, 34,\n49, 62, 78, 87, 71, 63, 82, 55, 71, 58, 69, 70, 67, 69, 70, 94,\n55, 63, 72, 49, 49, 51, 55, 76, 72, 80, 61, 59, 61, 74, 62, 57 )\ndata2 = matrix(tmp, ncol=4, byrow=TRUE)\n\n# Print LCL, CL, UCL.\n# The median (location) and MAD (scale) are used.\nrcc(data2, loc=\"median\", scale=\"mad\")\n\n# Note: the RE is calculated based on the unbiased estimators.\nRE(n=4, estimator=\"median\", correction=TRUE)\n\n# ==========\n# Example 1d \n# ----------\n# The HL2-Shamos chart.\n# The data are the same as in Example 1c.\n\n# Print LCL, CL, UCL.\n# The HL2 (location) and Shamos (scale) are used.\nrcc(data2, loc=\"HL2\", scale=\"shamos\")\n\n# Note: the RE is calculated based on the unbiased estimators.\nRE(n=4, estimator=\"HL2\", correction=TRUE)\n\n\n############\n# S chart #\n############\n\n# 
==========\n# Example 2a \n# ----------\n# The conventional S chart with the standard deviation.\n# Refer to Example 3 in Section 3.31 of ASTM (2010). \n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The standard deviation (default) is used for the scale estimator.\nresult = rcc(data1, type=\"S\")\nprint(result)\n\n# The above limits are also calculated as \nB3 = factors.cc(n=6, \"B3\")\nB4 = factors.cc(n=6, \"B4\")\nsbar = mean( apply(data1, 1, sd) )\nc(B3*sbar, sbar, B4*sbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.0005, 0.0005), labels=c(\"Group 1\", \"Group 2\") )\n\n\n# ==========\n# Example 2b\n# ----------\n# The S-type chart with the MAD.\n# The data are the same as in Example 2a.\n\n# Print LCL, CL, UCL.\n# The MAD (scale) is used.\nresult = rcc(data1, scale=\"mad\", type=\"S\")\nprint(result)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00045, 0.00045), labels=c(\"Group 1\", \"Group 2\") )\n\n\n############\n# R chart #\n############\n\n# ==========\n# Example 3a \n# ----------\n# The conventional R chart with the range.\n# Refer to Example 5 in Section 3.31 of ASTM (2010). \n# The data are the same as in Example 1a.\n\n# Print LCL, CL, UCL.\n# The range is used for the scale estimator.\n# Unlike the S chart, scale=\"range\" is not a default. \n# Thus, for the conventional R chart, use the option (scale=\"range\") as below.\nresult = rcc(data1, scale=\"range\", type=\"R\")\nprint(result)\n\n# The above limits are also calculated as \nD3 = factors.cc(n=6, \"D3\")\nD4 = factors.cc(n=6, \"D4\")\n\nRbar = mean( apply(data1, 1, function(x) {diff(range(x))}) )\n # Rbar = mean( apply(sapply(data1,range),2,diff) ) # for list\n\nc(D3*Rbar, Rbar, D4*Rbar)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00135, 0.00135), labels=c(\"Group 1\", \"Group 2\") )\n\n\n# ==========\n# Example 3b\n# ----------\n# The R-type chart with the Shamos.\n# Refer to Example 5 in Section 3.31 of ASTM (2010). 
\n# The data are the same as in Example 3a.\n\n# Print LCL, CL, UCL.\n# The Shamos (scale) is used.\nresult = rcc(data1, scale=\"shamos\", type=\"R\")\nprint(result)\n\n# Plot a control chart\nplot(result, cex.text=0.8)\nabline(v=5.5, lty=1, lwd=2, col=\"gold\")\ntext( c(3,8), c(0.00135, 0.00135), labels=c(\"Group 1\", \"Group 2\") )\n\n\n###################\n# Unbalanced Data #\n###################\n\n# ==========\n# Example 4\n# ----------\n# Refer to Example 4 in Section 3.31 of ASTM (2010).\n# Data set is from Table 30 in Section 3.31 of ASTM (2010).\n x1 = c( 73, 73, 73, 75, 75)\n x2 = c( 70, 71, 71, 71, 72)\n x3 = c( 74, 74, 74, 74, 75)\n x4 = c( 70, 70, 70, 72, 73)\n x5 = c( 70, 70, 70, 70, 70)\n x6 = c( 65, 65, 66, 69, 70)\n x7 = c( 72, 72, 74, 76)\n x8 = c( 69, 70, 71, 73, 73)\n x9 = c( 71, 71, 71, 71, 72)\nx10 = c( 71, 71, 71, 71, 72)\nx11 = c( 71, 71, 72, 72, 72)\nx12 = c( 70, 71, 71, 72, 72)\nx13 = c( 73, 74, 74, 75, 75)\nx14 = c( 74, 74, 75, 75, 75)\nx15 = c( 72, 72, 72, 73, 73)\nx16 = c( 75, 75, 75, 76)\nx17 = c( 68, 69, 69, 69, 70)\nx18 = c( 71, 71, 72, 72, 73)\nx19 = c( 72, 73, 73, 73, 73)\nx20 = c( 68, 69, 70, 71, 71)\nx21 = c( 69, 69, 69, 69, 69)\n\n# For unbalanced data set, use list.\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10,\n x11,x12,x13,x14, x15, x16, x17, x18, x19, x20, x21)\n\n# Xbar chart (with nk=5)\nrcc(data, nk=5)\n\n# S chart (with nk=4)\nrcc(data, type=\"S\", nk=4)\n\n# ==========\n# Example 5a\n# ----------\n# Data set is from Example 6.4 of Montgomery (2013)\n# Statistical Quality Control (7th ed), Wiley.\n# Data set for Phase I\n x1 = c(74.030, 74.002, 74.019, 73.992, 74.008)\n x2 = c(73.995, 73.992, 74.001)\n x3 = c(73.988, 74.024, 74.021, 74.005, 74.002)\n x4 = c(74.002, 73.996, 73.993, 74.015, 74.009)\n x5 = c(73.992, 74.007, 74.015, 73.989, 74.014)\n x6 = c(74.009, 73.994, 73.997, 73.985)\n x7 = c(73.995, 74.006, 73.994, 74.000)\n x8 = c(73.985, 74.003, 73.993, 74.015, 73.988)\n x9 = c(74.008, 73.995, 74.009, 74.005)\nx10 = c(73.998, 74.000, 73.990, 74.007, 73.995)\nx11 = c(73.994, 73.998, 73.994, 73.995, 73.990)\nx12 = c(74.004, 74.000, 74.007, 74.000, 73.996)\nx13 = c(73.983, 74.002, 73.998) \nx14 = c(74.006, 73.967, 73.994, 74.000, 73.984)\nx15 = c(74.012, 74.014, 73.998) \nx16 = c(74.000, 73.984, 74.005, 73.998, 73.996)\nx17 = c(73.994, 74.012, 73.986, 74.005) \nx18 = c(74.006, 74.010, 74.018, 74.003, 74.000)\nx19 = c(73.984, 74.002, 74.003, 74.005, 73.997)\nx20 = c(74.000, 74.010, 74.013) \nx21 = c(73.982, 74.001, 74.015, 74.005, 73.996)\nx22 = c(74.004, 73.999, 73.990, 74.006, 74.009)\nx23 = c(74.010, 73.989, 73.990, 74.009, 74.014)\nx24 = c(74.015, 74.008, 73.993, 74.000, 74.010)\nx25 = c(73.982, 73.984, 73.995, 74.017, 74.013) \n\n# For unbalanced data set, use list.\ndata = list(x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15, \n x16, x17, x18, x19, x20, x21, x22, x23, x24, x25)\n\n# Xbar chart (with nk=5)\nrcc(data, nk=5)\n\n# Xbar chart (with nk=5) with different pooling methods\nrcc(data, nk=5, poolLoc=\"C\", poolScale=\"C\")\n\n# S chart (with nk=5)\nrcc(data, type=\"S\", nk=5)\n\n# S chart (with nk=5) with pooling method C.\nrcc(data, type=\"S\", nk=5, poolScale=\"C\")\n\n# ==========\n# Example 5b\n# ----------\n# With a contaminated data set. 
\n# Two contaminated observations are added \n# in the first subgroup (70.5, 77.0) of Example 5a.\ndatan = data\ndatan[[1]] = c(data[[1]], 70.5, 77.0)\n\n# Xbar chart with non-robust estimators \nrcc(datan, nk=5)\n\n# robust Xbar chart (median and mad estimates)\nrcc(datan, loc=\"median\", sc=\"mad\", nk=5)\n\n# robust Xbar chart (median and mad estimates) with different pooling methods\nrcc(datan, loc=\"median\", sc=\"mad\", nk=5, poolLoc=\"C\", poolScale=\"C\")\n\n# robust S chart (mad estimate) with different pooling methods\nrcc(datan, type=\"S\", sc=\"mad\", nk=5, poolScale=\"B\")\nrcc(datan, type=\"S\", sc=\"mad\", nk=5, poolScale=\"C\")\n\n\n"} {"package":"squat","topic":"DTW","snippet":"### Name: DTW\n### Title: Dynamic Time Warping for Quaternion Time Series\n### Aliases: DTW\n\n### ** Examples\n\nDTW(vespa64$igp[[1]], vespa64$igp[[2]])\n\n\n"} {"package":"squat","topic":"append","snippet":"### Name: append\n### Title: QTS Sample Concatenation\n### Aliases: append append.default append.qts_sample\n\n### ** Examples\n\nappend(vespa64$igp, vespa64$igp[1])\nappend(vespa64$igp, vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"autoplot.prcomp_qts","snippet":"### Name: autoplot.prcomp_qts\n### Title: Plot for 'prcomp_qts' objects\n### Aliases: autoplot.prcomp_qts\n\n### ** Examples\n\n## Don't show: \nif (requireNamespace(\"ggplot2\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\ndf <- as_qts_sample(vespa64$igp[1:16])\nres_pca <- prcomp(df)\n\n# Plot the data points in a PC plane\n# And color points according to a categorical variable\np <- ggplot2::autoplot(res_pca, what = \"scores\")\np + ggplot2::geom_point(ggplot2::aes(color = vespa64$V[1:16]))\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"squat","topic":"autoplot.qts","snippet":"### Name: autoplot.qts\n### Title: Plot for 'qts' objects\n### Aliases: autoplot.qts\n\n### ** Examples\n\n## Don't show: \nif (requireNamespace(\"ggplot2\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nggplot2::autoplot(vespa64$igp[[1]])\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"squat","topic":"autoplot.qts_sample","snippet":"### Name: autoplot.qts_sample\n### Title: Plot for 'qts_sample' objects\n### Aliases: autoplot.qts_sample\n\n### ** Examples\n\n## Don't show: \nif (requireNamespace(\"ggplot2\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nggplot2::autoplot(vespa64$igp)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"squat","topic":"autoplot.qtsclust","snippet":"### Name: autoplot.qtsclust\n### Title: Plot for 'qtsclust' objects\n### Aliases: autoplot.qtsclust\n\n### ** Examples\n\n## Don't show: \nif (requireNamespace(\"ggplot2\", quietly = TRUE)) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nout <- kmeans(vespa64$igp[1:10], n_clusters = 2)\nggplot2::autoplot(out)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"squat","topic":"centring","snippet":"### Name: centring\n### Title: QTS Centering and Standardization\n### Aliases: centring\n\n### ** Examples\n\ncentring(vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"dbscan","snippet":"### Name: dbscan\n### Title: QTS Nearest-Neighbor Clustering\n### Aliases: dbscan dbscan.default dbscan.qts_sample\n\n### ** Examples\n\nout <- 
dbscan(vespa64$igp[1:10])\nplot(out)\n\n\n"} {"package":"squat","topic":"differentiate","snippet":"### Name: differentiate\n### Title: QTS Differentiation\n### Aliases: differentiate differentiate.qts differentiate.qts_sample\n\n### ** Examples\n\ndifferentiate(vespa64$igp[[1]])\ndifferentiate(vespa64$igp)\n\n\n"} {"package":"squat","topic":"dist","snippet":"### Name: dist\n### Title: QTS Distance Matrix Computation\n### Aliases: dist dist.default dist.qts_sample\n\n### ** Examples\n\nD <- dist(vespa64$igp[1:5])\n\n\n"} {"package":"squat","topic":"exp","snippet":"### Name: exp\n### Title: QTS Exponential\n### Aliases: exp exp.qts exp.qts_sample\n\n### ** Examples\n\nx <- log(vespa64$igp[[1]])\nexp(x)\ny <- log(vespa64$igp)\nexp(y)\n\n\n"} {"package":"squat","topic":"hclust","snippet":"### Name: hclust\n### Title: QTS Hierarchical Agglomerative Clustering\n### Aliases: hclust hclust.default hclust.qts_sample\n\n### ** Examples\n\nout <- hclust(vespa64$igp[1:10], n_clusters = 2)\nplot(out)\n\n\n"} {"package":"squat","topic":"hemispherize","snippet":"### Name: hemispherize\n### Title: QTS Hemispherization\n### Aliases: hemispherize hemispherize.qts hemispherize.qts_sample\n\n### ** Examples\n\nhemispherize(vespa64$igp[[1]])\nhemispherize(vespa64$igp)\n\n\n"} {"package":"squat","topic":"kmeans","snippet":"### Name: kmeans\n### Title: QTS K-Means Alignment Algorithm\n### Aliases: kmeans kmeans.default kmeans.qts_sample\n\n### ** Examples\n\nout <- kmeans(vespa64$igp[1:10], n_clusters = 2)\n\n\n"} {"package":"squat","topic":"log","snippet":"### Name: log\n### Title: QTS Logarithm\n### Aliases: log log.qts log.qts_sample\n\n### ** Examples\n\nlog(vespa64$igp[[1]])\nlog(vespa64$igp)\n\n\n"} {"package":"squat","topic":"mean.qts_sample","snippet":"### Name: mean.qts_sample\n### Title: QTS Geometric Mean\n### Aliases: mean.qts_sample\n\n### ** Examples\n\nmean(vespa64$igp)\n\n\n"} {"package":"squat","topic":"median.qts_sample","snippet":"### Name: median.qts_sample\n### Title: QTS Geometric Median\n### Aliases: median.qts_sample\n\n### ** Examples\n\nmedian(vespa64$igp)\n\n\n"} {"package":"squat","topic":"moving_average","snippet":"### Name: moving_average\n### Title: QTS Moving Average\n### Aliases: moving_average moving_average.qts moving_average.qts_sample\n\n### ** Examples\n\nmoving_average(vespa64$igp[[1]], window_size = 5)\nmoving_average(vespa64$igp, window_size = 5)\n\n\n"} {"package":"squat","topic":"normalize","snippet":"### Name: normalize\n### Title: QTS Normalization\n### Aliases: normalize normalize.qts normalize.qts_sample\n\n### ** Examples\n\nnormalize(vespa64$igp[[1]])\nnormalize(vespa64$igp)\n\n\n"} {"package":"squat","topic":"plot.prcomp_qts","snippet":"### Name: plot.prcomp_qts\n### Title: Plot for 'prcomp_qts' objects\n### Aliases: plot.prcomp_qts screeplot.prcomp_qts\n\n### ** Examples\n\ndf <- as_qts_sample(vespa64$igp[1:16])\nres_pca <- prcomp(df)\n\n# You can plot the effect of a PC on the mean\nplot(res_pca, what = \"PC1\")\n\n# You can plot the data points in a PC plane\nplot(res_pca, what = \"scores\")\n\n\n"} {"package":"squat","topic":"plot.qts","snippet":"### Name: plot.qts\n### Title: Plot for 'qts' objects\n### Aliases: plot.qts\n\n### ** Examples\n\nplot(vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"plot.qts_sample","snippet":"### Name: plot.qts_sample\n### Title: Plot for 'qts_sample' objects\n### Aliases: plot.qts_sample\n\n### ** Examples\n\nplot(vespa64$igp)\n\n\n"} {"package":"squat","topic":"plot.qtsclust","snippet":"### Name: plot.qtsclust\n### 
Title: Plot for 'qtsclust' objects\n### Aliases: plot.qtsclust\n\n### ** Examples\n\nout <- kmeans(vespa64$igp[1:10], n_clusters = 2)\nplot(out)\n\n\n"} {"package":"squat","topic":"prcomp.qts_sample","snippet":"### Name: prcomp.qts_sample\n### Title: PCA for QTS Sample\n### Aliases: prcomp.qts_sample\n\n### ** Examples\n\nres_pca <- prcomp(vespa64$igp[1:16])\n\n\n"} {"package":"squat","topic":"qts","snippet":"### Name: qts\n### Title: QTS Class\n### Aliases: qts as_qts is_qts format.qts\n\n### ** Examples\n\nqts1 <- vespa64$igp[[1]]\nqts2 <- as_qts(qts1)\nis_qts(qts1)\nis_qts(qts2)\n\n\n"} {"package":"squat","topic":"qts2ats","snippet":"### Name: qts2ats\n### Title: QTS Transformation To Angle Time Series\n### Aliases: qts2ats\n\n### ** Examples\n\nqts2ats(vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"qts2avts","snippet":"### Name: qts2avts\n### Title: QTS Transformation to Angular Velocity Time Series\n### Aliases: qts2avts\n\n### ** Examples\n\nqts2avts(vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"qts2dts","snippet":"### Name: qts2dts\n### Title: QTS Transformation To Distance Time Series\n### Aliases: qts2dts\n\n### ** Examples\n\nqts2dts(vespa64$igp[[1]], vespa64$igp[[2]])\n\n\n"} {"package":"squat","topic":"qts2nts","snippet":"### Name: qts2nts\n### Title: QTS Transformation To Norm Time Series\n### Aliases: qts2nts\n\n### ** Examples\n\nqts2nts(vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"qts_sample","snippet":"### Name: qts_sample\n### Title: QTS Sample Class\n### Aliases: qts_sample as_qts_sample is_qts_sample [.qts_sample\n\n### ** Examples\n\nx <- vespa64$igp\ny <- as_qts_sample(x)\nis_qts_sample(x)\nis_qts_sample(y)\nx[1]\nx[1, simplify = TRUE]\n\n\n"} {"package":"squat","topic":"reorient","snippet":"### Name: reorient\n### Title: QTS Reorientation\n### Aliases: reorient reorient.qts reorient.qts_sample\n\n### ** Examples\n\nreorient(vespa64$igp[[1]])\nreorient(vespa64$igp)\n\n\n"} {"package":"squat","topic":"resample","snippet":"### Name: resample\n### Title: QTS Resampling\n### Aliases: resample resample.qts resample.qts_sample\n\n### ** Examples\n\nresample(vespa64$igp[[1]])\nresample(vespa64$igp)\n\n\n"} {"package":"squat","topic":"rnorm_qts","snippet":"### Name: rnorm_qts\n### Title: QTS Random Sampling\n### Aliases: rnorm_qts\n\n### ** Examples\n\nrnorm_qts(1, vespa64$igp[[1]])\n\n\n"} {"package":"squat","topic":"scale","snippet":"### Name: scale\n### Title: QTS Sample Centering and Standardization\n### Aliases: scale scale.default scale.qts_sample\n\n### ** Examples\n\nx <- scale(vespa64$igp)\nx[[1]]\n\n\n"} {"package":"squat","topic":"smooth","snippet":"### Name: smooth\n### Title: QTS Smoothing via SLERP Interpolation\n### Aliases: smooth smooth.default smooth.qts smooth.qts_sample\n\n### ** Examples\n\nsmooth(vespa64$igp[[1]])\nsmooth(vespa64$igp)\n\n\n"} {"package":"squat","topic":"straighten","snippet":"### Name: straighten\n### Title: QTS Straightening\n### Aliases: straighten straighten.qts straighten.qts_sample\n\n### ** Examples\n\nstraighten(vespa64$igp[[1]])\nstraighten(vespa64$igp)\n\n\n"} {"package":"rEDM","topic":"CCM","snippet":"### Name: CCM\n### Title: Convergent cross mapping using simplex projection\n### Aliases: CCM\n\n### ** Examples\n\ndata(sardine_anchovy_sst)\ndf = CCM( dataFrame = sardine_anchovy_sst, E = 3, Tp = 0, columns = \"anchovy\",\ntarget = \"np_sst\", libSizes = \"10 70 10\", sample = 100 )\n\n\n\n"} {"package":"rEDM","topic":"ComputeError","snippet":"### Name: ComputeError\n### Title: Compute error\n### Aliases: 
ComputeError\n\n### ** Examples\n\ndata(block_3sp)\nsmplx <- Simplex( dataFrame=block_3sp, lib=\"1 99\", pred=\"105 190\", E=3,\ncolumns=\"x_t\", target=\"x_t\")\nerr <- ComputeError( smplx$Observations, smplx$Predictions )\n\n\n"} {"package":"rEDM","topic":"Embed","snippet":"### Name: Embed\n### Title: Embed data with time lags\n### Aliases: Embed\n\n### ** Examples\n\ndata(circle)\nembed <- Embed( dataFrame = circle, E = 2, tau = -1, columns = \"x y\" ) \n\n\n"} {"package":"rEDM","topic":"EmbedDimension","snippet":"### Name: EmbedDimension\n### Title: Optimal embedding dimension\n### Aliases: EmbedDimension\n\n### ** Examples\n\ndata(TentMap)\nE.rho = EmbedDimension( dataFrame = TentMap, lib = \"1 100\", pred = \"201 500\",\ncolumns = \"TentMap\", target = \"TentMap\", showPlot = FALSE )\n\n\n"} {"package":"rEDM","topic":"MakeBlock","snippet":"### Name: MakeBlock\n### Title: Make embedded data block\n### Aliases: MakeBlock\n\n### ** Examples\n\ndata(TentMap)\nembed <- MakeBlock(TentMap, 3, 1, \"TentMap\")\n\n\n"} {"package":"rEDM","topic":"Multiview","snippet":"### Name: Multiview\n### Title: Forecasting using multiview embedding\n### Aliases: Multiview\n\n### ** Examples\n\ndata(block_3sp)\nL = Multiview( dataFrame = block_3sp, lib = \"1 100\", pred = \"101 190\",\nE = 2, columns = \"x_t y_t z_t\", target = \"x_t\" )\n\n\n"} {"package":"rEDM","topic":"PredictInterval","snippet":"### Name: PredictInterval\n### Title: Forecast interval accuracy\n### Aliases: PredictInterval\n\n### ** Examples\n\ndata(TentMap)\nTp.rho = PredictInterval( dataFrame = TentMap, lib = \"1 100\",\npred = \"201 500\", E = 2, columns = \"TentMap\", target = \"TentMap\",\nshowPlot = FALSE )\n\n\n"} {"package":"rEDM","topic":"PredictNonlinear","snippet":"### Name: PredictNonlinear\n### Title: Test for nonlinear dynamics\n### Aliases: PredictNonlinear\n\n### ** Examples\n\ndata(TentMapNoise)\ntheta.rho = PredictNonlinear( dataFrame = TentMapNoise, E = 2,\nlib = \"1 100\", pred = \"201 500\", columns = \"TentMap\",\ntarget = \"TentMap\", showPlot = FALSE )\n\n\n"} {"package":"rEDM","topic":"SMap","snippet":"### Name: SMap\n### Title: SMap forecasting\n### Aliases: SMap\n\n### ** Examples\n\ndata(circle)\nL = SMap( dataFrame = circle, lib=\"1 100\", pred=\"110 190\", theta = 4,\nE = 2, embedded = TRUE, columns = \"x y\", target = \"x\" )\n\n\n"} {"package":"rEDM","topic":"Simplex","snippet":"### Name: Simplex\n### Title: Simplex forecasting\n### Aliases: Simplex\n\n### ** Examples\n\ndata( block_3sp )\nsmplx = Simplex( dataFrame = block_3sp, lib = \"1 100\", pred = \"101 190\",\nE = 3, columns = \"x_t\", target = \"x_t\" )\nComputeError( smplx $ Predictions, smplx $ Observations )\n\n\n"} {"package":"rEDM","topic":"SurrogateData","snippet":"### Name: SurrogateData\n### Title: Generate surrogate data for permutation/randomization tests\n### Aliases: SurrogateData\n\n### ** Examples\n\ndata(\"block_3sp\")\nts <- block_3sp$x_t\nSurrogateData(ts, method = \"ebisuzaki\")\n\n\n"} {"package":"testcorr","topic":"ac.test","snippet":"### Name: ac.test\n### Title: Testing zero autocorrelation\n### Aliases: ac.test\n\n### ** Examples\n\nx <- rnorm(100)\nac.test(x, max.lag = 10)\n\n\n"} {"package":"testcorr","topic":"cc.test","snippet":"### Name: cc.test\n### Title: Testing zero cross-correlation\n### Aliases: cc.test\n\n### ** Examples\n\nx <- rnorm(100)\ny <- rnorm(100)\ncc.test(x, y, max.lag = 10)\n\n\n"} {"package":"testcorr","topic":"iid.test","snippet":"### Name: iid.test\n### Title: Testing iid property\n### Aliases: 
iid.test\n\n### ** Examples\n\nx <- rnorm(100)\niid.test(x, max.lag = 10)\n\n\n"} {"package":"testcorr","topic":"rcorr.test","snippet":"### Name: rcorr.test\n### Title: Testing zero Pearson correlation\n### Aliases: rcorr.test\n\n### ** Examples\n\nx <- matrix(rnorm(400),100)\nrcorr.test(x)\n\n\n"} {"package":"dChipIO","topic":"readCdfBin","snippet":"### Name: readCdfBin\n### Title: Reads a dChip CDF.bin file\n### Aliases: readCdfBin\n### Keywords: file IO\n\n### ** Examples\n\npath <- system.file(\"exData\", package=\"dChipIO\")\nchipType <- \"Test3\"\nfilename <- sprintf(\"%s.CDF.bin\", chipType)\npathname <- file.path(path, filename)\n\nhdr <- readCdfBinHeader(pathname)\nprint(hdr)\n\ndata <- readCdfBin(pathname)\nstr(data)\n\n# Read a subset of the units\nunits <- c(10:11, 15:20, 150:105, 2,2,2)\ndataT <- readCdfBin(pathname, units=units)\nstr(dataT)\n\n# Assert correctness\nfor (ff in c(\"unitNames\", \"numProbes\", \"CellPos\")) {\n stopifnot(length(dataT[[ff]]) == length(units))\n stopifnot(identical(dataT[[ff]], data[[ff]][units]))\n}\n\n\n"} {"package":"dChipIO","topic":"readDcp","snippet":"### Name: readDcp\n### Title: Reads a dChip DCP file\n### Aliases: readDcp\n### Keywords: file IO\n\n### ** Examples\n\npath <- system.file(\"exData\", package=\"dChipIO\")\n\nfilename <- \"Test3-1-121502.dcp\"\npathname <- file.path(path, filename)\n\nhdr <- readDcpHeader(pathname)\nprint(hdr)\n\ndata <- readDcp(pathname)\nstr(data)\n\n# Read a subset of the units\nunits <- c(10:11, 15:20, 150:105, 2,2,2)\ndataT <- readDcp(pathname, units=units)\nstr(dataT)\n\n# Assert correctness\nfor (ff in c(\"calls\", \"thetas\", \"thetaStds\", \"excludes\")) {\n stopifnot(length(dataT[[ff]]) == length(units))\n stopifnot(identical(dataT[[ff]], data[[ff]][units]))\n}\n\n\n"} {"package":"dChipIO","topic":"readDcpRectangle","snippet":"### Name: readDcpRectangle\n### Title: Reads a spatial subset of probe-level data from a dChip DCP file\n### Aliases: readDcpRectangle\n### Keywords: file IO\n\n### ** Examples\n\npath <- system.file(\"exData\", package=\"dChipIO\")\n\nfilename <- \"Test3-1-121502.dcp\"\npathname <- file.path(path, filename)\n\ndata <- readDcpRectangle(pathname)\n\nlayout(matrix(1:4, nrow=2, byrow=TRUE))\nimage(data$rawIntensities, main=\"Raw probe signals\")\nimage(data$normalizedIntensities, main=\"Normalized probe signals\")\n\n\n\n\n"} {"package":"autohrf","topic":"autohrf-datasets","snippet":"### Name: autohrf-datasets\n### Title: Datasets for autohrf examples Example datasets for use in\n### 'autohrf' examples and vignettes. The datasets were extracted from\n### the internal Mind and Brain Lab's (MBLab)\n### repository. 
MBLab is a research lab at the Faculty of Arts,\n### Department of Psychology, University of Ljubljana, Slovenia.\n### Aliases: autohrf-datasets swm swm_autofit swm_autofit1 swm_autofit2\n### flanker flanker_autofit\n\n### ** Examples\n\n# load swm data\ndata_swm <- swm\n\n# load the previously completed autofits\nautofit <- swm_autofit\nautofit1 <- swm_autofit1\nautofit2 <- swm_autofit2\n\n# load flanker data\ndata_flanker <- flanker\n\n# load the previously completed autofits\nautofit3 <- flanker_autofit\n\n\n"} {"package":"autohrf","topic":"autohrf","snippet":"### Name: autohrf\n### Title: autohrf\n### Aliases: autohrf\n\n### ** Examples\n\n# prepare model specs\nmodel3 <- data.frame(\n event = c(\"encoding\", \"delay\", \"response\"),\n start_time = c(0, 2.65, 12.5),\n end_time = c(3, 12.5, 16)\n)\n\nmodel4 <- data.frame(\n event = c(\"fixation\", \"target\", \"delay\", \"response\"),\n start_time = c(0, 2.5, 2.65, 12.5),\n end_time = c(2.5, 3, 12.5, 15.5)\n)\n\nmodel_constraints <- list(model3, model4)\n\n# run autohrf\ndf <- flanker\nautofit <- autohrf(df, model_constraints, tr = 2.5,\n population = 2, iter = 2, cores = 1)\n\n\n\n"} {"package":"autohrf","topic":"evaluate_model","snippet":"### Name: evaluate_model\n### Title: evaluate_model\n### Aliases: evaluate_model\n\n### ** Examples\n\n# create the model\nm <- data.frame(event = c(\"encoding\", \"delay\", \"response\"),\nstart_time = c(0, 2.5, 12.5), duration = c(2.5, 10, 5))\n\n# evaluate\ndf <- flanker\nres <- evaluate_model(df, m, tr = 2.5)\n\n\n\n"} {"package":"autohrf","topic":"get_best_models","snippet":"### Name: get_best_models\n### Title: get_best_models\n### Aliases: get_best_models\n\n### ** Examples\n\n# prepare model specs\nmodel3 <- data.frame(\n event = c(\"encoding\", \"delay\", \"response\"),\n start_time = c(0, 2.65, 12.5),\n end_time = c(3, 12.5, 16)\n)\n\nmodel4 <- data.frame(\n event = c(\"fixation\", \"target\", \"delay\", \"response\"),\n start_time = c(0, 2.5, 2.65, 12.5),\n end_time = c(2.5, 3, 12.5, 15.5)\n)\n\nmodel_constraints <- list(model3, model4)\n\n# run autohrf\ndf <- flanker\nautofit <- autohrf(df, model_constraints, tr = 2.5,\n population = 2, iter = 2, cores = 1)\n\n# print best models\nget_best_models(autofit)\n\n\n\n"} {"package":"autohrf","topic":"plot_best_models","snippet":"### Name: plot_best_models\n### Title: plot_best_models\n### Aliases: plot_best_models\n\n### ** Examples\n\n# prepare model specs\nmodel3 <- data.frame(\n event = c(\"encoding\", \"delay\", \"response\"),\n start_time = c(0, 2.65, 12.5),\n end_time = c(3, 12.5, 16)\n)\n\nmodel4 <- data.frame(\n event = c(\"fixation\", \"target\", \"delay\", \"response\"),\n start_time = c(0, 2.5, 2.65, 12.5),\n end_time = c(2.5, 3, 12.5, 15.5)\n)\n\nmodel_constraints <- list(model3, model4)\n\n# run autohrf\ndf <- flanker\nautofit <- autohrf(df, model_constraints, tr = 2.5,\n population = 2, iter = 2, cores = 1)\n\n# plot best models\nplot_best_models(autofit)\n\n\n\n"} {"package":"autohrf","topic":"plot_fitness","snippet":"### Name: plot_fitness\n### Title: plot_fitness\n### Aliases: plot_fitness\n\n### ** Examples\n\n# prepare model specs\nmodel3 <- data.frame(\n event = c(\"encoding\", \"delay\", \"response\"),\n start_time = c(0, 2.65, 12.5),\n end_time = c(3, 12.5, 16)\n)\n\nmodel4 <- data.frame(\n event = c(\"fixation\", \"target\", \"delay\", \"response\"),\n start_time = c(0, 2.5, 2.65, 12.5),\n end_time = c(2.5, 3, 12.5, 15.5)\n)\n\nmodel_constraints <- list(model3, model4)\n\n# run autohrf\ndf <- flanker\nautofit <- 
autohrf(df, model_constraints, tr = 2.5,\n population = 2, iter = 2, cores = 1)\n\n# plot fitness\nplot_fitness(autofit)\n\n\n\n"} {"package":"autohrf","topic":"plot_model","snippet":"### Name: plot_model\n### Title: plot_model\n### Aliases: plot_model\n\n### ** Examples\n\n# prepare model specs\nmodel3 <- data.frame(event = c(\"encoding\", \"delay\", \"response\"),\n start_time = c(0, 2.65, 12.5),\n duration = c(2.65, 9.85, 3))\n\n\n\n\n"} {"package":"survivalsvm","topic":"makediff1","snippet":"### Name: makediff1\n### Title: 'Diffmatrix'\n### Aliases: makediff1\n### Keywords: internal\n\n### ** Examples\n\nY <- c(1,3,3.5,4,8); delta <- c(0,0,1,1,0); makediff1(Y, delta)\n\n\n"} {"package":"survivalsvm","topic":"makediff2","snippet":"### Name: makediff2\n### Title: 'Diffmatrix'\n### Aliases: makediff2\n### Keywords: internal\n\n### ** Examples\n\nY <- c(1,3,3.5,4,8); delta <- c(0,0,1,1,0); makediff2(Y, delta)\n\n\n"} {"package":"survivalsvm","topic":"makediff3","snippet":"### Name: makediff3\n### Title: 'Diffmatrix'\n### Aliases: makediff3\n### Keywords: internal\n\n### ** Examples\n\nY <- c(1,3,3.5,4,8); delta <- c(0,0,1,1,0); makediff3(Y, delta)\n\n\n"} {"package":"survivalsvm","topic":"predict.survivalsvm","snippet":"### Name: predict.survivalsvm\n### Title: Suvirvalsvm predictions\n### Aliases: predict.survivalsvm\n\n### ** Examples\n\nrequire(survival)\nset.seed(123)\nn <- nrow(veteran)\ntrain.index <- sample(1:n, 0.7*n, replace = FALSE)\ntest.index <- setdiff(1:n, train.index)\nsurvsvm.reg <- survivalsvm(Surv(veteran$diagtime, veteran$status) ~ .,\n subset = train.index, data = veteran,\n type = \"regression\", gamma.mu = 1,\n opt.meth = \"quadprog\", kernel = \"add_kernel\")\npred.survsvm.reg <- predict(object = survsvm.reg, newdata = veteran, subset = test.index)\nprint(pred.survsvm.reg)\n\n\n"} {"package":"survivalsvm","topic":"survivalsvm","snippet":"### Name: survivalsvm\n### Title: survivalsvm\n### Aliases: survivalsvm\n\n### ** Examples\n\nsurvivalsvm(Surv(time, status) ~ ., veteran, gamma.mu = 0.1)\n\nsurvsvm.reg <- survivalsvm(formula = Surv(diagtime, status) ~ ., data = veteran,\n type = \"regression\", gamma.mu = 0.1,\n opt.meth = \"ipop\", kernel = \"add_kernel\")\n \nsurvsvm.vb2 <- survivalsvm(data = veteran, time.variable.name = \"diagtime\",\n status.variable.name = \"status\", \n type = \"vanbelle2\", gamma.mu = 0.1,\n opt.meth = \"quadprog\", diff.meth = \"makediff3\", \n kernel = \"lin_kernel\",\n sgf.sv = 5, sigf = 7, maxiter = 20, \n margin = 0.05, bound = 10)\n \n\n\n"} {"package":"cmdfun","topic":"cmd_args_all","snippet":"### Name: cmd_args_all\n### Title: Return all named arguments and arguments passed as dots from\n### parent function call\n### Aliases: cmd_args_all\n\n### ** Examples\n\ntheFunction <- function(arg1, ...) { cmd_args_all() }\ntheArgs <- theFunction(arg1 = \"test\", example = \"hello\")\n\n\n"} {"package":"cmdfun","topic":"cmd_args_dots","snippet":"### Name: cmd_args_dots\n### Title: return function dots from parent function as named list\n### Aliases: cmd_args_dots\n\n### ** Examples\n\ntheFunction <- function(...) { cmd_args_dots() }\ntheDots <- theFunction(example = \"hello\", boolFlag = TRUE, vectorFlag = c(1,2,3))\n\n\n"} {"package":"cmdfun","topic":"cmd_args_named","snippet":"### Name: cmd_args_named\n### Title: Return all named arguments from parent function call\n### Aliases: cmd_args_named\n\n### ** Examples\n\ntheFunction <- function(arg1, ...) 
{ cmd_args_named() }\ntheNamedArgs <- theFunction(arg1 = \"test\", example = \"hello\")\n\n\n"} {"package":"cmdfun","topic":"cmd_error_if_missing","snippet":"### Name: cmd_error_if_missing\n### Title: Check that file(s) exist, error if not\n### Aliases: cmd_error_if_missing\n\n### ** Examples\n\ncmd_error_if_missing(tempdir())\n## Not run: \n##D # Throws error if file doesn't exist\n##D cmd_error_if_missing(file.path(tempdir(), \"notreal\"))\n## End(Not run)\n\n\n"} {"package":"cmdfun","topic":"cmd_file_combn","snippet":"### Name: cmd_file_combn\n### Title: Generates list of expected output files\n### Aliases: cmd_file_combn\n\n### ** Examples\n\n# Makes list for many file types of same prefix\n# ie myFile.txt, myFile.html, myFile.xml\ncmd_file_combn(\"myFile\", c(\"txt\", \"html\", \"xml\"))\n\n# Makes list for many files of same type\n# ie myFile1.txt, myFile2.txt, myFile3.txt\ncmd_file_combn(c(\"myFile1\", \"myFile2\", \"myFile3\"), \"txt\")\n\n\n\n"} {"package":"cmdfun","topic":"cmd_file_expect","snippet":"### Name: cmd_file_expect\n### Title: Creates list of paths by file extension & checks they exist\n### Aliases: cmd_file_expect\n\n### ** Examples\n\n## Not run: \n##D # Expects many file types of same prefix\n##D # ie myFile.txt, myFile.html, myFile.xml\n##D cmd_file_expect(\"myFile\", c(\"txt\", \"html\", \"xml\"))\n##D \n##D # Expects many files of same type\n##D # ie myFile1.txt, myFile2.txt, myFile3.txt\n##D cmd_file_expect(c(\"myFile1\", \"myFile2\", \"myFile3\"), \"txt\")\n##D \n##D # Expects many files with each prefix and each extension\n##D # ie myFile1.txt, myFile1.html, myFile2.txt, myFile2.html\n##D cmd_file_expect(c(\"myFile1\", \"myFile2\"), c(\"txt\", \"html\"))\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"cmdfun","topic":"cmd_help_flags_similar","snippet":"### Name: cmd_help_flags_similar\n### Title: Suggest alternative name by minimizing Levenshtein edit distance\n### between valid and invalid arguments\n### Aliases: cmd_help_flags_similar\n\n### ** Examples\n\n# with a flagsList, need to pass names()\nflagsList <- list(\"output\" = \"somevalue\", \"missplld\" = \"anotherValue\")\ncmd_help_flags_similar(c(\"output\", \"misspelled\"), names(flagsList))\n\ncommand_flags <- c(\"long-flag-name\")\nflags <- c(\"long_flag_naee\")\ncmd_help_flags_similar(command_flags, flags, .fun = ~{gsub(\"-\", \"_\", .x)})\n\n# returns NULL if no errors\ncmd_help_flags_similar(c(\"test\"), \"test\")\n\n\n"} {"package":"cmdfun","topic":"cmd_help_flags_suggest","snippet":"### Name: cmd_help_flags_suggest\n### Title: Error & Suggest different flag name to user\n### Aliases: cmd_help_flags_suggest\n\n### ** Examples\n\nuser_flags <- list(\"output\", \"inpt\")\nvalid_flags <- c(\"output\", \"input\")\nsuggestions <- cmd_help_flags_similar(valid_flags, user_flags)\n## Not run: \n##D # Throws error\n##D cmd_help_flags_suggest(suggestions)\n## End(Not run)\n\n\n"} {"package":"cmdfun","topic":"cmd_help_parse_flags","snippet":"### Name: cmd_help_parse_flags\n### Title: Parses commandline help options to return vector of valid flag\n### names\n### Aliases: cmd_help_parse_flags\n\n### ** Examples\n\nif (.Platform$OS.type == \"unix\" & file.exists(\"/bin/tar\")) {\n# below are two examples parsing the --help method of GNU tar \n\n# with processx\nif (require(processx)) {\nout <- processx::run(\"tar\", \"--help\", error_on_status = FALSE)\nfn_flags <- cmd_help_parse_flags(out$stdout, split_newline = TRUE)\n}\n\n# with system2\nlines <- system2(\"tar\", \"--help\", stderr = TRUE)\nfn_flags <- 
cmd_help_parse_flags(lines)\n\n# NOTE: some of the \"tar\" flags contain the extra characters: \"\\[\", \"\\)\", and \";\"\n# ie \"one-top-level\\[\" which should be \"one-top-level\"\n# These can be additionally parsed using\ngsub(\"[\\\\[;\\\\)]\", \"\", fn_flags)\n}\n\n\n\n"} {"package":"cmdfun","topic":"cmd_install_check","snippet":"### Name: cmd_install_check\n### Title: Wrapper function for checking an install\n### Aliases: cmd_install_check\n\n### ** Examples\n\n## Not run: \n##D path_search <- cmd_path_search(default = \"/bin\", utils = \"ls\")\n##D cmd_install_check(path_search)\n## End(Not run)\n\n\n"} {"package":"cmdfun","topic":"cmd_install_is_valid","snippet":"### Name: cmd_install_is_valid\n### Title: Macro for constructing boolean check for valid path\n### Aliases: cmd_install_is_valid\n\n### ** Examples\n\nif (.Platform$OS.type == \"unix\") {\nsearch <- cmd_path_search(option_name = \"bin_path\", default_path = \"/bin/\")\nvalid_install <- cmd_install_is_valid(search)\n# Returns TRUE if \"/bin/\" exists\nvalid_install()\n# Returns FALSE if \"bad/path/\" doesn't exist\nvalid_install(\"bad/path/\")\n\n# Also works with options\nsearch_option_only <- cmd_path_search(option_name = \"bin_path\")\nvalid_install2 <- cmd_install_is_valid(search_option_only)\noptions(bin_path = \"/bin/\")\nvalid_install2()\n\n# Setting util = TRUE will check that all utils are also installed\nsearch_with_utils <- cmd_path_search(default_path = \"/bin\", utils = c(\"ls\", \"pwd\"))\nvalid_install_all <- cmd_install_is_valid(search_with_utils, util = TRUE)\nvalid_install_all()\n}\n\n\n"} {"package":"cmdfun","topic":"cmd_list_drop","snippet":"### Name: cmd_list_drop\n### Title: Drop entries from list of flags by name, name/value pair, or\n### index\n### Aliases: cmd_list_drop\n\n### ** Examples\n\nexFlags <- list(\"flag1\" = 2, \"flag2\" = \"someText\")\ncmd_list_drop(exFlags, \"flag1\")\n# will drop flag2 because its name and value match 'drop' vector\ncmd_list_drop(exFlags, c(\"flag2\" = \"someText\"))\n# Will drop \"flag1\" by position index\ncmd_list_drop(exFlags, 1)\n\n# won't drop flag2 because its value isn't 'someText'\nexFlags2 <- list(\"flag1\" = 2, \"flag2\" = \"otherText\")\ncmd_list_drop(exFlags2, c(\"flag2\" = \"someText\"))\n\n\n"} {"package":"cmdfun","topic":"cmd_list_drop_named","snippet":"### Name: cmd_list_drop_named\n### Title: Drop items by name from list\n### Aliases: cmd_list_drop_named\n\n### ** Examples\n\ncmd_list_drop_named(list(\"a\" = 1, \"b\" = 2), \"a\")\n\n\n"} {"package":"cmdfun","topic":"cmd_list_interp","snippet":"### Name: cmd_list_interp\n### Title: Convert list of function arguments to list of command flags\n### Aliases: cmd_list_interp\n\n### ** Examples\n\ntheFunction <- function(...){cmd_args_all()}\ntheArgs <- theFunction(arg1 = \"value\", arg2 = TRUE)\nflagList <- cmd_list_interp(theArgs)\nflags <- cmd_list_to_flags(flagList)\n\n\n"} {"package":"cmdfun","topic":"cmd_list_keep","snippet":"### Name: cmd_list_keep\n### Title: keep entries from list of flags by name, name/value pair, or\n### index\n### Aliases: cmd_list_keep\n\n### ** Examples\n\nexFlags <- list(\"flag1\" = 2, \"flag2\" = \"someText\")\ncmd_list_keep(exFlags, \"flag1\")\n# will keep flag2 because its name and value match 'keep' vector\ncmd_list_keep(exFlags, c(\"flag2\" = \"someText\"))\n# Will keep \"flag1\" by position index\ncmd_list_keep(exFlags, 1)\n\n# won't keep flag2 because its value isn't 'someText'\nexFlags2 <- list(\"flag1\" = 2, \"flag2\" = \"otherText\")\ncmd_list_keep(exFlags2, 
c(\"flag2\" = \"someText\"))\n\n\n"} {"package":"cmdfun","topic":"cmd_list_keep_named","snippet":"### Name: cmd_list_keep_named\n### Title: Keep items by name from list\n### Aliases: cmd_list_keep_named\n\n### ** Examples\n\ncmd_list_keep_named(list(\"a\" = 1, \"b\" = 2), \"a\")\n\n\n"} {"package":"cmdfun","topic":"cmd_list_to_flags","snippet":"### Name: cmd_list_to_flags\n### Title: Convert flag list to vector of command flags\n### Aliases: cmd_list_to_flags\n\n### ** Examples\n\ntheFunction <- function(...){cmd_args_all()}\ntheArgs <- theFunction(arg1 = \"value\", arg2 = TRUE)\nflagList <- cmd_list_interp(theArgs)\nflags <- cmd_list_to_flags(flagList)\n\n\n"} {"package":"cmdfun","topic":"cmd_path_search","snippet":"### Name: cmd_path_search\n### Title: Macro for constructing target path validators\n### Aliases: cmd_path_search\n\n### ** Examples\n\nif (.Platform$OS.type == \"unix\") {\nbin_checker <- cmd_path_search(default_path = \"/bin\", utils = c(\"ls\", \"pwd\"))\n# returns path to bin\nbin_checker()\n# returns path to bin/ls\nbin_checker(util = \"ls\")\n}\n\n\n"} {"package":"cmdfun","topic":"cmd_ui_file_exists","snippet":"### Name: cmd_ui_file_exists\n### Title: Checks if file exists, returns pretty status message\n### Aliases: cmd_ui_file_exists\n\n### ** Examples\n\ncmd_ui_file_exists(\"/path/to/file.txt\")\n\n\n"} {"package":"cmdfun","topic":".check_valid_command_path","snippet":"### Name: .check_valid_command_path\n### Title: Checks path is valid\n### Aliases: .check_valid_command_path\n\n### ** Examples\n\nif (.Platform$OS.type == \"unix\" & file.exists(\"~/bin\")) {\n# will return /full/path/to/home/bin, or error if path doesn't exist\n.check_valid_command_path(\"~/bin\")\n}\n\n\n"} {"package":"cmdfun","topic":".check_valid_util","snippet":"### Name: .check_valid_util\n### Title: Checks for valid members of subdirectory\n### Aliases: .check_valid_util\n\n### ** Examples\n\nif (.Platform$OS.type == \"unix\") {\n# this will return /full/path/to/bin\n# or return an error for all values of util that are not \"ls\" and \"pwd\"\n# or error if \"ls\" does not exist in \"/bin\"\n.check_valid_util(\"ls\", utils = c(\"ls\", \"pwd\"), \"/bin\")\n\n## Not run: \n##D # This will throw error\n##D .check_valid_util(\"badUtil\", utils = c(\"ls\", \"pwd\"), \"/bin\")\n## End(Not run)\n}\n\n\n"} {"package":"archeofrag","topic":"Frag.object-class","snippet":"### Name: Frag.object-class\n### Title: Class '\"Frag.object\"'\n### Aliases: Frag.object-class make_cr_graph,Frag.object-method\n### make_crsr_graph,Frag.object-method make_sr_graph,Frag.object-method\n### show,Frag.object-method\n### Keywords: build data\n\n### ** Examples\n\nshowClass(\"Frag.object\")\n\n\n"} {"package":"archeofrag","topic":"LiangAbu","snippet":"### Name: LiangAbu\n### Title: Dataset: Archeological relationships between pottery fragments\n### in Liang Abu\n### Aliases: LiangAbu df.cr df.sr fragments.info\n### Keywords: datasets\n\n### ** Examples\n\ndata(LiangAbu)\nhead(fragments.info)\n\n\n"} {"package":"archeofrag","topic":"frag.cycles","snippet":"### Name: frag.cycles\n### Title: Count the k-cycles in a graph, for cycles =< k\n### Aliases: frag.cycles\n### Keywords: measurement\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.cycles(g, kmax=4, max.cycles.only=FALSE)\nfrag.cycles(g, kmax=4, max.cycles.only=TRUE)\n\n\n"} {"package":"archeofrag","topic":"frag.diameters","snippet":"### Name: frag.diameters\n### Title: Diameter distribution for unconnected graphs\n### 
Aliases: frag.diameters\n### Keywords: measurement\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.diameters(g)\nfrag.diameters(g, cumulative=TRUE)\n\n\n\n"} {"package":"archeofrag","topic":"frag.edges.weighting","snippet":"### Name: frag.edges.weighting\n### Title: Weighting the edges of a fragmentation graph\n### Aliases: frag.edges.weighting\n### Keywords: build data\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.edges.weighting(g , \"layer\")\n# with morphometric and spatial parameters:\nlibrary(igraph)\nV(g)$morpho <- sample(1:20, 50, replace=TRUE)\nV(g)$x <- sample(1:100, 50, replace=TRUE)\nV(g)$y <- sample(1:100, 50, replace=TRUE)\nV(g)$z <- sample(1:100, 50, replace=TRUE)\nfrag.edges.weighting(g, \"layer\", \"morpho\", \"x\", \"y\", \"z\")\n\n\n"} {"package":"archeofrag","topic":"frag.get.layers","snippet":"### Name: frag.get.layers\n### Title: Extracts the subgraph of each selected stratigraphic layer.\n### Aliases: frag.get.layers\n### Keywords: extraction\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance = .15)\nigraph::V(g)$layers <- c(rep(\"layer1\", 20), rep(\"layer2\", 20), rep(\"layer3\", 10))\nfrag.get.layers(g, layer.attr=\"layers\", sel.layers=c(\"layer1\", \"layer2\"))\n\n\n"} {"package":"archeofrag","topic":"frag.get.layers.pair","snippet":"### Name: frag.get.layers.pair\n### Title: Extracts the subgraph corresponding to a pair of stratigraphic\n### layers.\n### Aliases: frag.get.layers.pair\n### Keywords: extraction\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nigraph::V(g)$layers <- c(rep(\"layer1\", 20), rep(\"layer2\", 20), rep(\"layer3\", 10))\n\nfrag.get.layers.pair(g, \"layers\", sel.layers=c(\"layer2\",\"layer3\"),\n size.mini=2, mixed.components.only=FALSE)\nfrag.get.layers.pair(g, \"layers\", sel.layers=c(\"layer2\",\"layer3\"), \n size.mini=2, mixed.components.only=TRUE)\n\n\n"} {"package":"archeofrag","topic":"frag.get.parameters","snippet":"### Name: frag.get.parameters\n### Title: Returns a series of descriptive statistics for a fragmentation\n### graph\n### Aliases: frag.get.parameters\n### Keywords: summary\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=0.1)\nfrag.get.parameters(g, \"layer\")\n\n\n"} {"package":"archeofrag","topic":"frag.graph.plot","snippet":"### Name: frag.graph.plot\n### Title: Plot a fragmentation graph\n### Aliases: frag.graph.plot\n### Keywords: summary\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.graph.plot(g, \"layer\")\n\n\n"} {"package":"archeofrag","topic":"frag.layers.admixture","snippet":"### Name: frag.layers.admixture\n### Title: Admixture of two stratigraphic layers\n### Aliases: frag.layers.admixture\n### Keywords: measurement\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance = .15)\ng <- frag.edges.weighting(g, layer.attr=\"layer\")\nfrag.layers.admixture(g, \"layer\")\n\n\n"} {"package":"archeofrag","topic":"frag.layers.cohesion","snippet":"### Name: frag.layers.cohesion\n### Title: Cohesion measure of layers\n### Aliases: frag.layers.cohesion\n### Keywords: measurement\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.1)\nfrag.layers.cohesion(g, layer.attr=\"layer\")\n\n\n"} {"package":"archeofrag","topic":"frag.observer.failure","snippet":"### Name: 
frag.observer.failure\n### Title: Simulate the failure of an observer to determine the\n### relationships between fragments.\n### Aliases: frag.observer.failure\n### Keywords: simulation\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.observer.failure(graph=g, likelihood=c(0.05, 0.1), remove.vertices=FALSE)\n\n\n"} {"package":"archeofrag","topic":"frag.path.lengths","snippet":"### Name: frag.path.lengths\n### Title: Path length distribution for unconnected graphs\n### Aliases: frag.path.lengths\n### Keywords: measurement\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance = .15)\nfrag.path.lengths(g)\nfrag.path.lengths(g, cumulative=TRUE)\n\n\n"} {"package":"archeofrag","topic":"frag.relations.by.layers","snippet":"### Name: frag.relations.by.layers\n### Title: Summary of the connection relationships between fragments within\n### and between spatial units.\n### Aliases: frag.relations.by.layers\n### Keywords: summary\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\nfrag.relations.by.layers(g, \"layer\")\n\n\n"} {"package":"archeofrag","topic":"frag.simul.compare","snippet":"### Name: frag.simul.compare\n### Title: From an observed fragmentation graph, simulates two series of\n### graphs corresponding to two deposition hypotheses.\n### Aliases: frag.simul.compare\n### Keywords: simulation\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\ng <- frag.edges.weighting(g, layer.attr=\"layer\")\n## Not run: frag.simul.compare(g, layer.attr=\"layer\", iter=30)\n\n\n"} {"package":"archeofrag","topic":"frag.simul.process","snippet":"### Name: frag.simul.process\n### Title: Simulate the fragmentation of archaeological objects scattered\n### in two stratigraphic layers\n### Aliases: frag.simul.process\n### Keywords: simulation\n\n### ** Examples\n\nfrag.simul.process(n.components=20, vertices=50, disturbance=.15)\n\ng <- frag.simul.process(initial.layers=1, \n n.components=20,\n vertices=50,\n edges=40,\n balance=.5,\n components.balance=.5,\n disturbance=.1,\n planar=FALSE)\nplot(g, vertex.color=factor(igraph::V(g)$layer), \n vertex.size=4, vertex.label=NA)\n\n\n\n"} {"package":"archeofrag","topic":"frag.simul.summarise","snippet":"### Name: frag.simul.summarise\n### Title: Summarise the comparison between an observed fragmentation graph\n### and simulated graphs for two deposition hypotheses.\n### Aliases: frag.simul.summarise\n### Keywords: simulation\n\n### ** Examples\n\ng <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)\ng <- frag.edges.weighting(g, layer.attr=\"layer\")\n## Not run: \n##D res <- frag.simul.compare(g, layer.attr=\"layer\", iter=30, summarise=FALSE)\n##D frag.simul.summarise(g, layer.attr=\"layer\", res.h1=res[[1]], res.h2=res[[2]])\n## End(Not run)\n\n\n"} {"package":"archeofrag","topic":"make_cr_graph","snippet":"### Name: make_cr_graph\n### Title: Make a \"connection\" relationships graph.\n### Aliases: make_cr_graph make_cr_graph-methods\n### Keywords: build data\n\n### ** Examples\n\ncr.df <- matrix(c(1,2, 1,3, 2,3, 4,5, 4,6, 7,8), ncol=2, byrow=TRUE)\nsr.df <- matrix( c(1,1, 9,1, 10,1, 11,2, 12,2, 13,2), ncol=2, byrow=TRUE)\nfragments.df <- data.frame(1:13, letters[1:13]) \n\ncr_g <- make_frag_object(cr=cr.df, fragments=fragments.df)\ncrsr_g <- make_frag_object(cr=cr.df, sr=sr.df, fragments=fragments.df)\n\nmake_cr_graph(cr_g)\nmake_cr_graph(crsr_g)\n\n\n"} 
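The archeofrag records above each exercise a single function; as a minimal end-to-end sketch (using only calls already shown in these records, and assuming frag.layers.cohesion accepts the edge-weighted graph just as frag.layers.admixture does), simulation, edge weighting, and the two layer measures chain together like this:

library(archeofrag)
# simulate a two-layer fragmentation graph, as in the records above
g <- frag.simul.process(n.components=20, vertices=50, disturbance=.15)
# weight the edges before computing layer statistics
g <- frag.edges.weighting(g, layer.attr="layer")
# admixture and cohesion of the two simulated layers
frag.layers.admixture(g, "layer")
frag.layers.cohesion(g, layer.attr="layer")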
{"package":"archeofrag","topic":"make_crsr_graph","snippet":"### Name: make_crsr_graph\n### Title: Makes a \"connection\" relationships graph including the\n### \"similarity\" relationships.\n### Aliases: make_crsr_graph\n### Keywords: build data\n\n### ** Examples\n\ncr.df <- matrix(c(1,2, 1,3, 2,3, 4,5, 4,6, 7,8), ncol=2, byrow=TRUE)\nsr.df <- matrix( c(1,1, 9,1, 10,1, 11,2, 12,2, 13,2), ncol=2, byrow=TRUE)\nfragments.df <- data.frame(1:13, letters[1:13]) \ncrsr_g <- make_frag_object(cr=cr.df, sr=sr.df, fragments=fragments.df)\nmake_crsr_graph(crsr_g)\n\n\n"} {"package":"archeofrag","topic":"make_frag_object","snippet":"### Name: make_frag_object\n### Title: Makes a \"frag.object\" object.\n### Aliases: make_frag_object\n### Keywords: build data\n\n### ** Examples\n\ncr.df <- matrix(c(1,2, 1,3, 2,3, 4,5, 4,6, 7,8), ncol=2, byrow=TRUE)\nsr.df <- matrix( c(1,1, 9,1, 10,1, 11,2, 12,2, 13,2), ncol=2, byrow=TRUE)\nfragments.df <- data.frame(1:13, letters[1:13]) \n\nmake_frag_object(cr=cr.df, fragments=fragments.df)\nmake_frag_object(cr=cr.df, sr=sr.df, fragments=fragments.df)\n\n\n"} {"package":"archeofrag","topic":"make_sr_graph","snippet":"### Name: make_sr_graph\n### Title: Make a \"similarity\" relationships graph.\n### Aliases: make_sr_graph make_sr_graph-methods\n### Keywords: build data\n\n### ** Examples\n\nsr.df <- matrix( c(1,1, 9,1, 10,1, 11,2, 12,2, 13,2), ncol=2, byrow=TRUE)\nfragments.df <- data.frame(1:13, letters[1:13]) \ncrsr_g <- make_frag_object(sr=sr.df, fragments=fragments.df)\nmake_sr_graph(crsr_g)\n\n\n"} {"package":"GPM","topic":"Auxil","snippet":"### Name: Auxil\n### Title: An auxiliary function used in calculating the negative\n### log-likelehood and its gradient\n### Aliases: Auxil\n\n### ** Examples\n\n# see the examples in the fitting function.\n\n\n"} {"package":"GPM","topic":"CorrMat_Sym","snippet":"### Name: CorrMat\n### Title: Two Functions for Constructing the Correlation Matrix in 'GPM'\n### Package\n### Aliases: CorrMat_Sym CorrMat_Vec\n\n### ** Examples\n\n# see the examples in \\code{\\link[GPM]{Fit}}\n\n\n"} {"package":"GPM","topic":"Draw","snippet":"### Name: Draw\n### Title: The Plotting Function of 'GPM' Package\n### Aliases: Draw\n\n### ** Examples\n\n# See the examples in the fitting function.\n\n\n"} {"package":"GPM","topic":"Fit","snippet":"### Name: Fit\n### Title: The Fitting Function of 'GPM' Package\n### Aliases: Fit\n\n### ** Examples\n\n# 1D example: Fit a model (with default settings) and evaluate the performance\n# by computing the root mean squared error (RMSE) in prediction.\nlibrary(lhs)\nX <- 5*maximinLHS(15, 1)\nY <- 2*sin(2*X) + log(X+1)\nM <- Fit(X, Y)\nXF <- matrix(seq(0, 5, length.out = 100), 100, 1)\nYF <- Predict(XF, M)\nRMSE <- sqrt(mean((YF$YF - (2*sin(2*XF) + log(XF+1)))^2))\n\n## Not run: \n##D # 1D example: Fit a model, evaluate the performance, and plot the response\n##D # along with 95% prediction interval\n##D X <- 10*maximinLHS(10, 1) - 5\n##D Y <- X*cos(X)\n##D M <- Fit(X, Y)\n##D XF <- matrix(seq(-5, 5, length.out = 500), 500, 1)\n##D YF <- Predict(XF, M)\n##D RMSE <- sqrt(mean((YF$YF - (XF*cos(XF)))^2))\n##D Draw(M, 1, res = 20)\n##D \n##D # 2D example: Fit a model, evaluate the performance, and plot the response\n##D # surface along with 95% prediction interval\n##D X <- 2*maximinLHS(10, 2) - 1\n##D Y <- X[, 1]^2 + X[, 2]^2\n##D M <- Fit(X, Y, CorrType = \"PE\")\n##D XF <- 2*maximinLHS(100, 2) - 1\n##D YF <- Predict(XF, M)\n##D RMSE <- sqrt(mean((YF$YF - (XF[, 1]^2 + XF[, 2]^2))^2))\n##D library(lattice)\n##D 
Draw(M, c(1, 1), res = 15, PI95=1)\n##D \n##D # 2D example: Plot the previous model wrt X1 in the [-2, 2]\n##D # interval with X2=1\n##D Draw(M, c(1, 0), LB = -2, UB = 2, res = 15, PI95=1)\n##D \n##D # 3D example: Compare the performance of Gaussian (\"G\") and lifted Brownian\n##D # with Gamma=1 (\"LBG\")\n##D X <- 2*maximinLHS(50, 3) - 1\n##D Y <- cos(X[, 1]^2) + 2*sin(X[, 2]^2) + X[, 3]^2\n##D M_G <- Fit(X, Y)\n##D M_LBG <- Fit(X, Y, CorrType = \"LBG\")\n##D XF <- 2*maximinLHS(500, 3) - 1\n##D YF_G <- Predict(XF, M_G)\n##D YF_LBG <- Predict(XF, M_LBG)\n##D RMSE_G <- sqrt(mean((YF_G$YF - (cos(XF[, 1]^2) + 2*sin(XF[, 2]^2) + XF[, 3]^2))^2))\n##D RMSE_LBG <- sqrt(mean((YF_LBG$YF - (cos(XF[, 1]^2) + 2*sin(XF[, 2]^2) + XF[, 3]^2))^2))\n##D \n##D # 3D example: Draw the response in 2D using the M_G model when X3=0\n##D Draw(M_G, c(1, 1, 0), PI95 = 0, Values = 0, X1Label = 'Input 1', X2Label = 'Input 2')\n##D \n##D # 3D example: 2D response\n##D X <- 2*maximinLHS(50, 3) - 1\n##D Y <- cbind(cos(X[, 1]^2) + 2*sin(X[, 2]^2) + X[, 3]^2, rowSums(X))\n##D M <- Fit(X, Y)\n##D Draw(M, c(0, 1, 1), Response_ID = 2, Values = 0.5)\n##D \n##D # 2D example with noise\n##D X <- 2*maximinLHS(100, 2) - 1\n##D Y <- X[, 1]^2 + X[, 2]^2 + matrix(rnorm(nrow(X), 0, .5), nrow(X), 1)\n##D M <- Fit(X, Y)\n##D # Estimating the noise variance (should be close to 0.5^2)\n##D M$Details$Nug_opt*M$CovFunc$Parameters$Sigma2*M$Data$Yrange^2\n## End(Not run)\n\n\n"} {"package":"GPM","topic":"Eigen","snippet":"### Name: MatrixAlgebra\n### Title: A Set of Functions for Doing Some Calculations on Matrices in\n### 'GPM' Package\n### Aliases: Eigen CppSolve LowerChol\n\n### ** Examples\n\n# see the examples in \\code{\\link[GPM]{Fit}}\n\n\n"} {"package":"GPM","topic":"NLogL","snippet":"### Name: NLogL\n### Title: The Function for calculating the Negative Log-Likelihood in\n### 'GPM' Package\n### Aliases: NLogL\n\n### ** Examples\n\n# see the examples in the fitting function.\n\n\n"} {"package":"GPM","topic":"NLogL_G","snippet":"### Name: NLogL_G\n### Title: The Function for calculating the gradient of Negative\n### Log-Likelihood in 'GPM' Package\n### Aliases: NLogL_G\n\n### ** Examples\n\n# see the examples in the fitting function.\n\n"} {"package":"GPM","topic":"Predict","snippet":"### Name: Predict\n### Title: The Prediction Function of 'GPM' Package\n### Aliases: Predict\n\n### ** Examples\n\n# See the examples in the fitting function.\n\n\n"} {"package":"MacroZooBenthosWaterA","topic":"calculate_ASPT","snippet":"### Name: calculate_ASPT\n### Title: The Average Score Per Taxon (ASPT) represents the average\n### tolerance score of all taxa within the community, and is calculated\n### by dividing the BMWP by the number of families/taxa represented in\n### the sample.\n### Aliases: calculate_ASPT\n\n### ** Examples\n\nSite <- c(\"L1\", \"L2\", \"L2\")\nFamily <- c(\"Capniidae\", \"Aeshnidae\", \"Chloroperlidae\")\ntest <- data.frame(Family, Site)\ncalculate_ASPT(test)\n\n\n"} {"package":"MacroZooBenthosWaterA","topic":"calculate_EPT","snippet":"### Name: calculate_EPT\n### Title: This index is calculated based on the number of families\n### belonging to three macroinvertebrate orders: Ephemeroptera,\n### Plecoptera and Trichoptera.\n### Aliases: calculate_EPT\n\n### ** Examples\n\nSite <- c(\"L1\", \"L2\", \"L2\")\nFamily <- c(\"Capniidae\", \"Aeshnidae\", \"Chloroperlidae\")\ntest <- data.frame(Family, Site)\ncalculate_EPT(test)\n\n\n"} {"package":"MacroZooBenthosWaterA","topic":"calculate_FBI","snippet":"### Name: calculate_FBI\n### 
Title: Assigned tolerance values range from 0 to 10 for families and\n### increase as water quality decreases\n### Aliases: calculate_FBI\n\n### ** Examples\n\nSite <- c(\"L1\", \"L2\", \"L2\")\nFamily <- c(\"Capniidae\", \"Aeshnidae\", \"Chloroperlidae\")\nAbundance <- c(4, 2, 15)\ntest <- data.frame(Family, Abundance, Site)\ncalculate_FBI(test)\n\n\n"} {"package":"MacroZooBenthosWaterA","topic":"calculate_SWRC","snippet":"### Name: calculate_SWRC\n### Title: SWRC - Biotic index (Stroud Water Research Centre - Biotic\n### index)\n### Aliases: calculate_SWRC\n\n### ** Examples\n\nSite <- c(\"L1\", \"L2\", \"L2\")\nFamily <- c(\"Capniidae\", \"Aeshnidae\", \"Chloroperlidae\")\nAbundance <- c(4, 2, 15)\ntest <- data.frame(Family, Abundance, Site)\ncalculate_SWRC(test)\n\n\n"} {"package":"deepdep","topic":"deepdep","snippet":"### Name: deepdep\n### Title: Acquire the dependencies of the package on any depth level\n### Aliases: deepdep\n\n### ** Examples\n\n\n## No test: \nlibrary(deepdep)\n\ndd_downloads <- deepdep(\"ggplot2\")\nhead(dd_downloads)\n\ndd_2 <- deepdep(\"ggplot2\", depth = 2, downloads = TRUE)\nplot_dependencies(dd_2, \"circular\")\n\ndd_local <- deepdep(\"deepdep\", local = TRUE)\nplot_dependencies(dd_local)\n## End(No test)\n\n\n\n\n"} {"package":"deepdep","topic":"get_available_packages","snippet":"### Name: get_available_packages\n### Title: Get the list of available packages\n### Aliases: get_available_packages\n\n### ** Examples\n\n## No test: \n\nlibrary(deepdep)\n\nav <- get_available_packages()\nhead(av)\n\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"get_dependencies","snippet":"### Name: get_dependencies\n### Title: Acquire the dependencies of the package\n### Aliases: get_dependencies\n\n### ** Examples\n\nlibrary(deepdep)\n\n## No test: \ndependencies <- get_dependencies(\"htmltools\", downloads = FALSE)\ndependencies\n\ndependencies_local <- get_dependencies(\"deepdep\", downloads = FALSE, local = TRUE)\ndependencies_local\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"get_description","snippet":"### Name: get_description\n### Title: Scrape the DESCRIPTION file and CRAN metadata of the package\n### Aliases: get_description\n\n### ** Examples\n\nlibrary(deepdep)\n\n## No test: \ndescription <- get_description(\"ggplot2\")\ndescription\n\ndescription_local <- get_description(\"deepdep\", local = TRUE)\ndescription_local\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"get_downloads","snippet":"### Name: get_downloads\n### Title: Scrape the download data of the package\n### Aliases: get_downloads\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\ndownloads <- get_downloads(\"ggplot2\")\ndownloads\n## End(No test)\n\n\n\n\n"} {"package":"deepdep","topic":"plot_dependencies","snippet":"### Name: plot_dependencies\n### Title: Main plot function for a 'deepdep' object\n### Aliases: plot_dependencies plot_dependencies.default\n### plot_dependencies.character plot_dependencies.deepdep\n\n### ** Examples\n\n\n## No test: \nlibrary(deepdep)\n\n#:# use local packages\nplot_dependencies(\"deepdep\", depth = 2, local = TRUE)\n\ndd <- deepdep(\"ggplot2\")\nplot_dependencies(dd, \"tree\")\n\ndd2 <- deepdep(\"ggplot2\", depth = 2)\nplot_dependencies(dd2, \"circular\")\n\n#:# show grand_total download count\nplot_dependencies(\"shiny\", show_downloads = TRUE)\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"plot_downloads","snippet":"### Name: plot_downloads\n### Title: Plot download count of CRAN packages.\n### Aliases: plot_downloads plot_downloads.default 
plot_downloads.deepdep\n### plot_downloads.package_dependencies plot_downloads.package_downloads\n### plot_downloads.character\n\n### ** Examples\n\n\n## No test: \nlibrary(deepdep)\n\nplot_downloads(\"htmltools\")\n\ndd <- deepdep(\"ggplot2\")\nplot_downloads(dd)\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"print.available_packages","snippet":"### Name: print.available_packages\n### Title: Print function for an object of 'available_packages' class\n### Aliases: print.available_packages\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\nav <- get_available_packages()\nhead(av)\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"print.deepdep","snippet":"### Name: print.deepdep\n### Title: Print function for an object of 'deepdep' class\n### Aliases: print.deepdep\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\ndd <- deepdep(\"stringr\")\ndd\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"print.package_dependencies","snippet":"### Name: print.package_dependencies\n### Title: Print function for an object of 'package_dependencies' class\n### Aliases: print.package_dependencies\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\nget_dependencies(\"htmltools\", downloads = TRUE)\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"print.package_description","snippet":"### Name: print.package_description\n### Title: Print function for an object of 'package_description' class\n### Aliases: print.package_description\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\ndescription <- get_description(\"ggplot2\")\ndescription\n## End(No test)\n\n\n\n"} {"package":"deepdep","topic":"print.package_downloads","snippet":"### Name: print.package_downloads\n### Title: Print function for an object of 'package_downloads' class\n### Aliases: print.package_downloads\n\n### ** Examples\n\n## No test: \nlibrary(deepdep)\n\ndesc <- get_downloads(\"stringr\")\ndesc\n## End(No test)\n\n\n\n"} {"package":"DoE.MIParray","topic":"DoE.MIParray-package","snippet":"### Name: DoE.MIParray-package\n### Title: Package to Create a MIP Based Array\n### Aliases: DoE.MIParray-package DoE.MIParray\n### Keywords: array design\n\n### ** Examples\n\n## Not run: \n##D ## ideal sequence of optimization problems\n##D ## shown here for Mosek,\n##D ## for Gurobi analogous, if necessary increasing maxtime to e.g. 600 or 3600 or ...\n##D \n##D ## very small problem\n##D plan <- mosek_MIParray(16, rep(2,6), resolution=4, kmax=6)\n##D \n##D ## an example approach for a larger problem\n##D ## optimize shortest word length\n##D plan3 <- mosek_MIParray(24, c(2,4,3,2,2,2,2), resolution=3, maxtime=20)\n##D ## feasible solution was found, no confirmed optimum, 7/3 words of length 3\n##D ## try to optimize further or confirm optimality (improve=TRUE does this),\n##D ## give it 10 minutes\n##D plan3b <- mosek_MIPcontinue(plan3, improve=TRUE, maxtime=600)\n##D ## no improvement has been found, and the gap is still very large\n##D ## (the time limit makes the result non-deterministic, of course,\n##D ## because it depends on the computer's power and availability of its resources)\n##D \n##D ## For large problems, it cannot be expected that a *confirmed* optimum is found.\n##D ## Of course, one can put more effort into the optimization, e.g. by running overnight.\n##D ## It is also advisable to compare the outcome to other ways for obtaining a good array,\n##D ## e.g. 
function oa.design from package DoE.base with optimized column allocation.\n##D require(DoE.base)\n##D show.oas(nruns=24, nlevels=c(2,4,3,2,2,2,2), show=Inf)\n##D GWLP(plan_oad <- oa.design(nruns=24, nlevels=c(2,4,3,2,2,2,2), col=\"min34\"))\n##D ## here, plan3b has a better A3 than plan_oad\n##D \n##D ## one might also try to confirm optimality by switching to the other optimizer\n##D plan3c <- gurobi_MIPcontinue(plan3b, improve=TRUE, maxtime=600, MIPFocus=3)\n##D ## focus on improved bound with option MIPFocus\n##D ## still same value with very large gap after running this\n##D ## thus, now assume this as best practically feasible value\n##D \n##D ## one might now try to improve words of length 4 (improve=FALSE turns to the next word length)\n##D plan4 <- mosek_MIPcontinue(plan3b, improve=FALSE, maxtime=600)\n##D ## this does not yield any improvement\n##D ## working on longer words is not considered worthwhile\n##D ## thus, plan3 or plan3b are used for pragmatic reasons,\n##D ## without confirmed optimality\n## End(Not run)\n\n\n"} {"package":"DoE.MIParray","topic":"write_MPSILP","snippet":"### Name: create_MIQP.Rd\n### Title: Internal functions that support the export of (mixed) integer\n### quadratic or linear problems in MPS format\n### Aliases: write_MPSILP create_MIQP\n### Keywords: array design internal\n\n### ** Examples\n\n## Not run: \n##D ## can also be run with gurobi_MIParray instead of mosek_MIParray\n##D ## there are of course better ways to obtain good arrays for these parameters\n##D ## (e.g. function FrF2 from package FrF2)\n##D problemlist <- create_ILPlist(16, c(2,2,4,8), resolution=2, kmax=4)\n##D feld <- mosek_MIParray(16, c(2,2,4,8), resolution=2, kmax=4)\n##D feld\n##D names(attributes(feld))\n##D attr(feld, \"MIPinfo\")$info\n##D \n##D ## using a start value\n##D start <- DoE.base::L16.2.8.8.1[,1:5]\n##D feld <- mosek_MIParray(16, rep(2,5), resolution=4, start=start)\n##D \n##D ## counting vector representation of the start value could also be used\n##D DoE.MIParray:::dToCount(start-1)\n##D ## \"-1\", because the function requires values starting with 0\n##D ## 32 elements for the full factorial in lexicographic order, 16 ones for the runs\n##D \n##D ## extending an existing array\n##D force <- matrix(as.numeric(as.matrix(DoE.base::undesign(DoE.base::oa.design(L8.2.7)))), nrow=8)\n##D feld <- mosek_MIParray(16, rep(2,7), resolution=3, kmax=4, forced=force)\n##D attr(feld, \"MIPinfo\")$info\n## End(Not run)\n\n\n"} {"package":"DoE.MIParray","topic":"dToCount","snippet":"### Name: dToCount.Rd\n### Title: Functions to switch between count and array representation of an\n### array\n### Aliases: dToCount countToDmixed ff\n### Keywords: array design\n\n### ** Examples\n\nd <- ff(c(2,2,4))[1:6,] ## first six rows of the full factorial only\nd\n## the count vector must have 2*2*4=16 elements,\n## the first six must be 1, the last ten must be zero\ndToCount(d) ## does not produce the desired result,\n ## because the first column of d\n ## does not contain both levels\n(d_as_count <- dToCount(d, nlevels=c(2,2,4)))\n ## corresponds to the above way of creating d\ndToCount(d, nlevels=c(2,2,5)) ## would correspond to a different reality,\n ## where the third factor has in fact 5 levels,\n ## of which only four are in the array\ncountToDmixed(c(2,2,4), d_as_count)\n ## creates d from the count representation\n\n\n"} {"package":"DoE.MIParray","topic":"oa_feasible","snippet":"### Name: functionsFromDoE.base\n### Title: Functions from package DoE.base\n### Aliases: oa_feasible lowerbound_AR length2 length3 length4 length5\n### contr.XuWu GWLP 
ICFTs SCFTs\n### Keywords: design array\n\n### ** Examples\n\noa_feasible(24, c(2,3,4,6),2)\nlowerbound_AR(24, c(2,3,4,6),2)\n\n\n"} {"package":"DoE.MIParray","topic":"mosek_MIParray","snippet":"### Name: mosek_MIParray\n### Title: Functions to Create a MIP Based Array Using Gurobi or Mosek\n### Aliases: mosek_MIParray gurobi_MIParray\n### Keywords: array design\n\n### ** Examples\n\n## Not run: \n##D ## can also be run with gurobi_MIParray instead of mosek_MIParray\n##D ## there are of course better ways to obtain good arrays for these parameters\n##D ## (e.g. function FrF2 from package FrF2)\n##D feld <- mosek_MIParray(16, rep(2,7), resolution=3, kmax=4)\n##D feld\n##D names(attributes(feld))\n##D attr(feld, \"MIPinfo\")$info\n##D \n##D ## using a start value\n##D start <- DoE.base::L16.2.8.8.1[,1:5]\n##D feld <- mosek_MIParray(16, rep(2,5), resolution=4, start=start)\n##D \n##D ## counting vector representation of the start value could also be used\n##D DoE.MIParray:::dToCount(start-1)\n##D ## \"-1\", because the function requires values starting with 0\n##D ## 32 elements for the full factorial in lexicographic order, 16 ones for the runs\n##D \n##D ## extending an existing array\n##D force <- matrix(as.numeric(as.matrix(DoE.base::undesign(DoE.base::oa.design(L8.2.7)))), nrow=8)\n##D feld <- mosek_MIParray(16, rep(2,7), resolution=3, kmax=4, forced=force)\n##D attr(feld, \"MIPinfo\")$info\n## End(Not run)\n\n\n"} {"package":"DoE.MIParray","topic":"mosek_MIPsearch","snippet":"### Name: mosek_MIPsearch\n### Title: Functions to Search for optimum MIP Based Array Using Gurobi or\n### Mosek\n### Aliases: mosek_MIPsearch gurobi_MIPsearch\n### Keywords: array design\n\n### ** Examples\n\n## Not run: \n##D ## can also be run with gurobi_MIParray instead of mosek_MIParray\n##D ## there are of course better ways to obtain good arrays for these parameters\n##D ## (e.g. function FrF2 from package FrF2)\n##D oa_feasible(18, c(2,3,3,3,3), 2) ## strength 2 array feasible\n##D lowerbound_AR(18, c(2,3,3,3,3), 3) ## lower bound for A3\n##D ## of course not necessary here, the design is found fast\n##D feld <- mosek_MIPsearch(18, c(2,3,3,3,3), stopearly=FALSE, listout=TRUE, maxtime=30)\n##D ## even stopearly=TRUE would not stop, because the lower bound 2 is not achievable\n##D feld\n##D names(attributes(feld))\n##D attr(feld, \"optorder\")\n##D ## even for this simple case, running optimization until confirmed optimality \n##D ## would be very slow\n## End(Not run)\n\n\n"} {"package":"DoE.MIParray","topic":"write_MPSILPlist","snippet":"### Name: write_MPSILPlist.Rd\n### Title: Functions to create and write lists of (mixed) integer quadratic\n### or linear problems related to orthogonal arrays\n### Aliases: write_MPSILPlist write_MPSMIQP create_ILPlist\n### Keywords: array design\n\n### ** Examples\n\n###################################################################\n## an array and its counting vector\n\n## arrays (starting the coding with 1)\n## and their counting vectors can be used interchangeably\nmyarr <- cbind(c(1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2),\n c(1,1,1,1,2,2,2,2,1,1,1,1,2,2,2,2),\n c(1,2,3,4,1,2,3,4,1,2,3,4,1,2,3,4),\n c(1,5,3,7,2,6,4,8,8,4,6,2,7,3,5,1))\n\n## we want to see it w.r.t. 
a 2,2,4,8 level full factorial\n## determine the counting vector representation of this array\n## nlevels is needed,\n## because the third column of myarr has only 2 levels\n(myarr_cv <- dToCount(myarr, nlevels=c(2,2,4,8), startfrom1=TRUE))\n\n###################################################################\n## demo: counting vector represents the array runs\n###################################################################\n## full factorial in lexicographic order\nfullfac <- ff(2,2,4,8) + 1 ### ff levels start with 0\n##\n## pick the selected runs from fullfac\nselfac <- fullfac[which(myarr_cv==1),]\n##\n## order both variants in the same way and compare them\n## (in this case, they are equal without reordering)\nord1 <- DoE.base::ord(selfac) ## order them\nord2 <- DoE.base::ord(myarr) ## order them\nselfac[ord1,] == myarr[ord2,]\n#######################################################\n\n#######################################################\n## We go for an array in 16 runs with four factors in\n## 2,2,4,8 levels.\n\n## Is a strength 2 oa feasible?\n##\noa_feasible(16, c(2,2,4,8), 2) ## FALSE\n##\n## consequence: use resolution 2 (=strength 1),\n## minimize number of words of length 2\n\nproblemlist <- create_ILPlist(16, nlevels = c(2,2,4,8), resolution = 2)\nlength(problemlist) ## 12 distinct search orders\nnames(problemlist[[3]])\nproblemlist[[3]][-2] ## ILP is too long for printing\nproblemlist1 <- create_ILPlist(16, nlevels = c(2,2,4,8),\n resolution = 2, search.orders = FALSE)\n ## only the pre-specified search order\nproblemlist2 <- create_ILPlist(16, nlevels = c(2,2,4,8),\n resolution = 2, orders = list(c(2,2,4,8),\n c(8,2,4,2)))\n ## the two specified search orders\n## Not run: \n##D write_MPSILPlist(prefix=\"miniprob\", problemlist)\n##D ## writes miniprob01.mps, ..., miniprob12.mps and miniprob_toc.txt\n##D write_MPSILPlist(prefix=\"miniprob\", problemlist1, toc=FALSE)\n##D ## writes miniprob1.mps\n## End(Not run)\n\n## The MPS files can be read by various optimizers.\n## The ILP problems aim for a feasible solution.\n## Start values are possible, but usually not useful.\n## The best solution (lowest target value) can be imported into R.\n\n## the solution is a counting vector\n## its format depends on the optimizer\n## import it into R and calculate the array from it\nimportedsol <- myarr_cv # for demo only\nsolarray <- countToDmixed(importedsol, nlevels=c(2,2,4,8))\n##\n## it is crucial to use the order of the levels\n## that corresponds to the problem that the solver solved\n\nGWLP(solarray)\n\n#######################################################\n## providing a lower bound for the number of\n## length 2 words in a strength 1 (resolution 2) array\n#######################################################\n##\nlowerbound_AR(nruns = 16, nlevels = c(2,2,4,8), R = 2) # 1\n##\n## In this example, we have immediately hit on a solution\n## with optimum A2-value (see GWLP)\n\n#######################################################\n## using a quadratic problem for optimizing A2\n##\n## Not run: \n##D write_MPSMIQP(\"quadprob\", 16, c(2,2,4,8), resolution=2)\n##D ## writes quadprob.mps\n## End(Not run)\n\n## Run time for solving the quadratic problem exported by write_MPSMIQP\n## may substantially (!) 
benefit from providing the lower bound of the\n## objective function, if that bound is attained.\n##\n## The lower bound for the minimum of the quadratic problem\n## created by write_MPSMIQP\n## is the lower bound for the word length, multiplied with n^2,\n## here 16 ^ 2 * 1 = 256,\n## or half that value,\n## depending on how the optimizer handles quadratic objectives.\n#######################################################\n\n## Depending on the optimizer, it is useful or even crucial to provide a\n## starting value to write_MPSMIQP. This starting value can be obtained\n## as the solution to a linear problem (that was exported using functions\n## create_ILPlist and write_MPSILPlist).\n\n\n\n"} {"package":"chronicle","topic":"add_barplot","snippet":"### Name: add_barplot\n### Title: Add a bar plot to a chronicle report\n### Aliases: add_barplot\n\n### ** Examples\n\nhtml_report <- add_barplot(report = '',\n dt = iris,\n bars = 'Species',\n value = 'Sepal.Length')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_boxplot","snippet":"### Name: add_boxplot\n### Title: Add a box plot to a chronicle report\n### Aliases: add_boxplot\n\n### ** Examples\n\nhtml_report <- add_boxplot(report = '',\n dt = iris,\n value = 'Sepal.Length',\n groups = 'Species', jitter = TRUE)\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_chunk","snippet":"### Name: add_chunk\n### Title: Transforms a function call into an Rmarkdown chunk\n### Aliases: add_chunk\n\n### ** Examples\n\nlibrary(chronicle)\nhtml_chunk <- add_chunk(fun = chronicle::make_barplot,\n params = list(dt = 'iris',\n value = 'Sepal.Width',\n bars = 'Species'))\ncat(html_chunk)\n\n\n"} {"package":"chronicle","topic":"add_code","snippet":"### Name: add_code\n### Title: Add formatted code chunks to a chronicle R Markdown report\n### Aliases: add_code\n\n### ** Examples\n\nhtml_report <- add_code(report = '',\n code_title = 'Code comes after this title',\n code = 'f <- function(x, y){paste(x,y)},\nf(\"a\", \"b\")',\n eval = FALSE,\n echo = TRUE,\n fig_width = 12,\n fig_height = 8)\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_density","snippet":"### Name: add_density\n### Title: Add a density plot to a chronicle report\n### Aliases: add_density\n\n### ** Examples\n\nhtml_report <- add_density(report = \"\",\n dt = iris,\n value = 'Sepal.Length',\n groups = 'Species')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_dygraph","snippet":"### Name: add_dygraph\n### Title: Add a dygraph to a chronicle report\n### Aliases: add_dygraph\n\n### ** Examples\n\ndat <- data.frame(x = c(rnorm(100, 2, 4),\n rnorm(100, 6, 1),\n rnorm(100, 8, 2)),\n group = c(rep('A', 100),\n rep('B', 100),\n rep('C', 100)),\n date = rep(seq(as.Date(\"2020-01-01\"),\n as.Date(\"2020-04-09\"),\n 'days'),\n 3))\nhtml_report <- add_dygraph(report = '',\n dt = dat,\n value = 'x',\n date = 'date')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_histogram","snippet":"### Name: add_histogram\n### Title: Add a histogram plot to a chronicle report\n### Aliases: add_histogram\n\n### ** Examples\n\nhtml_report <- add_histogram(report = \"\",\n dt = iris,\n value = 'Sepal.Length',\n groups = 'Species')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_image","snippet":"### Name: add_image\n### Title: Add an image to a chronicle Rmarkdown report\n### Aliases: add_image\n\n### ** Examples\n\n\nlibrary(chronicle)\nreport <- add_image(image_path = 'readme1.png',\n image_caption = 'This is the caption of the image',\n 
image_title = 'This is the image that I want to include')\n\n\n"} {"package":"chronicle","topic":"add_lineplot","snippet":"### Name: add_lineplot\n### Title: Add a line plot to a chronicle report\n### Aliases: add_lineplot\n\n### ** Examples\n\nhtml_report <- add_lineplot(report = \"\",\n dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = FALSE)\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_quotes","snippet":"### Name: add_quotes\n### Title: Adds additional quotations to character values\n### Aliases: add_quotes\n\n### ** Examples\n\nparams = list(a = TRUE, b = FALSE, c = 'ABC', d = 15)\nadd_quotes(params)\nadd_quotes(params, except = 'c')\n\n\n"} {"package":"chronicle","topic":"add_raincloud","snippet":"### Name: add_raincloud\n### Title: Add a raincloud plot to a chronicle report\n### Aliases: add_raincloud\n\n### ** Examples\n\nhtml_report <- add_raincloud(report = \"\",\n dt = iris,\n value = 'Sepal.Length',\n groups = 'Species')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_scatterplot","snippet":"### Name: add_scatterplot\n### Title: Add a scatter plot to a chronicle report\n### Aliases: add_scatterplot\n\n### ** Examples\n\nhtml_report <- add_scatterplot(report = \"\",\n dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = FALSE)\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_table","snippet":"### Name: add_table\n### Title: Add a table to a chronicle report\n### Aliases: add_table\n\n### ** Examples\n\nhtml_report <- add_table(table = iris,\n table_title = 'Iris measures',\n html_table_type = 'kable')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_text","snippet":"### Name: add_text\n### Title: Add text to a chronicle Rmarkdown report\n### Aliases: add_text\n\n### ** Examples\n\nhtml_report <- add_text(text = 'This is the text that will be seen outside of any chunk',\n text_title = 'Text title')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_title","snippet":"### Name: add_title\n### Title: Add a titled section to a chronicle Rmarkdown report\n### Aliases: add_title\n\n### ** Examples\n\nhtml_report <- add_title(report = '',\n title = 'Just the title here')\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"add_violin","snippet":"### Name: add_violin\n### Title: Add a violin plot to a chronicle report\n### Aliases: add_violin\n\n### ** Examples\n\nhtml_report <- add_violin(report = \"\",\n dt = iris,\n value = 'Sepal.Length',\n groups = 'Species', jitter = TRUE)\ncat(html_report)\n\n\n"} {"package":"chronicle","topic":"assemble_call","snippet":"### Name: assemble_call\n### Title: Assembles a formatted function call from a function and a list\n### of parameters\n### Aliases: assemble_call\n\n### ** Examples\n\nchronicle::assemble_call(fun_name = 'base::sapply',\n params = list(X = 'iris',\n FUN= 'class'))\nchronicle::assemble_call(fun_name = 'base::sapply',\n params = list(X = 'iris',\n FUN= 'class'),\n non_char = c('X', 'FUN'))\n\n\n"} {"package":"chronicle","topic":"check_cols","snippet":"### Name: check_cols\n### Title: Warns if any of the passed column names is missing from the data\n### provided.\n### Aliases: check_cols\n\n### ** Examples\n\nchronicle::check_cols(mtcars, c('cyl', 'made_up_column'))\n\n\n"} {"package":"chronicle","topic":"file_extension","snippet":"### Name: file_extension\n### Title: Parse the file extension for each R Markdown output format\n### Aliases: file_extension\n\n### ** 
Examples\n\nfile_extension(c('prettydoc', 'word_document', 'tufte_handout'))\n\n\n"} {"package":"chronicle","topic":"make_barplot","snippet":"### Name: make_barplot\n### Title: Create a bar plot from a data frame through ggplotly\n### Aliases: make_barplot\n\n### ** Examples\n\nmake_barplot(dt = iris, bars = 'Species', value = 'Sepal.Length')\nmake_barplot(dt = ggplot2::mpg,\n bars = 'manufacturer',\n break_bars_by = 'model',\n value = 'cty',\n horizontal = TRUE,\n sort_by_value = TRUE)\n\n\n"} {"package":"chronicle","topic":"make_boxplot","snippet":"### Name: make_boxplot\n### Title: Create a box plot from a data frame through ggplotly\n### Aliases: make_boxplot\n\n### ** Examples\n\nmake_boxplot(dt = ggplot2::mpg, value = 'hwy', groups = 'drv', jitter = TRUE)\n\n\n\n"} {"package":"chronicle","topic":"make_density","snippet":"### Name: make_density\n### Title: Create a density plot from a data frame through ggplotly\n### Aliases: make_density\n\n### ** Examples\n\nmake_density(dt = iris,\n value = 'Sepal.Length',\n groups = 'Species')\nmake_density(dt = iris,\n value = 'Sepal.Length',\n groups = 'Species',\n faceted = FALSE)\n\n\n"} {"package":"chronicle","topic":"make_dygraph","snippet":"### Name: make_dygraph\n### Title: Plot a time series from a data frame through dygraph's\n### interactive html plot interface\n### Aliases: make_dygraph\n\n### ** Examples\n\ndat <- data.frame(x = c(rnorm(100, 2, 4),\n rnorm(100, 6, 1),\n rnorm(100, 8, 2)),\n group = c(rep('A', 100),\n rep('B', 100),\n rep('C', 100)),\n date = rep(seq(as.Date(\"2020-01-01\"),\n as.Date(\"2020-04-09\"),\n 'days'),\n 3))\nmake_dygraph(dt = dat,\n value = 'x',\n date = 'date')\nmake_dygraph(dt = dat,\n value = 'x',\n groups = 'group',\n date = 'date')\n\n\n"} {"package":"chronicle","topic":"make_histogram","snippet":"### Name: make_histogram\n### Title: Create a histogram plot from a data frame through ggplotly\n### Aliases: make_histogram\n\n### ** Examples\n\nmake_histogram(dt = iris,\n value = 'Sepal.Length',\n groups = 'Species')\n\n\n"} {"package":"chronicle","topic":"make_lineplot","snippet":"### Name: make_lineplot\n### Title: Create a line plot from a data frame through ggplotly\n### Aliases: make_lineplot\n\n### ** Examples\n\nmake_lineplot(dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = FALSE)\n\nmake_lineplot(dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = TRUE,\n scales = 'free')\n\n\n\n"} {"package":"chronicle","topic":"make_raincloud","snippet":"### Name: make_raincloud\n### Title: Create a raincloud plot from a data frame through ggplotly\n### Aliases: make_raincloud\n\n### ** Examples\n\nmake_raincloud(dt = iris, value = 'Sepal.Width')\nmake_raincloud(dt = iris, value = 'Sepal.Width', adjust = 1)\nmake_raincloud(dt = iris, value = 'Petal.Length', groups = 'Species', static = TRUE, adjust = 1)\nmake_raincloud(dt = iris, value = 'Sepal.Length', groups = 'Species', adjust = 1)\n\n\n"} {"package":"chronicle","topic":"make_scatterplot","snippet":"### Name: make_scatterplot\n### Title: Create a scatter plot from a data frame through ggplotly\n### Aliases: make_scatterplot\n\n### ** Examples\n\nmake_scatterplot(dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = FALSE)\n\nmake_scatterplot(dt = ggplot2::mpg,\n x = 'hwy',\n y = 'cty',\n groups = 'manufacturer',\n faceted = TRUE,\n scales = 'free')\n\n\n\n"} {"package":"chronicle","topic":"make_title","snippet":"### Name: make_title\n### Title: Guess a title out of 
function parameters\n### Aliases: make_title\n\n### ** Examples\n\nmake_title(fun = chronicle::make_barplot,\n params = list(value = 'Amount',\n bars = 'Country',\n break_bars_by = 'Region'))\n\nmake_title(fun = chronicle::make_raincloud,\n params = list(value = 'value',\n groups = 'species'))\n\n\n"} {"package":"chronicle","topic":"make_violin","snippet":"### Name: make_violin\n### Title: Create a violin plot from a data frame through ggplotly\n### Aliases: make_violin\n\n### ** Examples\n\nmake_violin(dt = ggplot2::mpg, value = 'hwy', groups = 'drv')\n\n\n\n"} {"package":"chronicle","topic":"output_config","snippet":"### Name: output_config\n### Title: Build the yaml output specification for an R Markdown\n### Aliases: output_config\n\n### ** Examples\n\ncat(output_config('prettydoc'))\ncat(output_config('ioslides'))\n\n\n"} {"package":"chronicle","topic":"plot_columns","snippet":"### Name: plot_columns\n### Title: Plot all columns of a table\n### Aliases: plot_columns\n\n### ** Examples\n\nchronicle::plot_columns(dt = iris, by_column = 'Species')\n\n\n"} {"package":"chronicle","topic":"render_report","snippet":"### Name: render_report\n### Title: Render the report using all objects from the global environment\n### Aliases: render_report\n\n### ** Examples\n\n# report_demo <- add_title(title = 'This is how a chronicle report looks', title_level = 1) %>%\n# add_density(dt = iris, groups = 'Species', value = 'Sepal.Length', faceted = F) %>%\n# add_boxplot(dt = iris, groups = 'Species', value = 'Sepal.Length') %>%\n# add_barplot(dt = iris, bars = 'Species', value = 'Sepal.Length') %>%\n# add_table(table = iris,\n# table_title = 'This is the iris dataset. Smells good!',\n# html_table_type = 'kable') %>%\n# add_table(table = mpg,\n# table_title = 'And this is mpg',\n# html_table_type = 'DT')\n# render_report(report = report_demo,\n# title = 'Demo Output',\n# author = 'This is the author',\n# filename = 'demo_output',\n# output_format = 'prettydoc',\n# keep_rmd = TRUE)\n\n\n"} {"package":"chronicle","topic":"report_columns","snippet":"### Name: report_columns\n### Title: HTML interactive report detailing each column on a table\n### Aliases: report_columns\n\n### ** Examples\n\n# chronicle::report_columns(dt = iris,\n# by_column = 'Species',\n# horizontal_bars = TRUE,\n# keep_rmd = TRUE)\n\n\n"} {"package":"chronicle","topic":"rmd_title_level","snippet":"### Name: rmd_title_level\n### Title: Returns the count of '#' corresponding to a given title level\n### Aliases: rmd_title_level\n\n### ** Examples\n\nrmd_title_level(1)\nrmd_title_level(3)\n\n\n"} {"package":"chronicle","topic":"set_classes","snippet":"### Name: set_classes\n### Title: Change column classes with a named vector\n### Aliases: set_classes\n\n### ** Examples\n\nlibrary(chronicle)\niris_changed <- chronicle::set_classes(dt = iris,\n character = 'Species',\n integer = c('Sepal.Length', 'Sepal.Width'))\npurrr::map_chr(iris_changed, class)\n\n\n"} {"package":"ccml","topic":"callNCW","snippet":"### Name: callNCW\n### Title: Calculate normalized consensus weight (NCW) matrix based on\n### permutation.\n### Aliases: callNCW\n\n### ** Examples\n\n\n# load data\ndata(example_data)\nlabel=example_data\n\n# if plot is not NULL, results will be saved in \"result_output\" directory\ntitle=\"result_output\"\n\n## No test: \n# run ncw\nncw<-callNCW(title=title,label=label,stability=TRUE,nperm=4,ncore=1)\n## End(No test)\n\n\n\n"} {"package":"ccml","topic":"ccml","snippet":"### Name: ccml\n### Title: A two-step consensus clustering inputting multiple 
predictive\n### labels with different sample coverages (missing labels)\n### Aliases: ccml\n\n### ** Examples\n\n\n# load data\ndata(example_data)\nlabel=example_data\n\n# if plot is not NULL, results will be saved in \"result_output\" directory\ntitle=\"result_output\"\n\n## No test: \n# do not estimate stability of permutation numbers.\nres_1=ccml(title=title,label=label,nperm = 3,ncore=1,stability=FALSE,maxK=5,pItem=0.8)\n\n# other methods for clustering of distance matrix\nres_2<-ccml(title=title,label=label,nperm = 10,ncore=1,stability=TRUE,maxK=3,\n pItem=0.9,clusterAlg = \"hc\")\n\n# set the start random seed\nres_3<-ccml(title=title,label=label,output=FALSE,nperm = 5,ncore=1,seedn=150,stability=TRUE,maxK=3,\n pItem=0.9)\n## End(No test)\n\n\n\n"} {"package":"ccml","topic":"plotCompareCW","snippet":"### Name: plotCompareCW\n### Title: Plot of original consensus weights vs. normalized consensus\n### weights grouping by the number of co-appeared percent of\n### clustering (non-missing).\n### Aliases: plotCompareCW\n\n### ** Examples\n\n\n# load data\ndata(example_data)\nlabel=example_data\n\n# if plot is not NULL, results will be saved in \"result_output\" directory\ntitle=\"result_output\"\n\n## No test: \nncw<-callNCW(title=title,label=label,stability=TRUE)\nplotCompareCW(title=title,label=label,ncw=ncw)\n## End(No test)\n\n\n"} {"package":"COMBAT","topic":"COMBAT","snippet":"### Name: COMBAT\n### Title: A Combined Gene-based Association Test\n### Aliases: COMBAT\n\n### ** Examples\n\n# read SNP P values\nfile1 <- paste(path.package(\"COMBAT\"),\"extdata\",\"SNP_info.txt.gz\",sep=\"/\")\nsnp.info <- read.table(file1, header = TRUE, as.is=TRUE)\nsnp.pvals <- as.matrix(snp.info[,2])\n\n# read reference genotype\nfile2 <- paste(path.package(\"COMBAT\"),\"extdata\",\"SNP_ref.txt.gz\",sep=\"/\")\nsnp.ref <- read.table(file2, header = TRUE)\nsnp.ref <- as.matrix(snp.ref)\n#call COMBAT\n## No test: \nCOMBAT(snp.pvals, snp.ref, nperm=100, ncores=2)\n## End(No test)\n## Don't show: \nCOMBAT(snp.pvals, snp.ref, nperm=10, ncores=1)\n## End(Don't show)\n\n\n"} {"package":"COMBAT","topic":"ext_simes","snippet":"### Name: ext_simes\n### Title: Extended Simes Procedure\n### Aliases: ext_simes\n\n### ** Examples\n\n# see ?COMBAT\n\n\n"} {"package":"COMBAT","topic":"gates","snippet":"### Name: gates\n### Title: Calling Gene-based Association Tests\n### Aliases: gates vegas simpleM\n\n### ** Examples\n\n# read SNP P values\nfile1 <- paste(path.package(\"COMBAT\"),\"extdata\",\"SNP_info.txt.gz\",sep=\"/\")\nsnp.info <- read.table(file1, header = TRUE, as.is=TRUE)\nsnp.pvals <- as.matrix(snp.info[,2])\n\n# read reference genotype\nfile2 <- paste(path.package(\"COMBAT\"),\"extdata\",\"SNP_ref.txt.gz\",sep=\"/\")\nsnp.ref <- read.table(file2, header = TRUE)\nsnp.ref <- as.matrix(snp.ref)\n\n#compute correlation among SNPs\ncor_G <- ld.Rsquare(snp.ref)\n\n#call gates\n(pval_gates <- gates(x=snp.pvals, cor_G=cor_G))\n\n#call vegas\n(pval_vegas <- vegas(x=snp.pvals, cor_G=cor_G))\n\n#call simpleM\n(pval_simpleM <- simpleM(x=snp.pvals, cor_G=cor_G))\n\n\n"} {"package":"envstat","topic":"edit_envstat","snippet":"### Name: edit_envstat\n### Title: Edit an envstat configuration file\n### Aliases: edit_envstat\n\n### ** Examples\n\n## Not run: \n##D # By default envstat uses a config file in your home directory\n##D envstat::edit_envstat()\n##D \n##D # But you can tell it to use a different config file if you prefer\n##D envstat::edit_envstat(filepath = \"/tmp/config.yml\")\n## End(Not run)\n\n\n"} 
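The COMBAT records above print the three gene-based tests and the combined test separately; a minimal sketch (reusing only the calls and bundled data files shown in those records, and assuming gates, vegas and simpleM each return a single p-value, as their printed examples suggest) lines the per-method p-values up against the combined result:

library(COMBAT)
# SNP p-values and reference genotypes bundled with the package
file1 <- paste(path.package("COMBAT"), "extdata", "SNP_info.txt.gz", sep="/")
snp.pvals <- as.matrix(read.table(file1, header = TRUE, as.is = TRUE)[, 2])
file2 <- paste(path.package("COMBAT"), "extdata", "SNP_ref.txt.gz", sep="/")
snp.ref <- as.matrix(read.table(file2, header = TRUE))
cor_G <- ld.Rsquare(snp.ref)  # SNP-SNP correlation from the reference panel
# per-method p-values next to the combined test
c(gates = gates(x = snp.pvals, cor_G = cor_G),
  vegas = vegas(x = snp.pvals, cor_G = cor_G),
  simpleM = simpleM(x = snp.pvals, cor_G = cor_G))
COMBAT(snp.pvals, snp.ref, nperm = 100, ncores = 1)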
{"package":"envstat","topic":"sitrep","snippet":"### Name: sitrep\n### Title: Output an environment status situation report\n### Aliases: sitrep\n\n### ** Examples\n\n## Not run: \n##D # By default envstat uses a config file in your home directory\n##D envstat::sitrep()\n##D \n##D # But you can tell it to use a different config file if you prefer\n##D envstat::sitrep(path = \"/tmp/config.yml\")\n##D \n##D # sitrep can also run silently, so that it can be used programatically\n##D envstat::sitrep(silent = TRUE)\n## End(Not run)\n\n\n"} {"package":"envstat","topic":"use_envstat","snippet":"### Name: use_envstat\n### Title: Create a new skeleton configuration file\n### Aliases: use_envstat\n\n### ** Examples\n\n## Not run: \n##D # By default envstat uses a config file in your home directory\n##D envstat::use_envstat()\n##D \n##D # But you can tell it to use a different config file if you prefer\n##D envstat::use_envstat(filepath = \"/tmp/config.yml\")\n##D \n##D # During normal operation use_envstat uses a default config file that ships\n##D # with the package, but you can specify your own if you have one. This can\n##D # be useful in settings such as multi-user server deployments of R, as many\n##D # users could use the same base configuration file and personalise it for\n##D # their own specific use case.\n##D envstat::use_envstat(source = \"/shared/corp_envstat_conf.yml\")\n## End(Not run)\n\n\n"} {"package":"ozmaps","topic":"abs-data","snippet":"### Name: abs-data\n### Title: Australian Bureau of Statistics (ABS) map data\n### Aliases: abs-data abs_ced abs_lga abs_ste\n\n### ** Examples\n\nozmap(\"abs_ste\")\n\nozmap(\"abs_lga\", col = sample(rainbow(nrow(abs_lga), alpha = .4)))\npal <- rainbow(12, alpha = 0.6) ## boring! install paletteer for ochRe palettes\n\nif (isTRUE(requireNamespace(\"paletteer\", quietly = TRUE))) {\n if (utils::packageVersion(\"paletteer\") < '1.0.0') {\n pal <- paletteer::paletteer_d(package = \"ochRe\", palette = \"namatjira_qual\")\n } else {\n pal <- paletteer::paletteer_d(palette = \"ochRe::namatjira_qual\")\n }\n}\nopal <- colorRampPalette(pal)\nozmap(\"abs_ced\", col = opal(30))\n\n\n"} {"package":"ozmaps","topic":"ozmap","snippet":"### Name: ozmap\n### Title: Australia map\n### Aliases: ozmap\n\n### ** Examples\n\nozmap()\nozmap(\"country\", lwd = 6)\nozmap(\"abs_ced\", add = TRUE, border = \"firebrick\") ## commonwealth (national) electoral divisions\n\n\n"} {"package":"ozmaps","topic":"ozmap_data","snippet":"### Name: ozmap_data\n### Title: Australia map data\n### Aliases: ozmap_data\n\n### ** Examples\n\n\ncountry_sf <- ozmap_data(\"country\")\n\n## No test: \n ## can take time to print out\n lga_sf <- ozmap_data(\"abs_lga\")\n lga_sf[1:6, ]\n## End(No test)\n\n\n"} {"package":"Recon","topic":"MRW_steady_state","snippet":"### Name: MRW_steady_state\n### Title: Mankiw-Romer-Weil Growth Model Steady State\n### Aliases: MRW_steady_state\n\n### ** Examples\n\n\nMRW_steady_state(gamma = .005)\n\n\n\n\n"} {"package":"Recon","topic":"cobb_douglas","snippet":"### Name: cobb_douglas\n### Title: Cobb-Douglas Model\n### Aliases: cobb_douglas\n\n### ** Examples\n\n\nI <- c(3, 4, 5)\n\ncobb_douglas(I)\n\n\n\n"} {"package":"Recon","topic":"cobb_douglas_2","snippet":"### Name: cobb_douglas_2\n### Title: 2 inputs Cobb-Douglas Model\n### Aliases: cobb_douglas_2\n\n### ** Examples\n\n\nx <- c(3, 4, 5)\ny <- c(1, 4, 2)\n\ndata <- data.frame(x = x, y = y)\n\ncobb_douglas_2(data)\n\n\n\n\n"} {"package":"Recon","topic":"cournot_solver","snippet":"### Name: 
cournot_solver\n### Title: Cournot Duopoly with numeric solution\n### Aliases: cournot_solver\n\n### ** Examples\n\n\nd = c(20,-1,0)\ncournot_solver(demand = d)\n\n\n\n"} {"package":"Recon","topic":"grid2","snippet":"### Name: grid2\n### Title: Cartesian coordinates generator\n### Aliases: grid2\n\n### ** Examples\n\n\ngrid2(a = 0, b = 10, c = .1)\n\n\n\n"} {"package":"Recon","topic":"monopoly_solver","snippet":"### Name: monopoly_solver\n### Title: Monopoly Profit Maximization\n### Aliases: monopoly_solver\n\n### ** Examples\n\n\n\nc = c(50, 3, 1)\np = c(500, -8, -1)\nmonopoly_solver(cost = c, demand = p)\n\n\n\n"} {"package":"Recon","topic":"sim_nasheq","snippet":"### Name: sim_nasheq\n### Title: Simultaneous Games Strategies Nash Equilibria\n### Aliases: sim_nasheq\n\n### ** Examples\n\n\na = matrix(c(-8, -10, 0, -1), nrow = 2)\nb = matrix(c(-8, 0, -10, -1), nrow = 2)\nsim_nasheq(a, b)\nsim_nasheq(a, b, \"mixed\")\n\n\n\n"} {"package":"Recon","topic":"solow_steady_state","snippet":"### Name: solow_steady_state\n### Title: Solow Growth Model Steady State\n### Aliases: solow_steady_state\n\n### ** Examples\n\n\nsolow_steady_state()\n\n\n\n\n"} {"package":"Recon","topic":"stackelberg_solver","snippet":"### Name: stackelberg_solver\n### Title: Stackelberg Duopoly with numeric solution\n### Aliases: stackelberg_solver\n\n### ** Examples\n\n\nl = c(100, 4)\nf = c(120, 5)\np = c(300, -10)\nstackelberg_solver(leader = l, follower = f, demand = p)\n\n\n\n"} {"package":"seqimpute","topic":"seqQuickLook","snippet":"### Name: seqQuickLook\n### Title: Numbering NAs and types of gaps among a dataset\n### Aliases: seqQuickLook\n\n### ** Examples\n\ndata(OD)\n\nseqQuickLook(OD=OD, np=1, nf=0)\n\n\n\n"} {"package":"seqimpute","topic":"seqTrans","snippet":"### Name: seqTrans\n### Title: Computing and spotting transitions among a dataset\n### Aliases: seqTrans\n\n### ** Examples\n\ndata(OD)\n\nseqTransList <- seqTrans(OD=OD, trans=c(\"yes->no\"))\n\n\n\n\n"} {"package":"seqimpute","topic":"seqimpute","snippet":"### Name: seqimpute\n### Title: Imputation of missing data in sequence analysis\n### Aliases: seqimpute\n\n### ** Examples\n\n\n# Default single imputation\nRESULT <- seqimpute(OD=OD, np=1, nf=1, nfi=1, npt=1, mi=1)\n\n# Seqimpute used with parallelisation\n## Not run: \n##D RESULT <- seqimpute(OD=OD, np=1, nf=1, nfi=1, npt=1, mi=2, ParExec=TRUE, SetRNGSeed=17,ncores=2)\n## End(Not run)\n\n\n\n"} {"package":"rtson","topic":"fromTSON","snippet":"### Name: fromTSON\n### Title: Deserialize a raw vector\n### Aliases: fromTSON\n\n### ** Examples\n\n## Example\n\nlibrary(rtson)\n\nlist = list(integer=42L,\n double=42,\n bool=TRUE,\n uint8=tson.uint8.vec(c(42,0)),\n uint16=tson.uint16.vec(c(42,0)),\n uint32=tson.uint32.vec(c(42,0)),\n int8=tson.int8.vec(c(42,0)),\n int16=tson.int16.vec(c(42,0)),\n int32=as.integer(c(42,0)),\n float32=tson.float32.vec(c(0.0, 42.0)),\n float64=c(42.0,42.0),\n map=list(x=42, y=42, label=\"42\"),\n list=list(\"42\",42)\n)\n\nbytes = toTSON(list)\nobject = fromTSON(bytes)\n\n\n"} {"package":"rtson","topic":"readTSON","snippet":"### Name: readTSON\n### Title: Deserialize a connection\n### Aliases: readTSON\n\n### ** Examples\n\n## Example\n\nlibrary(rtson)\n\nlist = list(integer=42L,\n double=42,\n bool=TRUE,\n uint8=tson.uint8.vec(c(42,0)),\n uint16=tson.uint16.vec(c(42,0)),\n uint32=tson.uint32.vec(c(42,0)),\n int8=tson.int8.vec(c(42,0)),\n int16=tson.int16.vec(c(42,0)),\n int32=as.integer(c(42,0)),\n float32=tson.float32.vec(c(0.0, 42.0)),\n float64=c(42.0,42.0),\n 
map=list(x=42, y=42, label=\"42\"),\n list=list(\"42\",42)\n)\n\ncon = rawConnection(raw(0), \"r+\")\nwriteTSON(list, con)\nbytes = rawConnectionValue(con)\nclose(con)\ncon = rawConnection(bytes, \"r\")\nobject = readTSON(con)\n\n\n"} {"package":"rtson","topic":"toTSON","snippet":"### Name: toTSON\n### Title: Serialize a list\n### Aliases: toTSON\n\n### ** Examples\n\n## Example\n\nlibrary(rtson)\n\nlist = list(integer=42L,\n double=42,\n bool=TRUE,\n uint8=tson.uint8.vec(c(42,0)),\n uint16=tson.uint16.vec(c(42,0)),\n uint32=tson.uint32.vec(c(42,0)),\n int8=tson.int8.vec(c(42,0)),\n int16=tson.int16.vec(c(42,0)),\n int32=as.integer(c(42,0)),\n float32=tson.float32.vec(c(0.0, 42.0)),\n float64=c(42.0,42.0),\n map=list(x=42, y=42, label=\"42\"),\n list=list(\"42\",42)\n)\n\nbytes = toTSON(list)\n\n\n"} {"package":"rtson","topic":"writeTSON","snippet":"### Name: writeTSON\n### Title: Serialize a list\n### Aliases: writeTSON\n\n### ** Examples\n\n## Example\n\nlibrary(rtson)\n\nlist = list(integer=42L,\n double=42,\n bool=TRUE,\n uint8=tson.uint8.vec(c(42,0)),\n uint16=tson.uint16.vec(c(42,0)),\n uint32=tson.uint32.vec(c(42,0)),\n int8=tson.int8.vec(c(42,0)),\n int16=tson.int16.vec(c(42,0)),\n int32=as.integer(c(42,0)),\n float32=tson.float32.vec(c(0.0, 42.0)),\n float64=c(42.0,42.0),\n map=list(x=42, y=42, label=\"42\"),\n list=list(\"42\",42)\n)\n\ncon = rawConnection(raw(0), \"r+\")\nwriteTSON(list, con)\nbytes = rawConnectionValue(con)\nclose(con)\ncon = rawConnection(bytes, \"r\")\nobject = readTSON(con)\n\n\n"} {"package":"paleotree","topic":"DiversityCurves","snippet":"### Name: DiversityCurves\n### Title: Diversity Curves\n### Aliases: DiversityCurves taxicDivCont taxicDivDisc phyloDiv\n\n### ** Examples\n\n\n# taxicDivDisc example with the retiolinae dataset\ndata(retiolitinae)\ntaxicDivDisc(retioRanges)\n\n##################################################\n\n# simulation examples\n\n# 07-15-19\n# note that the examples below are weird and rather old\n # the incomplete sampling can now be done\n # with the same function that simulates diversification\n\nset.seed(444)\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n\n# let's see what the 'true' diversity curve looks like in this case\n#plot the FADs and LADs with taxicDivCont\ntaxicDivCont(taxa)\n\n# simulate a fossil record with imperfect sampling via sampleRanges\nrangesCont <- sampleRanges(taxa, r = 0.5)\n\n# plot the diversity curve based on the sampled ranges\nlayout(1:2)\ntaxicDivCont(rangesCont)\n# Now let's use binTimeData to bin in intervals of 1 time unit\nrangesDisc <- binTimeData(rangesCont,\n int.length = 1)\n# plot with taxicDivDisc\ntaxicDivDisc(rangesDisc)\n# compare to the continuous time diversity curve\n\nlayout(1)\n# Now let's make a tree using taxa2phylo\ntree <- taxa2phylo(taxa,obs_time = rangesCont[,2])\nphyloDiv(tree)\n\n# a simple example with phyloDiv\n # using a tree from rtree in ape\nset.seed(444)\ntree <- rtree(100)\nphyloDiv(tree)\n\n###########################################################\n\n#a neat example of using phyloDiv with timeSliceTree \n #to simulate doing molecular-phylogeny studies \n #of diversification...in the past\n\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\ntaxicDivCont(taxa)\n\n#that's the whole diversity curve\n\n#with timeSliceTree we could look at the lineage 
accumulation curve \n #we'd get of species sampled at a point in time\n\ntree <- taxa2phylo(taxa)\n#use timeSliceTree to make tree of relationships up until time = 950 \ntree950 <- timeSliceTree(tree,\n sliceTime = 950,\n plot = TRUE,\n drop.extinct = FALSE)\n\n#use drop.extinct = TRUE to only get the tree of lineages extant at time = 950\ntree950 <- timeSliceTree(tree,\n sliceTime = 950,\n plot = TRUE,\n drop.extinct = TRUE)\n\n#now its an ultrametric tree with many fewer tips...\n#lets plot the lineage accumulation plot on a log scale\nphyloDiv(tree950, \n plotLogRich = TRUE)\n\n##################################################\n#an example of a 'spiky' diversity curve \n # and why split.int is a good thing\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n\ntaxaDiv <- taxicDivCont(taxa)\n\n#simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa, r = 0.5)\nrangesDisc <- binTimeData(rangesCont,\n int.length = 10)\n\n#now let's plot with taxicDivDisc\n # but with the intervals from taxaDiv\n # by default, split.int = TRUE\n\ntaxicDivDisc(rangesDisc,\n int.times = taxaDiv[,1:2],\n split.int = TRUE)\n\n#look pretty!\n\n#now let's turn off split.int\ntaxicDivDisc(rangesDisc,\n int.times = taxaDiv[,1:2],\n split.int = FALSE)\n#looks 'spiky'!\n\n\n\n"} {"package":"paleotree","topic":"RaiaCopesRule","snippet":"### Name: RaiaCopesRule\n### Title: Dated Trees and Trait Data for Ammonites, Ceratopsians and\n### Cervids from Raia et al. 2015\n### Aliases: RaiaCopesRule ceratopsianTreeRaia cervidTreeRaia\n### ammoniteTreeRaia sutureComplexity shellSize ammoniteTraitsRaia\n### Keywords: datasets\n\n### ** Examples\n\ndata(RaiaCopesRule)\n\n# plotting trees\nplot(ladderize(ammoniteTreeRaia));axisPhylo()\n\nplot(ceratopsianTreeRaia);axisPhylo()\n\nplot(cervidTreeRaia);axisPhylo()\n\n# plotting traitgrams for ammonites\nplotTraitgram(tree = multi2di(ammoniteTreeRaia), trait = sutureComplexity,\n\t conf.int = FALSE, main = \"Ammonite Suture Complexity\")\n\nplotTraitgram(tree = multi2di(ammoniteTreeRaia), trait = shellSize,\n\t conf.int = FALSE, main = \"Ammonite Shell Diameter\")\n\n##############################################################################################\n\n# The data set was generated by sourcing the following script:\n\t \n## No test: \n\nlibrary(paleotree)\n\n# Let's read in the trees from Raia et al 2015, AmNat\n\t# following is taken from their supplemental appendix, available at AmNat\n# they all appear to be trees dated to the last appearance times\n\t# *and* specifically the end-boundary of the interval containing the last appearance\n\n#########################################\n# ammonite genera\n\nammoniteTreeRaia <- paste0(\"(((((Araxoceras:4,Eoaraxoceras:4)Araxoceratidae:26.5,Pseudasp\",\n \"idites:33.199997,Dieneroceras:37.300003,(Tardicolumbites:13.000015,Cowboyiceras:13.000023)\",\n \"Dinaritaceae:24.299988,Grambergia:42.5,(Amphipopanoceras:6, Megaphyllites:46.399994)Megaph\",\n \"yllitaceae:36.5,(Proteusites:11,Nathorstites:21)Nathorstitaceae:31.5,(Inyoites:7,Lanceolit\",\n \"es:7,Parussuria:7)Noritaceae:30.300003,(((Placites:66.700012,((Acrochordiceras:10.199997, \",\n \"Bradyia:10.199997,Globacrochordiceras:5,Paracrochordiceras:10.199997)Acrochordiceratidae:9\",\n \".000015,Balatonites:19.200012,(Favreticeras:10,Guexites:10,Gymnotoceras:10)Beyrichitidae:9\",\n \".200012, 
Eogymnotoceras:19.200012,Goricanites:14.000015)Ceratitaceae:7.100006)clade_16:7.0\",\n \"99976, (((Gaudemerites:13.000015,(Owenites:9.000015,Prosphingites:9.000015)Paranannitidae:\",\n \"4,Meekoceras:13.000015, Arctoceras:13.000015)Meekoceratoidea:5.06665,(((Riedelites:85.6000\",\n \"06,((((Berriasella:15.399994, (Polyptychites:20.399994,Surites:14)Polyptychitidae:1.399994\",\n \")clade_32:1.400002,Bodrakiceras:20.300003, Busnardoites:16.800003,Campylotoxia:20.300003,K\",\n \"arakaschiceras:23.199997,Luppovella:16.800003,Malbosiceras:13, Pomeliceras:13.399994)Neoco\",\n \"mitidae:21.199997,(Otohoplites:8.199997,Sonneratia:4.5,Anadesmoceras:4.5, Anahoplites:20,A\",\n \"rcthoplites:8.199997,Cleoniceras:8.199997,Dimorphoplites:8.199997,Epihoplites:20, Gastropl\",\n \"ites:13.900002,Grycia:13.900002,Hoplites:13.900002)Hoplitidae:60.899994)clade_29:20.200005\",\n \", (Engonoceras:20.400002,(Knemiceras:16.400002,Parengonoceras:7,Platiknemiceras:7)Knemicer\",\n \"atidae:4) Engonoceratoidea:74.600006,(((Glochiceras:11,(Aconeceras:36.799995,Falciferella:3\",\n \"5.899994,Protaconeceras:7, Sanmartinoceras:24.369995)Oppeliidae:25.400009)Haplocerataceae:\",\n \"15.775009,(((Mortoniceras:16.300003, Oxytropidoceras:14)Brancoceratidae:27.633331,((Parado\",\n \"lphia:12.700005,Stoliczkaia:18.800003,Tegoceras:7) Lyelliceratidae:7.566666,((Borissiakoce\",\n \"ras:10.5,Mammites:7,Mantelliceras:12.800003)Acanthoceratidae:11.783333, (Neoptychites:6,Va\",\n \"scoceras:6)Vascoceratidae:12.783333)clade_49:11.783333)clade_47:7.566666)clade_45:7.566666\",\n \", (Epileymeriella:5,Leymeriella:11.099998)Leymeriellidae:30.400002,(Beudanticeras:44.03332\",\n \"5, Burckhardtites:21.303329,(Barremites:1.666672,Desmoceras:48.166672)clade_55:1.666656, P\",\n \"seudohaploceras:21.303329,Pseudosaynella:21.303329,Pseudosilesites:21.303329,(Puzosia:56.6\",\n \"50002, (Forbesiceras:27.666664,(Melchiorites:6.083328,Uhligella:15.48333)clade_58:6.083336\",\n \")clade_57:6.083336) clade_56:6.083328,Valdedorsella:33.633331,Zuercherella:33.73333)Desmoc\",\n \"eratidae:1.666667) Acanthocerataceae:42.575012)clade_39:15.774994,((Coroniceras:1.25,(Mega\",\n \"tyloceras:76.203336, (Zugodactylites:10.016663,Amaltheus:2.616669)Eoderocerataceae:2.61666\",\n \"9)clade_61:2.616669)clade_60:1.25, Oxynoticeras:9.100006)Psilocerataceae:1.25)clade_38:1.2\",\n \"5)Ammonitina:4,((Saghalinites:14,Tetragonites:14) Tetragonitidae:22,(Eogaudryceras:4,Gaudr\",\n \"yceras:32,Zelandites:32)Gaudryceratidae:4)Tetragonitoidea:97.100006, (Costidiscus:12.00000\",\n \"8,Macroscaphites:34.860008)Macroscaphitidae:64.139999)Ammonitida:30.222214, (Ammonitoceras\",\n \":98.570007,Argonauticeras:98.570007,Audaxlytoceras:27.600006,Holcolytoceras:21, (Eulytocer\",\n \"as:65.713333,Jaubertella:78.043335)clade_84:32.85667,(Ectocentrites:9.433334,(Adnethiceras\",\n \":8.166656, Galaticeras:14.766663)clade_87:8.166672,((Protetragonites:56.933334,Lytoceras:5\",\n \"0.833336)clade_89:50.833344, Pleuroacanthites:4.666672)clade_88:4.666656)Pleuroacanthitida\",\n \"e:4.666667,Pterolytoceras:65.100006) Psiloceratida:18.222214)clade_26:18.222229,((Juraphyl\",\n \"lites:6,Nevadaphyllites:6,Togaticeras:6, Tragophylloceras:12.600006)Juraphyllitidae:6,Hypo\",\n \"rbulites:107.300003,(Adabofoloceras:25.400009, Hypophylloceras:121.100006,Ptychophyllocera\",\n \"s:56.600006,Salfeldiella:56.600006,Holcophylloceras:61.150009, Phylloceras:121.100006,Leio\",\n \"phylloceras:46.800003)Phylloceratidae:15)Phylloceratida:45.444443)clade_25:18.222214) clad\",\n 
\"e_22:5.066681,(Paranannites:11.566666,(Proarcestes:8.383331,Ptychites:8.383331)clade_94:8.\",\n \"383331) clade_93:11.566681)clade_21:5.06665)clade_15:7.100006,(Deweveria:33.300003,Juvenit\",\n \"es:33.300003,(Cibolites:11.5, Kingoceras:22.5,Meitianoceras:24.199997,Paraceltites:4)Parac\",\n \"eltitidae:4,Preflorianites:33.300003, Xenodiscus:33.300003)Xenodiscoidea:2)clade_14:2,Cart\",\n \"eria:37.300003,Courtilloticeras:37.300003, Eschericeratites:37.300003,Tapponnierites:37.30\",\n \"0003)Ceratitida:101.025024,(((Daraelites:76.399994, Epicanites:23.299988,Praedaraelites:15\",\n \")Daraelitidae:28.300018,(Becanites:19.900024,Dombarocanites:47.100006, Eocanites:19.900024\",\n \",Merocanites:22.400024,Prolecanites:4.5)Prolecanitidae:4.5)Prolecanitina:1,((Neopronorites\",\n \":7, Sakmarites:14)Pronoritidae:17.5,(Artinskia:4.5,Bamyaniceras:34.5,Medlicottia:40.5,Prop\",\n \"inacoceras:31.5,Synartinskia:20, Uddenites:4.5)Medlicottiidae:4.5)Medlicottiina:66.700012)\",\n \"Prolecanitida:14.825012)clade_3:14.824982, (((Raymondiceras:6,(Dimeroceras:5,Paratornocera\",\n \"s:5)Dimeroceratidae:5)Dimerocerataceae:10,((Acrocanites:7, Jdaidites:7)Acrocanitidae:16.40\",\n \"0024,Kazakhstania:25.900024,Praeglyphiloceras:8,(Imitoceras:10.900024,Prionoceras:4, Triim\",\n \"itoceras:19.400024)Prionoceratidae:4,(Maeneceras:1,Sporadoceras:4)Sporadoceratidae:4)Prion\",\n \"ocerataceae:12, (Pseudoclymenia:5,(Discoclymenia:4.5,Posttornoceras:4.5)Posttornoceratidae\",\n \":4.5)Tornocerataceae:11)Tornoceratina:10, (Popanoceras:117.533356,((Epitornoceras:28,Falci\",\n \"tornoceras:28,Lobotornoceras:6.300018,Protornoceras:5, Tornoceras:18.100006)Tornoceratidae\",\n \":0.666656,(Cheiloceras:13,Torleyoceras:13)Cheiloceratidae:15.666656, Polonoceras:28.666656\",\n \")Cheilocerataceae:0.666687,((((Kargalites:30.344452,(Adrianites:22.5,Nevadoceras:11, Veruz\",\n \"hites:11)Adrianitidae:19.344452)clade_122:19.344452,Pintoceras:25.78891)clade_121:25.78887\",\n \"9, ((Waagenoceras:53.14447,((Metalegoceras:9,Pericycloceras:12)Metalegoceratidae:9.5,Uralo\",\n \"ceras:14) Neoicoceratoidea:25.64447)clade_125:25.64444,((Branneroceras:3,Diaboloceras:10.6\",\n \"49994,Paralegoceras:63.600006, Schistoceras:38.100006)Schistoceratidae:3,(Wellerites:10.64\",\n \"9994,Winslowoceras:3)Welleritidae:3) Schistocerataceae:17.188904)clade_124:17.188873)clade\",\n \"_120:17.188904,(Antegoniatites:9,Habadraites:9, Primogoniatites:9,Progoniatites:9)Goniatit\",\n \"idae:17.866699)clade_119:17.866669,(Dzhaprakoceras:23,(Follotites:18.5, Muensteroceras:10,\",\n \"Xinjiangites:10)Muensteroceratidae:2,(Ammonellipsites:12.5,Helicocyclus:10,Nodopericyclus:\",\n \"10, Ouaoufilalites:10,Pericyclus:10)Pericyclidae:10.5,(Eurites:10.25,Mouydiria:10.25,Rotop\",\n \"ericyclus:10.25) Rotopericlydae:10.25,(Jerania:6,Kusinia:6,Temertassetia:6)Temertassetiida\",\n \"e:14.5)Pericyclaceae:24.233368, Stacheoceras:136.033356)Goniatitina:0.666667)Goniatitida:9\",\n \".649994)clade_2:9.649994,(Gyroceratites:14.799988, ((Teicherticeras:8.93335,((((Probelocer\",\n \"as:36,(Timanites:12.600006,(Darkaoceras:2.5,Keuppites:2.5)Taouzitidae:2.5, (Gogoceras:10.1\",\n \"00006,Pseudoprobeloceras:2.5)Ponticeratidae:2.5,(Beloceras:9,Mesobeloceras:9) Beloceratida\",\n \"e:3.600006,(Archoceras:23.399994,Manticoceras:8,Mixomanticoceras:8,Sphaeromanticoceras:8) \",\n \"Gephuroceratidae:4.600006)Gephurocerataceae:8)Gephuroceratatina:1.033325,Agoniatites:14.03\",\n \"3325) clade_144:1.033325,Celaeceras:6.866669)clade_143:1.033325,((Werneroceras:0.399994,(S\",\n \"obolewia:0.200012, 
(((Cyrtoclymenia:2.5,Clymenia:2.5)Clymeniina:2.5,Protoxyclymenia:5,Plat\",\n \"yclymenia:5)Clymeniida:21.066681, (Lunupharciceras:1.533325,Pharciceras:9.133331,Stenophar\",\n \"ciceras:1.533325,Synpharciceras:1.533325) Pharciceratatina:1.533356)clade_154:1.533325)cla\",\n \"de_153:0.199982)clade_152:0.200002,Anarcestes:5) Anarcestina:11.099976)clade_142:1.033356)\",\n \"clade_141:1.033325,Anetoceras:9.966675)clade_140:1.03333) Agoniatitida:7.100006)clade_1;\")\n\nammoniteTreeRaia <- read.tree(text = ammoniteTreeRaia)\n\n# what about the root age?\n# Raia et al. are unclear\n # however... ahandful of taxa are known to last occur at the end-Cretaceous mass ext\n # Phylloceras\n#\n# Latest occurring tips are:\nammoniteTreeRaia$tip.label[\n which(node.depth.edgelength(ammoniteTreeRaia) == max(node.depth.edgelength(ammoniteTreeRaia)))]\n#\n# so we can treat distance of Phylloceras from root + end Cretaceous (66.043 Ma) as $root.time\n(ammoniteTreeRaia$root.time <- 66.043+\n node.depth.edgelength(ammoniteTreeRaia)[which(ammoniteTreeRaia$tip.label == \"Phylloceras\")])\n\n# now let's plot it\nplot(ladderize(ammoniteTreeRaia));axisPhylo()\n\n## End(No test)\n## Not run: \n##D \n##D # and let's load trait data from Raia et al. Appendix B:\n##D # FD = fractal dimension of first suture (suture complexity)\n##D # Log D = log of the mean shell diameter per genus (body size)\n##D # log dur = log of the stratigraphic duration in million years.\n##D ammoniteTraitsRaia <- read.table(\"ammoniteTraitsRaia.txt\",row.names = 1,header = TRUE)\n##D \n##D sutureComplexity <- ammoniteTraitsRaia$FD\n##D shellSize <- ammoniteTraitsRaia$Log_D\n##D names(shellSize) <- names(sutureComplexity) <- rownames(ammoniteTraitsRaia)\n##D \n##D plotTraitgram(tree = multi2di(ammoniteTreeRaia), trait = sutureComplexity,\n##D conf.int = FALSE, main = \"Ammonite Suture Complexity\")\n##D \n##D plotTraitgram(tree = multi2di(ammoniteTreeRaia), trait = shellSize,\n##D conf.int = FALSE, main = \"Ammonite Shell Diameter\")\n##D \n## End(Not run)\n## No test: \n########################################\n# ceratopsian species\n\nceratopsianTreeRaia <- paste0(\"((((((((((((Centrosaurus_apertus:5.1,Styracosaurus_alberte\",\n \"nsis:5.9):1,(((Pachyrhinosaurus_perotorum:10.5,Pachyrhinosaurus_lakustai:7):0.5,Achelousau\",\n \"rus_horneri:6.3):0.5,Einiosaurus_procurvicornis:6.5):1):0.5, Avaceratops_lammersi:5.5):0.5\",\n \",Diabloceratops_eatoni:3):1.1,((Chasmosaurus_russelli:1.4,Chasmosaurus_belli:1.6):2.5, (Mo\",\n \"joceratops_perifania:3.7,(Agujaceratops_mariscalensis:1.9,((Pentaceratops_sternbergii:3.5,\",\n \" Utahceratops_gettyi:1):1.5,((Vagaceratops_irvinensis:1.3,Kosmoceratops_richardsoni:1):0.4\",\n \",(Anchiceratops_ornatus:3.9, (Arrhinoceratops_brachyops:3.9,(Torosaurus_latus:3,(Tricerato\",\n \"ps_horridus:2, Triceratops_prorsus:2):1):6):0.5):1.7):1):0.5):0.5):0.5):3.8):12.9,(Bagacer\",\n \"atops_rozhdestvenskyi:17, (Protoceratops_hellenikorhinus:9.5,Protoceratops_andrewsi:9.5):1\",\n \"2):4.5):6,(Prenoceratops_pieganensis:21, Leptoceratops_gracilis:31.6):4.5):7.5,Archaeocera\",\n \"tops_oshimai:6):5,Auroraceratops_rugosus:15):21, Liaoceratops_yanzigouensis:6):4,(Hongshan\",\n \"osaurus_houi:9,(Psittacosaurus_mongoliensis:33.5, (Psittacosaurus_meileyingensis:20,(Psitt\",\n \"acosaurus_major:7.5,(Psittacosaurus_gobiensis:21,(Psittacosaurus_sinensis:24, Psittacosaur\",\n \"us_neimongoliensis:18):1):1.5):0.5):0.5):0.5):1):23,Yinlong_downsi:6):3;\")\n\nceratopsianTreeRaia <- read.tree(text = ceratopsianTreeRaia)\n\n# Raia et al. 
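gave an explicit root age for their ceratopsian tree, quoted below.\n\n# An added aside: the root-age step used above for the ammonites can be\n # wrapped as a small helper. This is a hypothetical convenience\n # function, not part of paleotree; it assumes you know the absolute\n # age of one latest-occurring tip\nsetRootTimeFromTip <- function(tree, tipLabel, tipAge){\n    # tip/node depths from the root, via ape\n    depths <- ape::node.depth.edgelength(tree)\n    tree$root.time <- tipAge + depths[which(tree$tip.label == tipLabel)]\n    tree\n}\n# e.g., for the ammonite tree above:\n# ammoniteTreeRaia <- setRootTimeFromTip(ammoniteTreeRaia, \"Phylloceras\", 66.043)\n\n# As noted, Raia et al. 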
placed origin of ceratopsians at ~163 Ma, base of Oxfordian\nceratopsianTreeRaia$root.time <- 163\n\nplot(ceratopsianTreeRaia);axisPhylo()\n\n###############################################\n# cervid species\n\ncervidTreeRaia <- paste0(\"((((Lagomeryx_parvulus:9.925998,Lagomeryx_pumilio:10.775998):3.\",\n \"25,(Procervulus_flerovi:11.425998,Procervulus_dichotomus:7.025998,Procervulus_praelucidus:\",\n \"5.675998):3.25,(Stephanocemas_aralensis:6.925998, Stephanocemas_thomsoni:11.175998):2):2,(\",\n \"((Euprox_furcatus:14.440997,Euprox_minimus:12.590997, Euprox_dicranoceros:14.190997):2.185\",\n \"001,Heteroprox_larteti:12.175998):1.5,Muntiacus_muntjak:25.531498):1.5):1.5, (((((Alces_la\",\n \"tifrons:7.151589758,Alces_alces:7.245998):2.29,Cervalces_scotti:9.525768):6.64, Rangifer_t\",\n \"arandus:16.175998):4.35,(Procapreolus_loczyi:17.840998,Capreolus_capreolus:17.905998):2.62\",\n \"5):5.25, (((Cervavitus_novorossiae:9.109332,Cervavitus_variabilis:9.379332):7.149999, Plio\",\n \"cervus_pentelici:13.069331):2.966667,(((((Dama_clactoniana:5.133775345,Dama_dama:5.199332)\",\n \":2.903333, (Pseudodama_farnetensis:5.860846548,Pseudodama_lyra:4.242887928, Pseudodama_nes\",\n \"tii:5.762011259):2.083333):4.166667,(Eucladoceros_ctenoides:6.892665, Eucladoceros_dicrani\",\n \"os:7.692563015):4.166667):2.083333,((Cervus_elaphus:5.734332,Cervus_nippon:5.744332, Rusa_\",\n \"timorensis:5.740332,Rusa_unicolor:5.744332,Cervus_duvaucelii:5.671332):3.4, Croizetoceros_\",\n \"ramosus:7.834332):5.208333):2.083333,((Praemegaceros_verticornis:9.610727017, (Megaceroide\",\n \"s_obscurus:6.084504245,Megaceroides_solilhacus:6.725161676):2.883334):2.883333, (Megalocer\",\n\t\"os_savini:7.349060017,Megaloceros_giganteus:7.430999):5.145):3.849999):6.6):2.75):2.75);\")\n\ncervidTreeRaia <- read.tree(text = cervidTreeRaia)\n\n# Many of the latest-occurring tips are still extant, like Rusa unicolor and Dama dama:\ncervidTreeRaia$tip.label[\n\twhich(node.depth.edgelength(cervidTreeRaia) == max(node.depth.edgelength(cervidTreeRaia)))]\n\n# note!\n# if you plot the tree there seem to be a lot more taxa that are *almost* as late-occurring\n\t# unclear if this is recently extinct taxa, computational rounding error, or what\n\n# so we can treat distance of Dama dama to root as $root.time\n(cervidTreeRaia$root.time <- \n\tnode.depth.edgelength(cervidTreeRaia)[which(cervidTreeRaia$tip.label == \"Dama_dama\")])\n\nplot(cervidTreeRaia);axisPhylo()\n\n## End(No test)\n## Not run: \n##D \n##D save.image(\"RaiaCopesRule.rdata\")\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"SamplingConv","snippet":"### Name: SamplingConv\n### Title: Converting Sampling Estimates\n### Aliases: SamplingConv sProb2sRate sRate2sProb pqsRate2sProb qsProb2Comp\n### qsRate2Comp\n\n### ** Examples\n\n\nsRate2sProb(r = 0.5)\nsProb2sRate(R = 0.1)\npqsRate2sProb(r = 0.5,p = 0.1,q = 0.1)\n\n# different modes can be tried\nqsProb2Comp(R = 0.1,q = 0.1,mode = \"budding\")\nqsProb2Comp(R = 0.1,q = 0.1,mode = \"bifurcating\")\n\nqsRate2Comp(r = 0.1,q = 0.1)\n\n\n"} {"package":"paleotree","topic":"SongZhangDicrano","snippet":"### Name: SongZhangDicrano\n### Title: Cladistic Data for Dicranograptid Graptolites from Song and\n### Zhang (2014)\n### Aliases: SongZhangDicrano charMatDicrano cladogramDicranoX12\n### cladogramDicranoX13\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(SongZhangDicrano)\n\n# Examining morphospace with a distance matrix\n\n# calculate a distance matrix from the morph character data\nchar <- charMatDicrano[,-22]\t# 
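remove strat character (column 22)\n\n# An added aside: the pairwise distance loop below can also be phrased\n # as a small helper plus sapply(); this is a sketch only, assuming\n # simple matching of states non-missing in both taxa, i.e. the same\n # logic as the loop that follows\nmatchDist <- function(a, b){\n    ok <- !is.na(a) & !is.na(b)\n    1 - sum(a[ok] == b[ok]) / sum(ok)\n}\n# charDistAlt <- sapply(1:nrow(char), function(i)\n#     sapply(1:nrow(char), function(j) matchDist(char[i, ], char[j, ])))\n\n# as noted above, we dropped column 22 to 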
remove strat character\ncharDist <- matrix(,nrow(char),nrow(char))\nrownames(charDist) <- colnames(charDist) <- rownames(char)\nfor(i in 1:nrow(char)){for(j in 1:nrow(char)){\n\tcharDiff <- logical()\n\tfor(k in 1:ncol(char)){\n\t\tselectPair <- char[c(i,j),k]\n\t\tif(all(!is.na(selectPair))){\n\t\t\t#drop states that are missing\t\t\t\n\t\t\tisSame <- identical(selectPair[1],selectPair[2])\n\t\t\tcharDiff <- c(charDiff,isSame)\n\t\t\t}\n\t\t}\n\tcharDist[i,j] <- 1-sum(charDiff)/length(charDiff)\n\t}}\n\n#####\n# PCO of character distance matrix\n\n#can apply PCO (use lingoes correction to account for negative values\n #resulting from non-euclidean matrix)\npco_res <- pcoa(charDist,correction = \"lingoes\")\n\n#relative corrected eigenvalues\nrel_corr_eig <- pco_res$values$Rel_corr_eig\nlayout(1:2)\nplot(rel_corr_eig)\n#cumulative\nplot(cumsum(rel_corr_eig))\n\n#well let's look at those PCO axes anyway\nlayout(1)\npco_axes <- pco_res$vectors\nplot(pco_axes[,1],pco_axes[,2],pch = 16,\n xlab = paste(\"PCO Axis 1, Rel. Corr. Eigenvalue = \",round(rel_corr_eig[1],3)),\n ylab = paste(\"PCO Axis 2, Rel. Corr. Eigenvalue = \",round(rel_corr_eig[2],3)))\n\n#######\n\n# plot 12 taxon majority rule tree from Song and Zhang\nplot(cladogramDicranoX12,\n\tmain = \"MajRule_24charX12Taxa_wBiostratChar\")\n\n# plot 13 taxon MPT\nplot(cladogramDicranoX13,\n\tmain = \"MPT_24charX13Taxa_wBiostratChar\")\n\n##############\n\n## Not run: \n##D # Data was generated with the following script:\n##D require(ape)\n##D require(phylobase)\n##D \n##D charMatDicrano <- readNexus(file.choose(),type = \"data\",SYMBOLS = \" 0 1 2\")\n##D \n##D cladogramDicranoX12 <- read.tree(file.choose())\n##D cladogramDicranoX13 <- read.nexus(file.choose())\n##D \n##D cladogramDicranoX13$tip.label <- rownames(\n##D \t charMatDicrano)[c(13,8,7,9,12,10,1,4,6,2,3,11,5)]\n##D \n##D save(charMatDicrano,cladogramDicranoX12,file = \"SongZhangDicrano.rdata\")\n## End(Not run)\n\n\n"} {"package":"paleotree","topic":"binTimeData","snippet":"### Name: binTimeData\n### Title: Bin Simulated Temporal Ranges in Discrete Intervals\n### Aliases: binTimeData\n\n### ** Examples\n\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record with imperfect sampling via sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.5)\n# Now let's use binTimeData() to bin in intervals of 1 time unit\nrangesDisc <- binTimeData(rangesCont,int.length = 1)\n# plot with taxicDivDisc()\nequalDiscInt <- taxicDivDisc(rangesDisc)\n\n# example with pre-set intervals input (including overlapping)\npresetIntervals <- cbind(\n c(1000, 990, 970, 940),\n c(980, 970, 950, 930)\n )\nrangesDisc1 <- binTimeData(rangesCont,\n int.times = presetIntervals)\n\n# plot the diversity curve with these uneven bins\ntaxicDivDisc(rangesDisc1)\n\n# now let's plot the diversity from these unequal-length bins\n # with the original equal length intervals from above\ntaxicDivDisc(rangesDisc1, int.times = equalDiscInt[,1:2])\n\n\n####################################\n#example with extant taxa\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40)\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record \n # with imperfect sampling via sampleRanges\nrangesCont <- sampleRanges(\n taxa, r = 0.5,\n modern.samp.prob = 1)\n# Now let's use binTimeData to bin into intervals of 
1 time-unit\nrangesDisc <- binTimeData(rangesCont,\n int.length = 1)\n# plot with taxicDivDisc()\ntaxicDivDisc(rangesDisc)\n\n# example with pre-set intervals input\n # (including overlapping)\npresetIntervals <- cbind(\n c(40, 30, 20, 10),\n c(30, 20, 10, 0)\n )\nrangesDisc1 <- binTimeData(rangesCont,\n int.times = presetIntervals)\n \ntaxicDivDisc(rangesDisc1)\n\n\n\n"} {"package":"paleotree","topic":"branchClasses","snippet":"### Name: branchClasses\n### Title: Partitions the branch lengths of a tree into several classes\n### based on their placement.\n### Aliases: branchClasses\n\n### ** Examples\n\n#simulated example\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = c(10,20)\n )\ntaxa <- fossilRecord2fossilTaxa(record)\ntree <- taxa2phylo(taxa)\nbrlenRes <- branchClasses(tree)\n\n#see frequency histograms of branch lengths\nlayout(1:4)\nfor(x in 1:length(brlenRes)){ \n\thist(\n\t brlenRes[[x]],\n\t main = \"Branch Lengths\",\n\t xlab = names(brlenRes)[x])\n\t}\n\n#see frequency histograms of branch depths\nlayout(1:4)\nfor(x in 1:length(brlenRes)){ \n\thist(\n\t as.numeric(names(brlenRes[[x]])),\n\t main = \"Branch Depths\",\n\t xlab = names(brlenRes)[x])\n\t}\n\nlayout(1)\n\n\n"} {"package":"paleotree","topic":"cal3TimePaleoPhy","snippet":"### Name: cal3TimePaleoPhy\n### Title: Three Rate Calibrated _a posteriori_ Dating of Paleontological\n### Phylogenies\n### Aliases: cal3TimePaleoPhy bin_cal3TimePaleoPhy cal3\n\n### ** Examples\n\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1,\n q = 0.1,\n nruns = 1,\n\t nTotalTaxa = c(30,40),\n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n\n# simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,\n r = 0.5)\n# let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\ncladogram <- taxa2cladogram(taxa,\n plot = TRUE)\n\n# this package allows one to use\n # rate calibrated type time-scaling methods (Bapst, 2014)\n# to use these, we need an estimate of the sampling rate \n # (we set it to 0.5 above)\nlikFun <- make_durationFreqCont(rangesCont)\nsrRes <- optim(\n parInit(likFun),\n likFun,\n lower = parLower(likFun),\n upper = parUpper(likFun),\n method = \"L-BFGS-B\",\n control = list(maxit = 1000000))\nsRate <- srRes[[1]][2]\n\n# we also need extinction rate and branching rate\n # we can get extRate from getSampRateCont too\n# we'll assume extRate = brRate (ala Foote et al., 1999)\n # this may not always be a good assumption!\ndivRate <- srRes[[1]][1]\n\n# now let's try cal3TimePaleoPhy\n # which time-scales using a sampling rate to calibrate\n# This can also resolve polytomies based on\n # sampling rates, with some stochastic decisions\nttree <- cal3TimePaleoPhy(\n cladogram,\n rangesCont,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate,\n ntrees = 1,\n plot = TRUE)\n \n# notice the warning it gives!\nphyloDiv(ttree)\n\n# by default, cal3TimePaleoPhy may predict indirect ancestor-descendant relationships\n # can turn this off by setting anc.wt = 0\nttree <- cal3TimePaleoPhy(\n cladogram,\n rangesCont,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate,\n ntrees = 1,\n anc.wt = 0,\n plot = TRUE)\n\n## No test: \n# let's look at how three trees generated\n # with very different time of obs. 
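look.\n\n# An added aside (hypothetical helper, not part of paleotree): the\n # optim() recipe above is reused throughout these examples, so a\n # wrapper saves repetition; the settings are the same as above\nfitPaleoLik <- function(likFun){\n    optim(parInit(likFun), likFun,\n        lower = parLower(likFun), upper = parUpper(likFun),\n        method = \"L-BFGS-B\", control = list(maxit = 1000000))\n}\n# e.g. srRes <- fitPaleoLik(make_durationFreqCont(rangesCont))\n\n# now, let's see how three trees generated\n # with very different time of obs. 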
look\n \nttreeFAD <- cal3TimePaleoPhy(\n cladogram, \n rangesCont,\n brRate = divRate,\n extRate = divRate,\n FAD.only = TRUE,\n dateTreatment = \"firstLast\",\n sampRate = sRate,\n ntrees = 1,\n plot = TRUE)\n \nttreeRand <- cal3TimePaleoPhy(\n cladogram, \n rangesCont,\n brRate = divRate,\n extRate = divRate,\n FAD.only = FALSE,\n dateTreatment = \"randObs\",\n sampRate = sRate,\n ntrees = 1,plot = TRUE)\n \n# by default the time of observations are the LADs\nttreeLAD <- cal3TimePaleoPhy(\n cladogram, \n rangesCont,\n brRate = divRate,\n extRate = divRate,\n FAD.only = FALSE,\n dateTreatment = \"randObs\",\n sampRate = sRate,\n ntrees = 1,\n plot = TRUE)\n\n# and let's plot\nlayout(1:3)\nparOrig <- par(no.readonly = TRUE)\npar(mar = c(0,0,0,0))\nplot(ladderize(ttreeFAD));text(5,5,\n \"time.obs = FAD\",\n cex = 1.5, pos = 4)\nplot(ladderize(ttreeRand));text(5,5,\n \"time.obs = Random\",\n cex = 1.5, pos = 4)\nplot(ladderize(ttreeLAD));text(5,5,\n \"time.obs = LAD\",\n cex = 1.5, pos = 4)\nlayout(1)\npar(parOrig)\n\n# to get a fair sample of trees\n # let's increase ntrees\n \nttrees <- cal3TimePaleoPhy(\n cladogram,\n rangesCont,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate,\n ntrees = 9,\n plot = FALSE)\n \n# let's compare nine of them at once in a plot\n \nlayout(matrix(1:9,3,3))\nparOrig <- par(no.readonly = TRUE)\npar(mar = c(0,0,0,0))\nfor(i in 1:9){\n plot(ladderize(ttrees[[i]]),\n show.tip.label = FALSE)\n }\nlayout(1)\npar(parOrig)\n# they are all a bit different!\n\n# can plot the median diversity curve with multiDiv\nmultiDiv(ttrees)\n\n# using node.mins\n# let's say we have (molecular??) evidence that\n # node (5) is at least 1200 time-units ago\n# to use node.mins, first need to drop any unshared taxa\ndroppers <- cladogram$tip.label[is.na(\n match(cladogram$tip.label,\n names(which(!is.na(rangesCont[,1])))\n )\n )\n ]\n \n# and then drop those taxa\ncladoDrop <- drop.tip(cladogram, droppers)\n \n# now make vector same length as number of nodes\nnodeDates <- rep(NA, Nnode(cladoDrop))\nnodeDates[5] <- 1200\nttree <- cal3TimePaleoPhy(cladoDrop,\n rangesCont,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate,\n ntrees = 1,\n node.mins = nodeDates,\n plot = TRUE)\n\n# example with time in discrete intervals\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1,\n q = 0.1,\n nruns = 1,\n nTotalTaxa = c(30,40),\n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record\n # with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.5)\n# let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\ncladogram <- taxa2cladogram(taxa,plot = TRUE)\n# Now let's use binTimeData to bin in intervals of 1 time unit\nrangesDisc <- binTimeData(rangesCont,int.length = 1)\n \n# we can do something very similar for\n # the discrete time data (can be a bit slow)\nlikFun <- make_durationFreqDisc(rangesDisc)\nspRes <- optim(\n parInit(likFun),\n likFun,\n lower = parLower(likFun),\n upper = parUpper(likFun),\n method = \"L-BFGS-B\",\n control = list(maxit = 1000000))\nsProb <- spRes[[1]][2]\n \n# but that's the sampling PROBABILITY per bin\n # NOT the instantaneous rate of change\n \n# we can use sProb2sRate() to get the rate\n # We'll need to also tell it the int.length\nsRate1 <- sProb2sRate(sProb,int.length = 1)\n \n# we also need extinction rate and branching rate (see above)\n # need to divide by int.length...\ndivRate <- spRes[[1]][1]/1\n \n# estimates that r = 0.3... 
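(per time-unit, from the discrete data)\n\n# An added aside: a quick sketch tabulating the rate-to-probability\n # conversion used here, via sRate2sProb (int.length = 1, as in this\n # example)\nrateCheck <- c(0.1, 0.3, 0.5, 1)\nsapply(rateCheck, sRate2sProb, int.length = 1)\n\n# so, again: the discrete-interval fit estimates r = 0.3... 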
\n # that's kind of low (simulated sampling rate is 0.5)\n# Note: for real data, you may need to use an average int.length \n # (i.e. if intervals aren't all the same duration)\nttree <- bin_cal3TimePaleoPhy(cladogram,\n rangesDisc,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate1,\n ntrees = 1,\n plot = TRUE)\nphyloDiv(ttree)\n \n# can also force the appearance timings\n # not to be chosen stochastically\nttree1 <- bin_cal3TimePaleoPhy(cladogram,\n rangesDisc,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate1,\n ntrees = 1,\n nonstoch.bin = TRUE,\n plot = TRUE)\nphyloDiv(ttree1)\n\n# testing node.mins in bin_cal3TimePaleoPhy\nttree <- bin_cal3TimePaleoPhy(cladoDrop,\n rangesDisc,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate1,\n ntrees = 1,\n node.mins = nodeDates,\n plot = TRUE)\n# with randres = TRUE\nttree <- bin_cal3TimePaleoPhy(cladoDrop,\n rangesDisc,\n brRate = divRate,\n extRate = divRate,\n sampRate = sRate1,\n ntrees = 1,\n randres = TRUE,\n node.mins = nodeDates,\n plot = TRUE)\n\n\n# example with multiple values of anc.wt\nancWt <- sample(0:1,\n nrow(rangesDisc[[2]]),\n replace = TRUE)\nnames(ancWt) <- rownames(rangesDisc[[2]])\n \nttree1 <- bin_cal3TimePaleoPhy(cladogram,\n rangesDisc,\n brRate = divRate, \n extRate = divRate,\n sampRate = sRate1, \n ntrees = 1,\n anc.wt = ancWt, \n plot = TRUE)\n \n## End(No test)\n\n\n\n"} {"package":"paleotree","topic":"cladogeneticTraitCont","snippet":"### Name: cladogeneticTraitCont\n### Title: Simulate Cladogenetic Trait Evolution\n### Aliases: cladogeneticTraitCont\n\n### ** Examples\n\n\n## No test: \n \n\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30, 1000), \n plot = TRUE)\ntaxa <- fossilRecord2fossilTaxa(record)\ntrait <- cladogeneticTraitCont(taxa)\ntree <- taxa2phylo(taxa)\nplotTraitgram(trait, tree,\n conf.int = FALSE)\n\n#with cryptic speciation\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n prop.cryptic = 0.5, \n nruns = 1, \n nTotalTaxa = c(30, 1000), \n plot = TRUE)\ntaxa <- fossilRecord2fossilTaxa(record)\ntrait <- cladogeneticTraitCont(taxa)\ntree <- taxa2phylo(taxa)\nplotTraitgram(trait, tree,\n conf.int = FALSE)\n\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"communityEcology","snippet":"### Name: communityEcology\n### Title: Miscellaneous Functions for Community Ecology\n### Aliases: communityEcology pairwiseSpearmanRho HurlbertPIE PIE\n### ProbabilityInterspecificEncounter\n\n### ** Examples\n\n\n# let's load some example data:\n# a classic dataset collected by Satoshi & Okido from the Kanto region\n\ndata(kanto)\n\nrhoBothAbsent <- pairwiseSpearmanRho(kanto,dropAbsent = \"bothAbsent\")\n\n#other dropping options\nrhoEitherAbsent <- pairwiseSpearmanRho(kanto,dropAbsent = \"eitherAbsent\")\nrhoNoDrop <- pairwiseSpearmanRho(kanto,dropAbsent = \"noDrop\")\n\n#compare\nlayout(1:3)\nlim <- c(-1,1)\nplot(rhoBothAbsent, rhoEitherAbsent, xlim = lim, ylim = lim)\n\tabline(0,1)\nplot(rhoBothAbsent, rhoNoDrop, xlim = lim, ylim = lim)\n\tabline(0,1)\nplot(rhoEitherAbsent, rhoNoDrop, xlim = lim, ylim = lim)\n\tabline(0,1)\nlayout(1)\n\n#using dropAbsent = \"eitherAbsent\" reduces the number of taxa so much that\n\t# the number of taxa present drops too low to be useful\n#dropping none of the taxa restricts the rho measures to high coefficients\n\t# due to the many shared 0s for absent taxa\n\n#############\n\n# Try the rho coefficients as a rescaled dissimilarity\nrhoDist <- pairwiseSpearmanRho(kanto,asDistance = TRUE,dropAbsent = 
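\"bothAbsent\")\n\n# An added aside: for one pair of sites, the coefficient should match\n # base R's Spearman correlation once taxa absent from both sites are\n # dropped; a sketch of that check (an assumption about the method,\n # stated here only for illustration)\nsite1 <- as.numeric(kanto[1, ])\nsite2 <- as.numeric(kanto[2, ])\nkeep <- (site1 + site2) > 0\ncor(site1[keep], site2[keep], method = \"spearman\")\n\n# restating the rescaled-dissimilarity call interrupted above:\nrhoDist <- pairwiseSpearmanRho(kanto, asDistance = TRUE, dropAbsent = 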
\"bothAbsent\")\n\n# What happens if we use these in typical distance matrix based analyses?\n\n# Cluster analysis\nclustRes <- hclust(rhoDist)\nplot(clustRes)\n\n# Principle Coordinates Analysis\npcoRes <- pcoa(rhoDist,correction = \"lingoes\")\nscores <- pcoRes$vectors\n#plot the PCO\nplot(scores,type = \"n\")\ntext(labels = rownames(kanto),scores[,1],scores[,2],cex = 0.5)\n\n##################################\n\n# measuring evenness with Hurlbert's PIE\n\nkantoPIE <- HurlbertPIE(kanto)\n\n#histogram\nhist(kantoPIE)\n#evenness of the kanto data is fairly high\n\n#barplot\nparX <- par(mar = c(7,5,3,3))\nbarplot(kantoPIE,las = 3,cex.names = 0.7,\n\tylab = \"Hurlbert's PIE\",ylim = c(0.5,1),xpd = FALSE)\npar(parX)\n\n#and we can see that the Tower has extremely low unevenness\n\t#...overly high abundance of ghosts?\n\n# NOTE it doesn't matter whether we use absolute abundances\n# or proportional (relative) abundances\nkantoProp<-t(apply(kanto,1,function(x) x/sum(x)))\nkantoPropPIE <- HurlbertPIE(kantoProp)\nidentical(kantoPIE,kantoPropPIE)\n\n#let's look at evenness of 5 most abundant taxa\nkantoPIE_5 <- HurlbertPIE(kanto,nAnalyze = 5)\n\n#barplot\nparX <- par(mar = c(7,5,3,3))\nbarplot(kantoPIE_5,las = 3,cex.names = 0.7,\n\tylab = \"Hurlbert's PIE for 5 most abundant taxa\",ylim = c(0.5,1),xpd = FALSE)\npar(parX)\n\n\n"} {"package":"paleotree","topic":"compareTimescaling","snippet":"### Name: compareTimescaling\n### Title: Comparing the Time-Scaling of Trees\n### Aliases: compareTimescaling compareNodeAges compareTermBranches\n\n### ** Examples\n\n\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n#get the true tree\ntree1 <- taxa2phylo(taxa)\n#simulate a fossil record with imperfect sampling with sampleRanges()\nrangesCont <- sampleRanges(taxa,r = 0.5)\n#let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\ncladogram <- taxa2cladogram(taxa,plot = TRUE)\n#Now let's try timePaleoPhy using the continuous range data\ntree2 <- timePaleoPhy(cladogram,rangesCont,type = \"basic\")\n#let's look at the distribution of node shifts\nhist(compareNodeAges(tree1,tree2))\n#let's look at the distribution of terminal branch lengths\nhist(compareTermBranches(tree1,tree2))\n\n#testing ability to compare multiple trees with compareNodeAges\ntrees <- cal3TimePaleoPhy(cladogram,rangesCont,brRate = 0.1,extRate = 0.1,\n sampRate = 0.1,ntrees = 10)\nnodeComparison <- compareNodeAges(tree1,trees)\n#plot it as boxplots for each node\nboxplot(nodeComparison,names = NULL);abline(h = 0)\n#plot mean shift in node dates\nabline(h = mean(apply(nodeComparison,2,mean,na.rm = TRUE)),lty = 2)\n\n#just shifting a tree back in time\nset.seed(444)\ntree1 <- rtree(10)\ntree2 <- tree1\ntree1$root.time <- 10\ncompareNodeAges(tree1,tree2)\ncompareTermBranches(tree1,tree2)\n\n\n\n"} {"package":"paleotree","topic":"constrainParPaleo","snippet":"### Name: constrainParPaleo\n### Title: Constrain Parameters for a Model Function from paleotree\n### Aliases: constrainParPaleo\n\n### ** Examples\n\n# simulation example with make_durationFreqCont, with three random groups\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\nrangesCont <- sampleRanges(taxa,r = 0.5)\n# create a groupings matrix\ngrp1 <- matrix(\n sample(1:3,nrow(taxa),replace = TRUE), , 1) \nlikFun <- make_durationFreqCont(rangesCont, groups = 
grp1)\n\n# can constrain both extinction rates to be equal\nconstrainFun <- constrainParPaleo(likFun, q.2 ~ q.1)\n\n#see the change in parameter names and bounds\nparnames(likFun)\nparnames(constrainFun)\nparbounds(likFun)\nparbounds(constrainFun)\n\n# some more ways to constrain stuff!\n\n#constrain all extinction rates to be equal\nconstrainFun <- constrainParPaleo(likFun, q.all ~ q.1)\nparnames(constrainFun)\n\n#constrain all rates for everything to be a single parameter\nconstrainFun <- constrainParPaleo(likFun, r.all ~ q.all)\nparnames(constrainFun)\n\n#constrain all extinction rates to be equal & all sampling to be equal\nconstrainFun <- constrainParPaleo(likFun, q.all ~ q.1, r.all ~ r.1)\nparnames(constrainFun)\n\n#similarly, can use match.all to make all matching parameters equal each other\nconstrainFun <- constrainParPaleo(likFun, match.all ~ match.all)\nparnames(constrainFun)\n\n#Constrain rates in same group to be equal\nconstrainFun <- constrainParPaleo(likFun, r.match ~ q.match)\nparnames(constrainFun)\n\n\n\n"} {"package":"paleotree","topic":"createMrBayesConstraints","snippet":"### Name: createMrBayesConstraints\n### Title: Transform a Topology into a Set of Constraint Commands for\n### MrBayes\n### Aliases: createMrBayesConstraints\n\n### ** Examples\n\nset.seed(444)\ntree <- rtree(10)\ncreateMrBayesConstraints(tree)\ncreateMrBayesConstraints(tree,partial = FALSE)\n\n## Not run: \n##D \n##D createMrBayesConstraints(tree,file = \"topoConstraints.txt\")\n##D \n## End(Not run)\n\n\n"} {"package":"paleotree","topic":"createMrBayesTipCalibrations","snippet":"### Name: createMrBayesTipCalibrations\n### Title: Construct A Block of Tip Age Calibrations for Use with\n### Tip-Dating Analyses in MrBayes\n### Aliases: createMrBayesTipCalibrations\n\n### ** Examples\n\n\n# load retiolitid dataset\ndata(retiolitinae)\n\n# uniform prior, with a 10 million year offset for\n\t# the expected tree age from the earliest first appearance\n\ncreateMrBayesTipCalibrations(\n tipTimes = retioRanges, \n whichAppearance = \"first\",\n\t ageCalibrationType = \"uniformRange\", \n\t treeAgeOffset = 10)\n\n# fixed prior, at the earliest bound for the first appearance\n\ncreateMrBayesTipCalibrations(\n tipTimes = retioRanges, \n whichAppearance = \"first\",\n\t ageCalibrationType = \"fixedDateEarlier\", \n\t treeAgeOffset = 10\n\t )\n\n# fixed prior, sampled from between the bounds on the last appearance\n\t# you should probably never do this, fyi\n\ncreateMrBayesTipCalibrations(\n tipTimes = retioRanges, \n whichAppearance = \"first\",\n\t ageCalibrationType = \"fixedDateRandom\", \n\t treeAgeOffset = 10\n\t )\n\n\n## Not run: \n##D \n##D createMrBayesTipCalibrations(\n##D tipTimes = retioRanges, \n##D whichAppearance = \"first\",\n##D \t ageCalibrationType = \"uniformRange\", \n##D \t treeAgeOffset = 10, \n##D \t file = \"tipCalibrations.txt\"\n##D \t )\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"createMrBayesTipDatingNexus","snippet":"### Name: createMrBayesTipDatingNexus\n### Title: Construct a Fully Formatted NEXUS Script for Performing\n### Tip-Dating Analyses With MrBayes\n### Aliases: createMrBayesTipDatingNexus tipdating\n\n### ** Examples\n\n\n# load retiolitid dataset\ndata(retiolitinae)\n\n# let's try making a NEXUS file!\n\n# Use a uniform prior, with a 10 million year offset for\n\t # the expected tree age from the earliest first appearance\n\n# Also set average tree age to be 10 Ma earlier than first FAD\n\noutgroupRetio <- \"Rotaretiolites\" \n# this taxon will now be 
sister to all other included taxa\n\n# the following will create a NEXUS file \n # with an 'empty' morph matrix\n\t # where the only topological constraint is on ingroup monophyly\n\t # Probably shouldn't do this: leaves too much to the FBD prior\n \n# with doNotRun set to TRUE for troubleshooting\n\ncreateMrBayesTipDatingNexus(\ntipTimes = retioRanges,\n\t\toutgroupTaxa = outgroupRetio,\n\t\ttreeConstraints = NULL,\n\t\tageCalibrationType = \"uniformRange\",\n\t\twhichAppearance = \"first\",\n\t\ttreeAgeOffset = 10,\t\n\t\tnewFile = NULL,\t\n\t\torigNexusFile = NULL,\n\t\tcreateEmptyMorphMat = TRUE,\n\t\trunName = \"retio_dating\",\n\t\tdoNotRun = TRUE\n\t\t)\n\n# let's try it with a tree for topological constraints\n # this requires setting outgroupTaxa to NULL\n# let's also set doNotRun to FALSE\n\ncreateMrBayesTipDatingNexus(\n tipTimes = retioRanges,\n\t\toutgroupTaxa = NULL,\n\t\ttreeConstraints = retioTree,\n\t\tageCalibrationType = \"uniformRange\",\n\t\twhichAppearance = \"first\",\n\t\ttreeAgeOffset = 10,\t\n\t\tnewFile = NULL,\t\n\t\torigNexusFile = NULL,\n\t\tcreateEmptyMorphMat = TRUE,\n\t\trunName = \"retio_dating\",\n\t\tdoNotRun = FALSE\n\t\t)\n\n# the above is essentially cal3 with a better algorithm,\n\t\t# and no need for a priori rate estimates\n# just need a tree and age estimates for the tips!\n\n####################################################\n# some more variations for testing purposes\n\n# no morph matrix supplied or generated\n\t# you'll need to manually append to an existing NEXUS file\n\t\ncreateMrBayesTipDatingNexus(\n tipTimes = retioRanges,\n\t\toutgroupTaxa = NULL,\n\t\ttreeConstraints = retioTree,\n\t\tageCalibrationType = \"uniformRange\",\n\t\twhichAppearance = \"first\",\n\t\ttreeAgeOffset = 10,\n\t\tnewFile = NULL,\t\n\t\torigNexusFile = NULL,\n\t\tcreateEmptyMorphMat = FALSE,\n\t\trunName = \"retio_dating\",\n\t\tdoNotRun = TRUE\n\t\t)\n\n## Not run: \n##D \n##D # let's actually try writing an example with topological constraints\n##D \t# to file and see what happens\n##D \n##D # here's my super secret MrBayes directory\n##D file <- \"D:\\\\dave\\\\workspace\\\\mrbayes\\\\exampleRetio.nex\"\n##D \n##D createMrBayesTipDatingNexus(\n##D tipTimes = retioRanges,\n##D \t\toutgroupTaxa = NULL,\n##D \t\ttreeConstraints = retioTree,\n##D \t\tageCalibrationType = \"uniformRange\",\n##D \t\twhichAppearance = \"first\",\n##D \t\ttreeAgeOffset = 10,\t\n##D \t\tnewFile = file,\t\n##D \t\torigNexusFile = NULL,\n##D \t\tcreateEmptyMorphMat = TRUE,\n##D \t\trunName = \"retio_dating\",\n##D \t\tdoNotRun = FALSE\n##D \t\t)\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"dateNodes","snippet":"### Name: dateNodes\n### Title: Absolute Dates for Nodes of a Time-Scaled Phylogeny\n### Aliases: dateNodes\n\n### ** Examples\n\n#let's simulate some example data\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n#get the true time-sclaed tree\ntree1 <- taxa2phylo(taxa)\n\n#now let's try dateNodes\ndateNodes(tree1)\n\n#let's ignore $root.time\ndateNodes(tree1,rootAge = NULL)\n\n#with the lengthy tip-label based labels\n #some of these will be hideously long\ndateNodes(tree1,labelDates = TRUE)\n\n\n"} {"package":"paleotree","topic":"dateTaxonTreePBDB","snippet":"### Name: dateTaxonTreePBDB\n### Title: Date a Taxon-Topology from the Paleobiology Database Using\n### Appearance Data from the API\n### Aliases: dateTaxonTreePBDB\n\n### ** Examples\n\n# Note that all 
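PBDB examples below share one convention, described shortly.\n\n# An added aside on dateNodes() above: the returned dates are a plain\n # named numeric vector, so they drop straight into base graphics\n # (assumes the tree1 object from the dateNodes example)\nnodeAges <- dateNodes(tree1)\nhist(nodeAges, main = \"Node ages\", xlab = \"Time before present\")\n\n# Note again that all 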
examples here use argument \n # failIfNoInternet = FALSE so that functions do\n # not error out but simply return NULL if internet\n # connection is not available, and thus\n # fail gracefully rather than error out (required by CRAN).\n# Remove this argument or set to TRUE so functions fail\n # when internet resources (paleobiodb) is not available.\n \n## No test: \n\ntaxaAnimals <- c(\"Archaeopteryx\", \"Eldredgeops\",\n\t\"Corvus\", \"Acropora\", \"Velociraptor\", \"Gorilla\", \n\t\"Olenellus\", \"Lingula\", \"Dunkleosteus\",\n\t\"Tyrannosaurus\", \"Triceratops\", \"Giraffa\",\n\t\"Megatheriidae\", \"Aedes\", \"Histiodella\",\n\t\"Rhynchotrema\", \"Pecten\", \"Homo\", \"Dimetrodon\",\n\t\"Nemagraptus\", \"Panthera\", \"Anomalocaris\")\n\nanimalData <-getSpecificTaxaPBDB(taxaAnimals, \n failIfNoInternet = FALSE)\n \nif(!is.null(animalData)){\n\ntree <- makePBDBtaxonTree(animalData, \n rankTaxon = \"genus\")\n\n#get the ranges \ntimeTree <- dateTaxonTreePBDB(tree)\n \n}\n\n## End(No test)\n\n#####################################\n\n## Not run: \n##D \n##D # plotting the tree with phyloPics\n##D \n##D plotPhyloPicTree(tree = timeTree,\n##D depthAxisPhylo = TRUE, \n##D failIfNoInternet = FALSE)\n##D \n##D \n##D # can also plot dated tree with strap\n##D \n##D library(strap)\n##D #now plot it\n##D strap::geoscalePhylo(\n##D tree = timeTree,\n##D direction = \"upwards\",\n##D ages = rangesMinMax,\n##D cex.tip = 0.7,\n##D cex.ts = 0.55,\n##D cex.age = 0.5,\n##D width = 3,\n##D tick.scale = 50,\n##D quat.rm = TRUE,\n##D boxes = \"Period\",\n##D arotate = 90,\n##D units = c(\"Eon\",\"Period\",\"Era\"),\n##D x.lim = c(650,-20)\n##D )\n## End(Not run)\n\n##############################################################\n\n## HORSES\n\n## No test: \n#if(require(curl)){\n\n# we can also use this for pre-existing trees\n # for example, this tree of equuids (horses)\n # borrowed from UCMP materials on horse evolution\n # https://evolution.berkeley.edu/evolibrary/images/HorseTree.pdf\n # (apparently from MacFadden, 1992? 
Citation above)\n\n# read the tree in as Newick string\nhorseTree <- ape::read.tree(file=NULL, \n text = paste0(\n \"(Eohippus,(Xenicohippus,(Haplohippus,(Epihippus,\",\n \"(Miohippus,(((Hypohippus,Megahippus),(Anchitherium,\",\n \"Kalobatippus)),(Archaeohippus,(Desmatippus,(Parahippus,\",\n \"(Merychippus,(((Hipparion_Merychippus,(Nannippus,\",\n \" Cormohipparion)),(Pseudhipparion,(Neohipparion,\",\n \" Hipparion))),(Equine_Merychippus,((Protohippus,Calippus),\",\n \"(Pliohippus,(Astrohippus,(Dinohippus,Equus))))))))))))))));\"\n )\n )\n\n# note there is a message that the tree lacks node names\n # this is unexpected / atypical for taxon trees\n\nplot(horseTree)\n\n# now let's get data on the tip from the PBDB\n # using getSpecificTaxaPBDB\nhorseData <- getSpecificTaxaPBDB(horseTree$tip.label, \n failIfNoInternet = FALSE)\n\nif(!is.null(horseData)){\n\n# now we can date the tree with dateTaxonTreePBDB\n\ndatedHorseTree <- dateTaxonTreePBDB(\n taxaTree = horseTree,\n taxaDataPBDB = horseData,\n minBranchLen = 1, \n failIfNoInternet = FALSE)\n\n# and let's try plotting it!\t\nplotPhyloPicTree(\n tree = datedHorseTree,\n depthAxisPhylo = TRUE, \n failIfNoInternet = FALSE)\t\t\n\t\n# a fairly boring phylopic diagram\n # not many horse phylopics as of 07-16-19?\n\n}\n\n#}\n## End(No test)\n\n## Not run: \n##D \n##D # Let's look at this horse tree with strap\n##D \n##D library(strap)\n##D \n##D geoscalePhylo(\n##D tree = datedHorseTree,\n##D ages = datedHorseTree$ranges.used,\n##D cex.tip = 0.7,\n##D cex.ts = 0.7,\n##D cex.age = 0.7,\n##D width = 4,\n##D tick.scale = 15,\n##D boxes = \"Epoch\",\n##D erotate = 90,\n##D quat.rm = TRUE,\n##D units = c(\"Period\",\"Epoch\"),\n##D x.lim = c(65,-10)\n##D )\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"degradeTree","snippet":"### Name: degradeTree\n### Title: Randomly Collapse a Portion of Nodes on a Phylogeny\n### Aliases: degradeTree collapseNodes\n\n### ** Examples\n\n\nset.seed(444)\ntree <- rtree(100)\ntree1 <- degradeTree(tree,prop_collapse = 0.5) \ntree3 <- degradeTree(tree,nCollapse = 50) \n\n#let's compare the input and output\nlayout(matrix(1:2,,2))\nplot(tree,show.tip.label = FALSE,use.edge.length = FALSE)\nplot(tree1,show.tip.label = FALSE,use.edge.length = FALSE)\n\n#now with collapseNodes\ntree <- rtree(10)\n#collapse nodes backwards\n #let's collapse lucky node number 13!\ntree1 <- collapseNodes(nodeID = 13,tree = tree,collapseType = \"backward\") \n#collapse nodes forwards \ntree2 <- collapseNodes(nodeID = 13,tree = tree,collapseType = \"forward\")\n#collapse entire clade\ntree3 <- collapseNodes(nodeID = 13,tree = tree,collapseType = \"clade\")\n\n#let's compare\nlayout(1:4)\nplot(tree,use.edge.length = FALSE,main = \"original\")\nplot(tree1,use.edge.length = FALSE,main = \"backward collapse\")\nplot(tree2,use.edge.length = FALSE,main = \"forward collapse\")\nplot(tree3,use.edge.length = FALSE,main = \"entire clade\")\n\nlayout(1)\n\n\n\n"} {"package":"paleotree","topic":"depthRainbow","snippet":"### Name: depthRainbow\n### Title: Paint Tree Branch Depth by Color\n### Aliases: depthRainbow\n\n### ** Examples\n\n\nset.seed(444)\ntree <- rtree(500)\ndepthRainbow(tree)\n\n\n\n"} {"package":"paleotree","topic":"divCurveFossilRecordSim","snippet":"### Name: divCurveFossilRecordSim\n### Title: Diversity-Curve Plotting for Simulations of Diversification and\n### Sampling In the Fossil Record\n### Aliases: divCurveFossilRecordSim\n\n### ** Examples\n\n\nset.seed(44)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, r = 0.1, 
nruns = 1,\n\tnTotalTaxa = c(20,30) ,nExtant = 0, plot = FALSE)\n\n# now let's plot it\ndivCurveFossilRecordSim(record)\n\n\n\n"} {"package":"paleotree","topic":"durationFreq","snippet":"### Name: durationFreq\n### Title: Models of Sampling and Extinction for Taxonomic Duration\n### Datasets\n### Aliases: durationFreq make_durationFreqCont make_durationFreqDisc\n\n### ** Examples\n\n# let's simulate some taxon ranges from \n # an imperfectly sampled fossil record\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n\t nTotalTaxa = c(30,40), \n\t nExtant = 0\n\t )\ntaxa <- fossilRecord2fossilTaxa(record)\nrangesCont <- sampleRanges(taxa,r = 0.5)\n#bin the ranges into discrete time intervals\nrangesDisc <- binTimeData(rangesCont,int.length = 1)\n#note that we made interval lengths = 1: \n \t# thus q (per int) = q (per time) for make_durationFreqDisc\n\n## Not run: \n##D #old ways of doing it (defunct as of paleotree version 2.6)\n##D getSampRateCont(rangesCont)\n##D getSampProbDisc(rangesDisc)\n## End(Not run)\n\n#new ways of doing it\n # we can constrain our functions\n # we can use parInit, parLower and parUpper\n # to control parameter bounds\n\n#as opposed to getSampRateCont, we can do:\nlikFun <- make_durationFreqCont(rangesCont)\noptim(parInit(likFun),\n likFun,\n lower = parLower(likFun),\n upper = parUpper(likFun),\n method = \"L-BFGS-B\",\n control = list(maxit = 1000000)\n )\n\n#as opposed to getSampProbDisc, we can do:\n\nlikFun <- make_durationFreqDisc(rangesDisc)\noptim(parInit(likFun),\n likFun,\n lower = parLower(likFun),\n upper = parUpper(likFun),\n method = \"L-BFGS-B\",\n control = list(maxit = 1000000)\n )\n\n#these give the same answers (as we'd expect them to!)\n\n#with newer functions we can constrain our functions easily\n # what if we knew the extinction rate = 0.1 a priori?\n \nlikFun <- make_durationFreqCont(rangesCont)\nlikFun <- constrainParPaleo(likFun,q.1~0.1)\noptim(parInit(likFun),\n likFun,\n lower = parLower(likFun),\n upper = parUpper(likFun),\n\t method = \"L-BFGS-B\",\n\t control = list(maxit = 1000000)\n\t )\n\n#actually decreases our sampling rate estimate\n # gets further away from true generating value, r = 0.5 (geesh!)\n # but this *is* a small dataset...\n\n\n"} {"package":"paleotree","topic":"equation2function","snippet":"### Name: equation2function\n### Title: Turn a Character String of the Right-Hand Side of an Equation\n### into an R Function\n### Aliases: equation2function\n\n### ** Examples\n\n# some simple examples\nfoo <- equation2function(\"x+y\")\nfoo\nfoo(x = 4,y = 0.1)\n\nfoo <- equation2function(\"x+2*sqrt(2*y+3)^2\")\nfoo\nfoo(x = 4,y = 0.1)\n\n# what about weird long argument names and spaces\nfoo <- equation2function(\"stegosaur + 0.4 * P\")\nfoo\nfoo(stegosaur = 5,P = 0.3)\n\n\n"} {"package":"paleotree","topic":"exhaustionFunctions","snippet":"### Name: exhaustionFunctions\n### Title: Analyses of the exhaustion of Character States Over Evolutionary\n### History\n### Aliases: exhaustionFunctions accioExhaustionCurve\n### accioBestAcquisitionModel charExhaustPlot\n\n### ** Examples\n\n## No test: \n\n# get data\ndata(SongZhangDicrano)\n\ndicranoTree <- cladogramDicranoX13\n\n# modify char data\ncharMat <- data.matrix(charMatDicrano)\ncharMat[is.na(charMatDicrano)] <- 0\ncharMat <- (charMat-1)\ncolnames(charMat) <- NULL\n# replace missing values\ncharMat[is.na(charMatDicrano)] <- \"?\"\n\n# the 'outgroup' is Exigraptus\n # also the first taxon listed in the matrix\nexhaustionResults <- 
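NULL # placeholder; the real call follows just below\n\n# An added aside, a sketch of a sanity check on the recoding above:\n # dimensions should be unchanged, and every original NA should now\n # be coded \"?\"\nstopifnot(all(dim(charMat) == dim(charMatDicrano)))\nstopifnot(sum(charMat == \"?\") == sum(is.na(charMatDicrano)))\n\n# now, the exhaustion analysis proper:\nexhaustionResults <- 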
accioExhaustionCurve(\n phyloTree = dicranoTree,\n charData = charMat, charTypes = \"unordered\",\n outgroup = \"Exigraptus_uniformis\")\n\n# fits models to exhaustion for total accumulation\naccioBestAcquisitionModel(\n exhaustion_info = exhaustionResults,\n changesType = \"totalAcc\", \t\n models = c(\"exponential\",\"gamma\",\"lognormal\",\"zipf\")) \n\n# plot of exhaustion of total accumulation of character states\ncharExhaustPlot(exhaustion_info = exhaustionResults,\n\t changesType = \"totalAcc\")\n\n# plot of exhaustion of character alterations\ncharExhaustPlot(exhaustion_info = exhaustionResults,\n\t changesType = \"charAlt\")\n\n## End(No test)\t\n\n\n"} {"package":"paleotree","topic":"expandTaxonTree","snippet":"### Name: expandTaxonTree\n### Title: Extrapolating Lower-Level Taxon Phylogenies from Higher-Level\n### Taxon Trees\n### Aliases: expandTaxonTree\n\n### ** Examples\n\n\nset.seed(444)\n# lets make our hypothetical simulated tree of higher taxa\ntaxtr <- rtree(10)\n# taxa to place within higher taxa\ntaxd <- sample(taxtr$tip.label, 30, replace = TRUE)\t\nnames(taxd) <- paste(taxd,\"_x\", 1:30, sep = \"\")\ncoll <- sample(taxtr$tip.label,3)\t\t#what to collapse?\nexpandTaxonTree(taxonTree = taxtr, taxaData = taxd, \n collapse = coll, plot = TRUE)\n\n\n\n"} {"package":"paleotree","topic":"fixRootTime","snippet":"### Name: fixRootTime\n### Title: Modify, Drop or Bind Terminal Branches of Various Types (Mainly\n### for Paleontological Phylogenies)\n### Aliases: fixRootTime\n\n### ** Examples\n\n\n## No test: \n\n#testing dropPaleoTip... and fixRootTime by extension\n\n#simple example\ntree <- read.tree(text = \"(A:3,(B:2,(C:5,D:3):2):3);\")\ntree$root.time <- 10\nplot(tree,no.margin = FALSE)\naxisPhylo()\n\n# now a series of tests, dropping various tips\n(test <- dropPaleoTip(tree,\"A\")$root.time) # = 7\n(test[2] <- dropPaleoTip(tree,\"B\")$root.time) # = 10\n(test[3] <- dropPaleoTip(tree,\"C\")$root.time) # = 10\n(test[4] <- dropPaleoTip(tree,\"D\")$root.time) # = 10\n(test[5] <- dropPaleoTip(tree,c(\"A\",\"B\"))$root.time) # = 5\n(test[6] <- dropPaleoTip(tree,c(\"B\",\"C\"))$root.time) # = 10\n(test[7] <- dropPaleoTip(tree,c(\"A\",\"C\"))$root.time) # = 7\n(test[8] <- dropPaleoTip(tree,c(\"A\",\"D\"))$root.time) # = 7\n\n# is it all good? 
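(see the check below)\n\n# An added note on the expected values: dropping tip \"A\" removes the\n # old root node, so the new root sits at the old $root.time (10) minus\n # the basal branch length leading to the retained clade (3), giving 7;\n # fixRootTime is responsible for making that adjustment.\n\n# so, one more time: is it all good? 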
if not, fail so paleotree fails...\nif(!identical(test,c(7,10,10,10,5,10,7,7))){stop(\"fixRootTime fails!\")}\n\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"footeValues","snippet":"### Name: footeValues\n### Title: Calculates Values for Foote's Inverse Survivorship Analyses\n### Aliases: footeValues\n\n### ** Examples\n\n#very simple example with three intervals, same value for all parameters\n\n# example rates (for the most part)\nrate <- rep(0.1, 3)\n \n#all continuous\nfooteValues(rate,rate,rate)\t\n\n# origination pulsed\nfooteValues(rate,rate,rate,p_cont = FALSE)\t\t \n\n# extinction pulsed\nfooteValues(rate,rate,rate,q_cont = FALSE) \t \n\n# all pulsed\nfooteValues(rate,rate,rate,p_cont = FALSE,q_cont = FALSE) \n\n\n"} {"package":"paleotree","topic":"freqRat","snippet":"### Name: freqRat\n### Title: Frequency Ratio Method for Estimating Sampling Probability\n### Aliases: freqRat\n\n### ** Examples\n\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, \n q = 0.1, \n nruns = 1,\n\t nTotalTaxa = c(30,40), \n\t nExtant = 0\n\t )\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.1)\n# Now let's use binTimeData to bin in intervals of 5 time units\nrangesDisc <- binTimeData(rangesCont,int.length = 5)\n\n# now, get an estimate of the sampling rate (we set it to 0.1 above)\n\n# for discrete data we can estimate the sampling probability per interval (R)\n # i.e. this is not the same thing as the instantaneous sampling rate (r)\n \n# can use sRate2sProb to see what we would expect\nsRate2sProb(r = 0.1, int.length = 5)\n\n# expect R = ~0.39\n\n# now we can apply freqRat to get sampling probability\nSampProb <- freqRat(rangesDisc, plot = TRUE)\nSampProb\n\n# I estimated R = ~0.25 \n# Not wildly accurate, is it?\n\n# can also calculate extinction rate per interval of time\nfreqRat(rangesDisc, calcExtinction = TRUE)\n\n# est. 
ext rate = ~0.44 per interval\n# 5 time-unit intervals, so ~0.44 / 5 = ~0.08 per time-unit\n# That's pretty close to the generating value of 0.01, used in sampleRanges\n\n## Not run: \n##D #################\n##D # The following example code (which is not run by default) examines how \n##D \t# the freqRat estimates vary with sample size, interval length\n##D \t# and compares it to using make_durationFreqDisc\n##D \n##D # how good is the freqRat at 20 sampled taxa on avg?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length = 1\n##D # estimate R from r, assuming stuff like p = q\n##D R <- sapply(r, sRate2sProb, int.length = 1)\t\n##D ntaxa <- freqRats <- numeric()\n##D for(i in 1:length(r)){\n##D \t# assuming budding model\n##D \trecord <- simFossilRecord(p = 0.1, \n##D \t q = 0.1, \n##D \t r = r[i], \n##D \t nruns = 1,\n##D \t nSamp = c(15,25), \n##D \t nExtant = 0, \n##D \t plot = TRUE\n##D \t )\n##D \tranges <- fossilRecord2fossilRanges(record)\n##D \ttimeList <- binTimeData(ranges,int.length = int.length)\n##D \tntaxa[i] <- nrow(timeList[[2]])\n##D \tfreqRats[i] <- freqRat(timeList)\n##D \t}\n##D plot(R,freqRats);abline(0,1)\n##D # without the gigantic artifacts bigger than 1...\n##D plot(R,freqRats,ylim = c(0,1));abline(0,1)\n##D # very worrisome lookin'!\n##D \n##D # how good is it at 100 sampled taxa on average?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length = 1\n##D R <- sapply(r,sRate2sProb,int.length = 1)\n##D ntaxa <- freqRats <- numeric()\n##D for(i in 1:length(r)){\n##D # assuming budding model\n##D record <- simFossilRecord(p = 0.1, \n##D q = 0.1, \n##D r = r[i], \n##D nruns = 1, \n##D nSamp = c(80,150), \n##D nExtant = 0, \n##D plot = TRUE)\n##D \tranges <- fossilRecord2fossilRanges(record)\n##D \ttimeList <- binTimeData(ranges, \n##D \t int.length = int.length)\n##D \tntaxa[i] <- nrow(timeList[[2]])\n##D \tfreqRats[i] <- freqRat(timeList)\n##D \t}\n##D \t\n##D plot(R, freqRats,\n##D ylim = c(0,1)\n##D )\n##D abline(0,1)\n##D \n##D #not so hot, eh?\n##D \n##D ################\n##D #LETS CHANGE THE TIME BIN LENGTH!\n##D \n##D # how good is it at 100 sampled taxa on average, with longer time bins?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length <- 10\n##D R <- sapply(r, sRate2sProb, int.length = int.length)\n##D ntaxa <- freqRats <- numeric()\n##D for(i in 1:length(r)){\n##D \t # assuming budding model\n##D \t record <- simFossilRecord(p = 0.1, \n##D \t q = 0.1, \n##D \t r = r[i], \n##D \t nruns = 1,\t\n##D \t nSamp = c(80,150), \n##D \t nExtant = 0, \n##D \t plot = TRUE)\n##D \t ranges <- fossilRecord2fossilRanges(record)\n##D \t timeList <- binTimeData(ranges, int.length = int.length)\n##D \t ntaxa[i] <- nrow(timeList[[2]])\n##D \t freqRats[i] <- freqRat(timeList)\n##D \t }\n##D \t\n##D plot(R, freqRats, ylim = c(0,1))\n##D abline(0,1)\n##D # things get more accurate as interval length increases... 
odd, eh?\n##D \n##D # how good is it at 20 sampled taxa on average, with longer time bins?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length <- 10\n##D R <- sapply(r, sRate2sProb, int.length = int.length)\n##D ntaxa <- freqRats <- numeric()\n##D for(i in 1:length(r)){\n##D \t# assuming budding model\n##D \trecord <- simFossilRecord(p = 0.1, \n##D \t q = 0.1, \n##D \t r = r[i], \n##D \t nruns = 1,\t\n##D \t nSamp = c(15,25), \n##D \t nExtant = 0, \n##D \t plot = TRUE)\n##D \tranges <- fossilRecord2fossilRanges(record)\n##D \ttimeList <- binTimeData(ranges, int.length = int.length)\n##D \tntaxa[i] <- nrow(timeList[[2]])\n##D \tfreqRats[i] <- freqRat(timeList)\n##D \t}\n##D plot(R, freqRats, ylim = c(0,1))\n##D abline(0,1)\n##D # still not so hot at low sample sizes, even with longer bins\n##D \n##D ########################\n##D # ML METHOD\n##D \n##D # how good is the ML method at 20 taxa, 1 time-unit bins?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length <- 1\n##D R <- sapply(r,sRate2sProb,int.length = int.length)\n##D ntaxa <- ML_sampProb <- numeric()\n##D for(i in 1:length(r)){\n##D \t # assuming budding model\n##D \t record <- simFossilRecord(p = 0.1, \n##D \t q = 0.1, \n##D \t r = r[i], \n##D \t nruns = 1,\n##D \t nSamp = c(15,25), \n##D \t nExtant = 0, \n##D \t plot = TRUE\n##D \t )\n##D \t ranges <- fossilRecord2fossilRanges(record)\n##D \t timeList <- binTimeData(ranges, int.length = int.length)\n##D \t ntaxa[i] <- nrow(timeList[[2]])\n##D likFun <- make_durationFreqDisc(timeList)\n##D ML_sampProb[i] <- optim(\n##D parInit(likFun), likFun,\n##D \t\t lower = parLower(likFun),\n##D \t\t upper = parUpper(likFun),\n##D method = \"L-BFGS-B\",\n##D control = list(maxit = 1000000)\n##D )[[1]][2]\n##D \t }\n##D \t \n##D plot(R, ML_sampProb)\n##D abline(0,1)\n##D \n##D # Not so great due to likelihood surface ridges\n##D # but it returns values between 0-1\n##D \n##D # how good is the ML method at 100 taxa, 1 time-unit bins?\n##D set.seed(444)\n##D r <- runif(100)\n##D int.length <- 1\n##D R <- sapply(r, sRate2sProb,\n##D int.length = int.length)\n##D ntaxa <- ML_sampProb <- numeric()\n##D for(i in 1:length(r)){\n##D # assuming budding model\n##D \t record <- simFossilRecord(p = 0.1, \n##D \t q = 0.1, \n##D \t r = r[i], \n##D \t nruns = 1,\n##D \t nSamp = c(80,150), \n##D \t nExtant = 0, \n##D \t plot = TRUE)\n##D \t ranges <- fossilRecord2fossilRanges(record)\n##D \t timeList <- binTimeData(ranges,int.length = int.length)\n##D \t ntaxa[i] <- nrow(timeList[[2]])\n##D likFun <- make_durationFreqDisc(timeList)\n##D ML_sampProb[i] <- optim(parInit(likFun),\n##D likFun,\n##D \t\t lower = parLower(likFun),\n##D \t\t upper = parUpper(likFun),\n##D method = \"L-BFGS-B\",\n##D control = list(maxit = 1000000)\n##D )[[1]][2]\n##D \t}\n##D \t\n##D plot(R,ML_sampProb)\n##D abline(0,1)\n##D \n##D # Oh, fairly nice, although still a biased uptick as R gets larger\n## End(Not run)\n\n\n"} {"package":"paleotree","topic":"getDataPBDB","snippet":"### Name: getDataPBDB\n### Title: Obtaining Data for Taxa or Occurrences From Paleobiology\n### Database API\n### Aliases: getDataPBDB getCladeTaxaPBDB getSpecificTaxaPBDB getPBDBocc\n\n### ** Examples\n\n## No test: \n\n# Note that all examples here use argument \n # failIfNoInternet = FALSE so that functions do\n # not error out but simply return NULL if internet\n # connection is not available, and thus\n # fail gracefully rather than error out (required by CRAN).\n# Remove this argument or set to TRUE so functions fail\n # when internet resources 
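(paleobiodb) are not available.\n\n# An added aside (hypothetical helper, not part of paleotree): wrapping\n # a PBDB call in base R's try() gives similar graceful-failure\n # behavior for functions that lack a failIfNoInternet argument\nwithPBDB <- function(expr){\n    out <- try(expr, silent = TRUE)\n    if(inherits(out, \"try-error\")) NULL else out\n}\n# e.g. graptData <- withPBDB(getCladeTaxaPBDB(\"Graptolithina\"))\n\n# To reiterate: remove failIfNoInternet (or set it to TRUE) so\n # functions fail when internet resources 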
(paleobiodb) are not available.\n\n#graptolites\ngraptData <- getCladeTaxaPBDB(\"Graptolithina\", \n failIfNoInternet = FALSE)\ndim(graptData)\nsum(graptData$taxon_rank == \"genus\")\n\n# so we can see that our call for Graptolithina returned \n # a large number of taxa, a large portion of which are\n # individual genera\n# (554 and 318 respectively, as of 03-18-19)\n\ntetrapodList <- c(\"Archaeopteryx\", \"Columba\", \"Ectopistes\",\n \"Corvus\", \"Velociraptor\", \"Baryonyx\", \"Bufo\",\n \"Rhamphorhynchus\", \"Quetzalcoatlus\", \"Natator\",\n \"Tyrannosaurus\", \"Triceratops\", \"Gavialis\",\n \"Brachiosaurus\", \"Pteranodon\", \"Crocodylus\",\n \"Alligator\", \"Giraffa\", \"Felis\", \"Ambystoma\",\n \"Homo\", \"Dimetrodon\", \"Coleonyx\", \"Equus\",\n \"Sphenodon\", \"Amblyrhynchus\")\n\ntetrapodData <- getSpecificTaxaPBDB(tetrapodList, \n failIfNoInternet = FALSE)\ndim(tetrapodData)\nsum(tetrapodData$taxon_rank == \"genus\")\n# should be 26, with all 26 as genera\n\n#############################################\n# Now let's try getting occurrence data\n\n# getting occurrence data for a genus, sorting it\n# Dicellograptus\ndicelloData <- getPBDBocc(\"Dicellograptus\", \n failIfNoInternet = FALSE)\n\nif(!is.null(dicelloData)){\n\ndicelloOcc2 <- taxonSortPBDBocc(dicelloData, \n rank = \"species\", onlyFormal = FALSE, \n failIfNoInternet = FALSE)\nnames(dicelloOcc2)\n\n}\n\n## End(No test)\n\n\n\n"} {"package":"paleotree","topic":"graptDisparity","snippet":"### Name: graptDisparity\n### Title: Morphological Character and Range Data for late Ordovician and\n### Early Silurian Graptoloidea\n### Aliases: graptDisparity graptCharMatrix graptRanges graptDistMat\n### Keywords: datasets\n\n### ** Examples\n\n\n#load data\ndata(graptDisparity)\n\n#separate out two components of character matrix\n\n#45 discrete characters\ndiscChar <- graptCharMatrix[,1:45]\n\n#min ranges for 4 continuous characters\ncMinChar <- graptCharMatrix[,c(46,48,50,52)]\n#max ranges for 4 continuous characters\ncMaxChar <- graptCharMatrix[,c(47,49,51,53)]\n\n#group (clade/paraclade) coding\ngroupID <- graptCharMatrix[,54]\n\n#number of species\nnspec <- nrow(graptCharMatrix)\n\n#some plotting information from Bapst et al.'s plotting scripts\ngrpLabel <- c(\"Normalo.\",\"Monogr.\",\"Climaco.\",\n\t\t\"Dicrano.\",\"Lasiogr.\",\"Diplogr.\",\"Retiol.\")\ngrpColor <- c(\"red\",\"purple\",colors()[257],colors()[614],\n\t\tcolors()[124],\"blue\",colors()[556])\n\n##########\n\n#plot diversity curve of taxa\ntaxicDivDisc(graptRanges)\n\n#but the actual study interval for the data is much smaller\nabline(v = 448.57,lwd = 3) #start of study interval\nabline(v = 439.37,lwd = 3) #end of study interval\n\n#plot diversity curve just for study interval\ntaxicDivDisc(graptRanges, timelims = c(448.57,439.37))\n\n############\n\n#distance matrix is given as graptDistMat\n #to calculate yourself, see code below in DoNotRun section\n\n#now, is the diagonal zero? (it should be)\nall(diag(graptDistMat) == 0)\n\n#now, is the matrix symmetric? 
(it should be)\nisSymmetric(graptDistMat)\n\n#can apply cluster analysis\nclustRes <- hclust(as.dist(graptDistMat))\nplot(clustRes,labels = FALSE)\n\n#use ape to plot with colors at the tips\ndev.new(width = 15) \t# for a prettier plot\nplot.phylo(as.phylo(clustRes),show.tip.label = FALSE,\n\t\tno.margin = TRUE,direction = \"upwards\")\ntiplabels(pch = 16,col = grpColor[groupID+1])\nlegend(\"bottomright\",legend = grpLabel,col = grpColor,pch = 16)\ndev.set(2)\n\n#can apply PCO (use lingoes correction to account for negative values\n #resulting from non-euclidean matrix)\npco_res <- pcoa(graptDistMat,correction = \"lingoes\")\n\n#relative corrected eigenvalues\nrel_corr_eig <- pco_res$values$Rel_corr_eig\nlayout(1:2)\nplot(rel_corr_eig)\n#cumulative\nplot(cumsum(rel_corr_eig))\n\n#first few axes account for very little variance!!\n\n\n\n#well let's look at those PCO axes anyway\nlayout(1)\npco_axes <- pco_res$vectors\nplot(pco_axes[,1],pco_axes[,2],pch = 16,col = grpColor[groupID+1],\n xlab = paste(\"PCO Axis 1, Rel. Corr. Eigenvalue = \",round(rel_corr_eig[1],3)),\n ylab = paste(\"PCO Axis 2, Rel. Corr. Eigenvalue = \",round(rel_corr_eig[2],3)))\nlegend(\"bottomright\",legend = grpLabel,col = grpColor,pch = 16,ncol = 2,cex = 0.8)\n\n\n#########################\n\n## Not run: \n##D \n##D \n##D #calculate a distance matrix (very slow!)\n##D #Bapst et al. calculated as # char diffs / total # of chars\n##D #but both calculated only for characters non-missing in both taxa\n##D #non-identical discrete states = difference for discrete traits\n##D #non-overlapping ranges for continuous characters = difference for cont traits\n##D \n##D distMat <- matrix(,nspec,nspec)\n##D rownames(distMat) <- colnames(distMat) <- rownames(graptCharMatrix)\n##D for(i in 1:nspec){ for(j in 1:nspec){ #calculate for each pair of species\n##D #discrete characters\n##D di <- discChar[i,] #discrete character vector for species i\n##D dj <- discChar[j,] #discrete character vector for species j\n##D #now calculate pair-wise differences for non-missing characters\n##D discDiff <- (di != dj)[!is.na(di)&!is.na(dj)] #logical vector\n##D #\n##D #continuous characters: need another for() loop\n##D contDiff <- numeric()\n##D for(ct in 1:4){\n##D #if they do not overlap, a min must be greater than a max value\n##D contDiff[ct] <- cMinChar[i,ct]>cMaxChar[j,ct] | cMinChar[j,ct]>cMaxChar[i,ct]\n##D }\n##D #remove NAs\n##D contDiff <- contDiff[!is.na(contDiff)]\n##D #combine\n##D totalDiff <- c(discDiff,contDiff)\n##D #divide total difference \n##D distMat[i,j] <- sum(totalDiff)/length(totalDiff)\n##D }}\n##D \n##D #but is it identical to the distance matrix already provided?\n##D identical(distMat,graptDistMat)\n##D #ehh, numerical rounding issues...
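\n##D \n##D # (aside, not in the original: identical() is strict about floating-point\n##D # equality; a tolerance-based comparison, as a sketch, would be all.equal)\n##D isTRUE(all.equal(distMat, graptDistMat))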
\n##D \n##D #A somewhat speedier alternative to calculate a distance matrix\n##D distMat <- matrix(,nspec,nspec)\n##D rownames(distMat) <- colnames(distMat) <- rownames(graptCharMatrix)\n##D for(i in 1:(nspec-1)){ for(j in (i+1):nspec){ #calculate for each pair of species\n##D #now calculate pair-wise differences for non-missing characters\n##D discDiff <- (discChar[i,] != discChar[j,])[\n##D !is.na(discChar[i,])&!is.na(discChar[j,])] #logical vector\n##D #continuous characters: if they do not overlap, a min must be greater than a max value\n##D contDiff <- sapply(1:4,function(ct)\n##D cMinChar[i,ct]>cMaxChar[j,ct] | cMinChar[j,ct]>cMaxChar[i,ct])\n##D #remove NAs, combine, divide total difference \n##D distMat[i,j] <- distMat[j,i] <- sum(c(discDiff,contDiff[!is.na(contDiff)]))/length(\n##D c(discDiff,contDiff[!is.na(contDiff)]))\n##D }}\n##D diag(distMat) <- 0\n##D \n##D #but is it identical to the distance matrix already provided?\n##D identical(distMat,graptDistMat)\n##D #ehh, MORE numerical rounding issues...\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"graptPBDB","snippet":"### Name: graptPBDB\n### Title: Example Occurrence and Taxonomic Datasets of the Graptolithina\n### from the Paleobiology Database\n### Aliases: graptPBDB graptOccPBDB graptTaxaPBDB graptTimeTree graptTree\n### Keywords: datasets\n\n### ** Examples\n\n# let's look for pterobranch genera\n # pterobranchs are the larger group containing graptolites\n\ntaxon <- \"Pterobranchia\"\nselectRank <- \"genus\"\n\n## Not run: \n##D library(paleotree)\n##D \n##D # get taxon data\n##D # default variables\n##D graptTaxaPBDB <- getCladeTaxaPBDB(taxon)\n##D \n##D # get the taxon tree\n##D graptTree <- makePBDBtaxonTree(graptTaxaPBDB,\n##D rankTaxon = selectRank\n##D )\n##D \n##D # date the tree using the ranges\n##D # provided directly by the PBDB\n##D graptTimeTree <- dateTaxonTreePBDB(graptTree)\n##D \n##D library(strap)\n##D dev.new(height=6, width=10)\n##D geoscalePhylo(graptTimeTree, \n##D ages=graptTimeTree$ranges.used\n##D )\n##D nodelabels(graptTimeTree$node.label,\n##D cex=0.7,\n##D adj=c(0.3,0)\n##D )\n##D \n##D # slice tree at the Mississippian-Pennsylvanian boundary so\n##D # the *two* extant genera don't obfuscate the tree\n##D graptTimeTreePrePenn <- timeSliceTree(\n##D ttree = graptTimeTree,\n##D sliceTime = 323.2\n##D )\n##D slicedRanges <- graptTimeTree$ranges.used\n##D slicedRanges[slicedRanges < 323.2] <- 323.2\n##D \n##D # plot it!\n##D dev.new(height=6, width=10)\n##D geoscalePhylo(graptTimeTreePrePenn, \n##D ages = slicedRanges \n##D )\n##D nodelabels(graptTimeTreePrePenn$node.label,\n##D cex=0.7,\n##D adj=c(0.3,0)\n##D )\n##D \n##D # we could also date the tree using the occurrence data\n##D # default variables\n##D graptOccPBDB <- getPBDBocc(taxon)\n##D \n##D # some PBDB people have names that aren't in ASCII\n##D # but CRAN hates non-ASCII characters, sooo...\n##D # convert using gtools::ASCIIfy \n##D levels(graptOccPBDB$enterer) <- gtools::ASCIIfy(\n##D levels(graptOccPBDB$enterer))\n##D levels(graptOccPBDB$authorizer) <- gtools::ASCIIfy(\n##D levels(graptOccPBDB$authorizer))\n##D levels(graptOccPBDB$modifier) <- gtools::ASCIIfy(\n##D levels(graptOccPBDB$modifier))\n##D \n##D graptOccSort <- taxonSortPBDBocc(graptOccPBDB, \n##D rank = selectRank,\n##D onlyFormal = FALSE, \n##D cleanUncertain = FALSE)\n##D \n##D graptTimeList <- occData2timeList(occList = graptOccSort)\n##D \n##D graptTimeTreeFromOcc <- bin_timePaleoPhy(\n##D graptTree,\n##D timeList = graptTimeList,\n##D nonstoch.bin = TRUE,\n##D type = \"mbl\",\n##D vartime = 3)\n##D \n##D plot(graptTimeTreeFromOcc, show.tip.label=FALSE)\n##D axisPhylo()\n##D \n##D # don't need to slice tree because extant-only taxa were dropped\n##D dev.new(height=6, width=10)\n##D geoscalePhylo(graptTimeTreeFromOcc, \n##D ages=graptTimeTreeFromOcc$ranges.used\n##D \t )\n##D nodelabels(graptTimeTreeFromOcc$node.label,\n##D cex=0.7,\n##D \t adj=c(0.3,0)\n##D \t )\n##D \n##D graphics.off()\n##D \n##D save(graptOccPBDB,\n##D graptTaxaPBDB,\n##D graptTree,\n##D graptTimeTree,\n##D file = \"graptPBDB.rdata\")\n## End(Not run)\n\n# load archived example data\ndata(graptPBDB)\n\n# let's visualize who entered the majority of the occurrence data\npie(sort(table(graptOccPBDB$enterer)))\n# 
and now who authorized it\npie(sort(table(graptOccPBDB$authorizer)))\n\n# I *sort of* apologize for using pie charts.\n\n# Let's look at age resolution of these occurrences\nhist(graptOccPBDB$max_ma - graptOccPBDB$min_ma,\n main = \"Age Resolution of Occurrences\",\n xlab = \"Ma\")\n\n# use table to calculate distribution \n #of taxa among taxonomic ranks\ntable(graptTaxaPBDB$taxon_rank)\n\nbarplot(table(graptTaxaPBDB$taxon_rank))\n\n\n\n"} {"package":"paleotree","topic":"horizonSampRate","snippet":"### Name: horizonSampRate\n### Title: Estimate Sampling Rate from Sampling Horizon Data (Solow and\n### Smith, 1997)\n### Aliases: horizonSampRate\n\n### ** Examples\n\n#can simulate this type of data with sampleRanges\n # just set ranges.only = FALSE\n#let's try a simulation example:\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\nsampledOccurrences <- sampleRanges(taxa,r = 0.5,ranges.only = FALSE)\n\n# now try with horizonSampRate\nhorizonSampRate(sampOcc = sampledOccurrences)\n\n# but we could also try with the *other* inputs\n # useful because some datasets we may only have durations\n # and number of sampling events for\nfiltered <- sampledOccurrences[!is.na(sampledOccurrences)] \ndur <- sapply(filtered,max) - sapply(filtered,min)\nnCol <- sapply(filtered,length)\n# supply as durations and nCollections\nhorizonSampRate(durations = dur, nCollections = nCol)\n\n\n"} {"package":"paleotree","topic":"inverseSurv","snippet":"### Name: inverseSurv\n### Title: Inverse Survivorship Models in the Fossil Record\n### Aliases: inverseSurv make_inverseSurv invSurv\n\n### ** Examples\n\n## No test: \n\n# let's simulate some taxon ranges from an imperfectly sampled fossil record\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n\t nTotalTaxa = c(30,40), \n\t nExtant = 0\n\t )\ntaxa <- fossilRecord2fossilTaxa(record)\nrangesCont <- sampleRanges(taxa, r = 0.5)\n\n#bin the ranges into discrete time intervals\nrangesDisc <- binTimeData(rangesCont, int.length = 5)\n\n#apply make_inverseSurv\nlikFun <- make_inverseSurv(rangesDisc)\n\n#use constrainParPaleo to make the model time-homogeneous\n \t# match.all ~ match.all will match parameters\n \t# so only 2 parameters: p (= q) and r\n\nconstrFun <- constrainParPaleo(likFun, \n match.all~match.all)\n\nresults <- optim(parInit(constrFun), \n constrFun,\n lower = parLower(constrFun), \n upper = parUpper(constrFun),\n method = \"L-BFGS-B\", \n control = list(maxit = 1000000)\n )\nresults\n\n#plot the results\nconstrFun(results$par, altMode = TRUE)\n## End(No test)\n\n## Not run: \n##D #unconstrained function with ALL of the 225 possible parameters!!!\n##D # this will take forever to converge\t\n##D optim(parInit(likFun),\n##D likFun,\n##D lower = parLower(likFun), \n##D upper = parUpper(likFun),\n##D method = \"L-BFGS-B\", \n##D control = list(maxit = 1000000)\n##D )\n## End(Not run)\n\n\n"} {"package":"paleotree","topic":"kanto","snippet":"### Name: kanto\n### Title: Example Species Abundances Tables\n### Aliases: kanto\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(kanto)\n\n#visualize site abundances as barplots\nbarplotAbund <- function(x){\n\tx <- x[,colSums(x)>0]\n\tlayout(1:(nrow(x)+1))\n\txpar <- par(mar = c(0,7,2,0))\n\tfor(i in 1:(nrow(x)-1)){\n\t\tbarplot(x[i,],ylab = rownames(x)[i],\n\t\t\tnames.arg = \"\")\n\t\t}\n\tbarplot(x[nrow(x),],\n\t\tylab = rownames(x)[nrow(x)],las = 
3)\n\tpar(xpar)\n\tlayout(1)\n\tmtext(\"Abundances\",side = 2,line = 3,adj = 0.8)\n\t}\n\n#first five sites\nkanto5 <- kanto[1:5,]\nbarplotAbund(kanto5)\n\n#get pairwise Spearman rho coefficients\nrhoCoeff <- pairwiseSpearmanRho(kanto,dropAbsent = \"bothAbsent\")\n\n#what are the nearest-neighbor rhos (largest rho correlations)?\ndiag(rhoCoeff) <- NA\nrhoNearest <- apply(rhoCoeff,1,max,na.rm = TRUE)\nrhoNearest\n\n# We can see the power plant sample is extremely different from the rest\n\n# measure evenness: Hurlbert's PIE\n\nkantoPIE <- HurlbertPIE(kanto)\n\n# compare to dominance (relative abundance of most abundant taxon)\n\ndominance <- apply(kanto,1,function(x) max(x)/sum(x) )\n\nplot(kantoPIE,dominance)\n\n# relatively strong relationship!\n\n\n## Not run: \n##D #########################################\n##D #################################################\n##D #########################################################\n##D # Some Cool Ecology Stuff With Other Packages\n##D \n##D # basically all the analyses & visualizations\n##D \t\t#for ecology in R that I think are awesome\n##D \n##D \n##D ##########################################\n##D ###########################\n##D #Ordination (PCO, DCA)\n##D \n##D #get bray-curtis distances\n##D library(vegan)\n##D bcDist <- vegdist(kanto,method = \"bray\")\n##D \n##D # do a PCO on the bray-curtis distances\n##D pcoRes <- pcoa(bcDist,correction = \"lingoes\")\n##D scores <- pcoRes$vectors\n##D # plot the PCO\n##D plot(scores,type = \"n\")\n##D text(labels = rownames(kanto),scores[,1],scores[,2],cex = 0.5)\n##D \n##D # the way the power plant and the pokemon tower converge\n##D \t# is very suspicious: may be distortion due to a long gradient\n##D \n##D # do a DCA instead with vegan's decorana\n##D dcaRes <- decorana(kanto)\n##D # plot using native vegan functions\n##D \t #will show species scores in red\n##D plot(dcaRes,cex = 0.5)\n##D #kind of messy\n##D \n##D #show just the sites scores\n##D plot(dcaRes,cex = 0.5,display = \"sites\")\n##D \n##D #show just the species scores\n##D plot(dcaRes,cex = 0.5,display = \"species\")\n##D \n##D #well, that's pretty cool\n##D \n##D #######################\n##D #get the nearest neighbor for each site\n##D # based on pair-wise rho coefficients\n##D rhoNeighbor <- apply(rhoCoeff,1,function(x)\n##D \t rownames(kanto)[tail(order(x,na.last = NA),1)])\n##D \n##D #let's plot the nearest neighbor connections with igraph\n##D NNtable <- cbind(rownames(kanto),rhoNeighbor)\n##D \n##D # now plot with igraph\n##D library(igraph)\n##D NNlist <- graph.data.frame(NNtable)\n##D plot(NNlist)\n##D \n##D #arrows point at the nearest neighbor of each sample\n##D \t # based on maximum Spearman rho correlation\n##D \n##D #########################################\n##D #######################################################\n##D # Two Way Cluster With Heatmap\n##D \n##D # This example based on code provided by Max Christie\n##D \n##D # load pheatmap library for this example\n##D library(pheatmap) \n##D \n##D # get distance matrices for sites and taxa\n##D \t# based on bray-curtis dist\n##D \t# standardized to total abundance\n##D \n##D # standardize site matrix to relative abundance\n##D siteStand <- decostand(kanto, method = \"total\")\n##D # site distance matrix (Bray-Curtis)\n##D siteDist <- vegdist(siteStand, \"bray\", diag = TRUE)\n##D \n##D # standardize taxa matrix to relative abundance\n##D taxaStand <- decostand(t(kanto), method = \"total\")\n##D # taxa distance matrix (Bray-Curtis)\n##D taxaDist <- vegdist(taxaStand, 
\"bray\", diag = TRUE)\n##D \n##D ### Need to set graphic parameters for table\n##D \n##D # Check out range of values for relative abundance\n##D # hist(myStand) # none get very high...\n##D \n##D # number of breaks: number of colors for heatmap\n##D nBreaks <- 15\n##D \n##D # set underValue\n##D \t# anything below this counts as not appearing\n##D \t# at that site for visualization purposes\n##D underValue <- min(siteStand[siteStand>0])-min(siteStand[siteStand>0])/10\n##D # set overValue (max relative abundance)\n##D overValue <- max(siteStand)\n##D # you can set your breaks to any sequence you want\n##D \t# and they don't have to be the same length. \n##D \t# You can do this manually too.\n##D # here we added a 0 to 'underValue' bin to \n##D \t# the heatmap, making this bin essentially 0.\n##D colorBreaks <- c(0,seq(underValue,max(siteStand), \n##D \tby = max(siteStand)/(nBreaks-1)))\n##D # here we used the function rainbow to create a vector of colors. \n##D \t# You can set these colors yourself too. \n##D # It is important that this vector is one element \n##D \t# less than the myBreaks vector\n##D rainColors <- rainbow(nBreaks) \n##D # now we can add \"white\" onto the vector, \n##D \t# this will be the first color bin, \n##D \t# which we're going to set to be (essentially) 0. \n##D rainColors <- c(\"white\", rainColors) \n##D # If you don't add white, taxa at 0 abundance get colored in\n##D \n##D ### Plot the 2-Way Cluster\n##D \n##D # heatmap, with user-set colors\n##D # feed the function a distance matrix we wanted to use. \n##D \t#siteDist and taxaDist made above by vegdist (bray-curtis)\n##D # scale is the relative abundance, let's label it as such\n##D \n##D dev.new(width = 10)\n##D \n##D #for some reason, mtext() doesn't recognize pheatmap as plot.new\n##D plot.new(width = 7) \n##D \n##D pheatmap(\n##D siteStand, \n##D \t clustering_method = \"ward.D\", \n##D \t clustering_distance_rows = siteDist, \n##D \t clustering_distance_cols = taxaDist,\n##D color = rainColors, \n##D breaks = colorBreaks\n##D )\n##D mtext(\"Relative Abundance\",\n##D side = 4, line = -1.4, adj = 0.95)\n##D \n##D # pretty cool looking!\n##D \n##D ########################\n##D # even better: \n##D # twoWayEcologyCluster in paleotree\n##D \n##D dev.new(width=10)\t\n##D \n##D twoWayEcologyCluster(\n##D xDist = siteDist,\n##D yDist = taxaDist,\n##D propAbund = siteStandKanto,\n##D cex.axisLabels = 0.8\n##D )\n##D \n##D #########################################\n##D #########################################################\n##D ## Testing for differences between groups of sites\n##D \n##D #is there a difference between routes and non-routes\n##D groups <- rep(0, nrow(kanto))\n##D groups[grep(rownames(kanto), pattern = \"Route\")] <- 1\n##D \n##D #anosim (in vegan)\n##D \t#are distances within groups smaller than distances between?\n##D library(vegan)\n##D anosim(dat = kanto, grouping = groups)\n##D \n##D # we could also use PERMANOVA instead\n##D # this is generally considered more robust than ANOSIM\n##D # note that group needs to be factor for PERMANOVA\n##D groupsAsFactor <- factor(groups)\n##D adonis(kanto ~ groupsAsFactor)\n##D \n##D # both analyses are very significant\n##D \n##D ####################################################################\n##D # SIMPER analysis (SIMalarity PERcentages) in Vegan\n##D # which taxa contribute most to the difference between groups?\n##D # this might be 'index' taxa for different communities\n##D # beware: it might also be the taxa that vary most within groups\n##D 
\n##D \n##D ####################################################################\n##D # SIMPER analysis (SIMilarity PERcentages) in Vegan\n##D # which taxa contribute most to the difference between groups?\n##D # this might be 'index' taxa for different communities\n##D # beware: it might also be the taxa that vary most within groups\n##D \n##D simperResult <- simper(comm = kanto, group = groupsAsFactor)\n##D simperResult\n##D \n##D # these are the species that account for at least 70% of\n##D # differences between groups, based on Bray-Curtis distances\n##D \n##D # can see % contribution for all species with summary()\n##D # as well as more detail in general...\n##D summary(simperResult)\n##D \n##D # other analyses to look into:\n##D # SimProf to test clusters from a cluster analysis...\n##D \n##D #########################################################\n##D # alternative for differentiating groups:\n##D # using multivariate GLMs in mvabund\n##D \n##D library(mvabund)\n##D \n##D ft <- manyglm(formula = kanto ~ groupsAsFactor)\n##D anova(ft)\n##D \n##D # also highly significant!\n##D # note, though, that this method uses absolute abundances\n##D # it will not accept relative abundances,\n##D # and absolute abundances are usually impossible to get \n##D # for fossil data\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"paleotree","topic":"macroperforateForam","snippet":"### Name: macroperforateForam\n### Title: Ancestor-Descendant Relationships for Macroperforate\n### Foraminifera, from Aze et al. (2011)\n### Aliases: macroperforateForam foramAL foramAM foramALb foramAMb\n### Keywords: datasets\n\n### ** Examples\n\n\n# Following Text Reproduced from Aze et al. 2011's Supplemental Material\n# Appendix S5\n# \n# 'Data required to produce all of the phylogenies included in the manuscript\n# using paleoPhylo (Ezard & Purvis, 2009) a free software package to draw\n# paleobiological phylogenies in R.'\n#\n# 'The four tabs hold different versions of our phylogeny:\n#\t aMb: fully bifurcating morphospecies phylogeny\n#\t aM: budding/bifurcating morphospecies phylogeny\n#\t aLb: fully bifurcating lineage phylogeny\n#\t aL: budding/bifurcating lineage phylogeny\n#\n# 'Start Date gives the first occurence of the species according\n# to the particular phylogeny; End Date gives the last occurence\n# according to the particular phylogeny.'\n\n## Not run: \n##D \n##D # load the data \n##D # given in supplemental as XLS sheets\n##D # converted to separate tab-delimited text files\n##D \n##D # aM: budding/bifurcating morphospecies phylogeny\n##D foramAM <- read.table(file.choose(),stringsAsFactors = FALSE,header = TRUE)\n##D # aL: budding/bifurcating lineage phylogeny\n##D foramAL <- read.table(file.choose(),stringsAsFactors = FALSE,header = TRUE)\n##D # aMb: fully bifurcating morphospecies phylogeny\n##D foramAMb <- read.table(file.choose(),stringsAsFactors = FALSE,header = TRUE)\n##D # aLb: fully bifurcating lineage phylogeny\n##D foramALb <- read.table(file.choose(),stringsAsFactors = FALSE,header = TRUE)\n##D \n##D save.image(\"macroperforateForam.rdata\")\n##D \n## End(Not run)\n\n# or instead, we'll just load the data directly\ndata(macroperforateForam)\n\n#Two distinctions among the four datasets:\n#(1): morphospecies vs morphospecies combined into sequences of anagenetic\n\t# morphospecies referred to as 'lineages'. Thus far more morphospecies\n\t# than lineages. 
The names of lineages are given as the sequence of\n\t# their respective component morphospecies.\n#(2): Datasets where taxon units (morphospecies or lineages) are broken up\n\t# at 'budding' branching events (where the ancestral taxon persists)\n\t# so that final dataset is 'fully bifurcating', presumably\n\t# to make comparison easier to extant-taxon only datasets.\n\t# (This isn't a limitation for paleotree, though!).\n\t# This division of taxon units requires abstracting the taxon IDs,\n\t# requiring another column for Species Name.\n\ndim(foramAM)\ndim(foramAL)\ndim(foramAMb)\ndim(foramALb)\n\n#Need to convert these to same format as fossilRecord2fossilTaxa output.\n\t#those 'taxa' tables has 6 columns:\n\t#taxon.id ancestor.id orig.time ext.time still.alive looks.like\n\n#for the purposes of this, we'll make taxon.id = looks.like\n\t# (That's only for simulating cryptic speciation anyway)\n#still.alive should be TRUE (1) if ext.time = 0\n\n#a function to convert Aze et al's suppmat to paleotree-readable format\n\ncreateTaxaData <- function(table){\n\t#reorder table by first appearance time\n\ttable <- table[order(-as.numeric(table[,3])),]\n\tID <- 1:nrow(table)\n\tanc <- sapply(table[,2],function(x)\n\t\tif(!is.na(x)){\n\t\t\twhich(x == table[,1])\n\t\t}else{ NA })\n\tstillAlive <- as.numeric(table[,4] == 0)\n\tages <- cbind(as.numeric(table[,3]),as.numeric(table[,4]))\n\tres <- cbind(ID,anc,ages,stillAlive,ID)\n\tcolnames(res) <- c('taxon.id','ancestor.id','orig.time',\n\t\t'ext.time','still.alive','looks.like')\n\trownames(res) <- table[,1]\n\treturn(res)\n\t}\n\ntaxaAM <- createTaxaData(foramAM)\ntaxaAMb <- createTaxaData(foramAMb)\ntaxaAL <- createTaxaData(foramAL)\ntaxaALb <- createTaxaData(foramALb)\n\n##################################\n\n#Checking Ancestor-Descendant Relationships for Irregularities\n\n#For each of these, there should only be a single taxon\n\t# without a parent listed (essentially, the root ancestor)\n\ncountParentsWithoutMatch <- function(table){\n \tparentMatch <- match(unique(table[,2]),table[,1])\n \tsum(is.na(parentMatch))\n\t}\n\n#test this on the provided ancestor-descendant relationships\ncountParentsWithoutMatch(foramAM)\ncountParentsWithoutMatch(foramAL)\ncountParentsWithoutMatch(foramAMb)\ncountParentsWithoutMatch(foramALb)\n\n#and on the converted datasets\ncountParentsWithoutMatch(taxaAM)\ncountParentsWithoutMatch(taxaAL)\ncountParentsWithoutMatch(taxaAMb)\ncountParentsWithoutMatch(taxaALb)\n\n## No test: \n \n\n#can construct the parentChild2taxonTree\n\t#using the ancestor-descendant relationships \n\n#can be very slow...\n\ntreeAM <- parentChild2taxonTree(foramAM[,2:1])\ntreeAL <- parentChild2taxonTree(foramAL[,2:1])\ntreeAMb <- parentChild2taxonTree(foramAMb[,2:1])\ntreeALb <- parentChild2taxonTree(foramALb[,2:1])\n\nlayout(matrix(1:4,2,2))\nplot(treeAM,main = 'treeAM',show.tip.label = FALSE)\nplot(treeAL,main = 'treeAL',show.tip.label = FALSE)\nplot(treeAMb,main = 'treeAMb',show.tip.label = FALSE)\nplot(treeALb,main = 'treeALb',show.tip.label = FALSE)\n\n# FYI \n# in case you were wondering\n# you would *not* time-scale these Frankenstein monsters\n\n## End(No test)\n\n###########################################\n\n# Checking stratigraphic ranges\n\n# do all first occurrence dates occur before last occurrence dates?\n\t# we'll check the original datasets here\n\ncheckFoLo <- function(data){\n\tdiffDate <- data[,3]-data[,4]\t#subtract LO from FO\n\tisGood <- all(diffDate >= 0)\t#is it 
good\n\treturn(isGood)\n\t}\n\ncheckFoLo(foramAM)\ncheckFoLo(foramAL)\ncheckFoLo(foramAMb)\ncheckFoLo(foramALb)\n\n#cool, but do all ancestors appear before their descendants?\n\t# easier to check unified fossilRecord2fossilTaxa format here\n\ncheckAncOrder <- function(taxa){\n\t#get ancestor's first occurrence\n\tancFO <- taxa[taxa[,2],3]\n\t#get descendant's first occurrence\t\n\tdescFO <- taxa[,3]\n\tdiffDate <- ancFO-descFO\t#subtract descFO from ancFO\n\t#remove NAs due to root taxon\n\tdiffDate <- diffDate[!is.na(diffDate)]\n\tisGood <- all(diffDate >= 0)\t#is it all good\t\n\treturn(isGood)\n\t}\n\ncheckAncOrder(taxaAM)\ncheckAncOrder(taxaAL)\ncheckAncOrder(taxaAMb)\ncheckAncOrder(taxaALb)\n\n#now, are there gaps between the last occurrence of ancestors\n\t# and the first occurrence of descendants?\n\t# (shall we call these 'stratophenetic ghost branches'?!)\n\t# These shouldn't be problematic, but do they occur in this data?\n# After all, fossilRecord2fossilTaxa output tables are designed for\n\t # fully observed simulated fossil records with no gaps.\n\nsumAncDescGap <- function(taxa){\n\t#get ancestor's last occurrence\n\tancLO <- taxa[taxa[,2],4]\n\t#get descendant's first occurrence\t\n\tdescFO <- taxa[,3]\n\tdiffDate <- ancLO-descFO\t#subtract descFO from ancFO\n\t#remove NAs due to root taxon\n\tdiffDate <- diffDate[!is.na(diffDate)]\n\t#should be negative or zero, positive values are gaps\n\tgaps <- c(0,diffDate[diffDate>0])\n\tsumGap <- sum(gaps)\n\treturn(sumGap)\n\t}\n\n#get the total gap between ancestor LO and child FO\nsumAncDescGap(taxaAM)\nsumAncDescGap(taxaAL)\nsumAncDescGap(taxaAMb)\nsumAncDescGap(taxaALb)\n\n#It appears there are *no* gaps between ancestors and their descendants\n\t#in the Aze et al. foram dataset... wow!\n\n###############\n\n## No test: \n \n\n# Creating time-scaled phylogenies from the Aze et al. data\n\n# Aze et al. (2011) defines anagenesis such that taxa may overlap\n# in time during a transitional period (see Ezard et al. 2012\n# for discussion of this definition). Thus, we would expect that\n# paleotree obtains very different trees for morphospecies versus\n# lineages, but very similar phylogenies for datasets where budding\n# taxa are retained or arbitrarily broken into bifurcating units.\n\n# We can use the function taxa2phylo to directly create\n# time-scaled phylogenies from the Aze et al. stratophenetic data\n\ntimetreeAM <- taxa2phylo(taxaAM)\ntimetreeAL <- taxa2phylo(taxaAL)\ntimetreeAMb <- taxa2phylo(taxaAMb)\ntimetreeALb <- taxa2phylo(taxaALb)\n\nlayout(matrix(1:4,2,2))\nplot(timetreeAM,main = 'timetreeAM',show.tip.label = FALSE)\naxisPhylo()\nplot(timetreeAL,main = 'timetreeAL',show.tip.label = FALSE)\naxisPhylo()\nplot(timetreeAMb,main = 'timetreeAMb',show.tip.label = FALSE)\naxisPhylo()\nplot(timetreeALb,main = 'timetreeALb',show.tip.label = FALSE)\naxisPhylo()\n\n#visually compare the two pairs we expect to be close to identical\n\n#morphospecies\nlayout(1:2)\nplot(timetreeAM,main = 'timetreeAM',show.tip.label = FALSE)\naxisPhylo()\nplot(timetreeAMb,main = 'timetreeAMb',show.tip.label = FALSE)\naxisPhylo()\n\n#lineages\nlayout(1:2)\nplot(timetreeAL,main = 'timetreeAL',show.tip.label = FALSE)\naxisPhylo()\nplot(timetreeALb,main = 'timetreeALb',show.tip.label = FALSE)\naxisPhylo()\n\nlayout(1)\n\n#compare the summary statistics of the trees\nNtip(timetreeAM)\nNtip(timetreeAMb)\nNtip(timetreeAL)\nNtip(timetreeALb)\n# very different!\n\n# after dropping anagenetic zero-length-terminal-edge ancestors\n# we would expect morphospecies and lineage phylogenies to be very similar\n\n#morphospecies\nNtip(dropZLB(timetreeAM))\nNtip(dropZLB(timetreeAMb))\n#identical!\n\n#lineages\nNtip(dropZLB(timetreeAL))\nNtip(dropZLB(timetreeALb))\n# ah, very close, off by a single tip\n# ...probably a very short ZLB outside tolerance
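\n\n# (hypothetical diagnostic, not in the original: peek at the shortest\n# terminal branch lengths, to see if one sits just above the tolerance\n# that dropZLB treats as 'zero-length')\nsort(timetreeALb$edge.length[timetreeALb$edge[,2] <= Ntip(timetreeALb)])[1:3]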
\n\n#we can create some diversity plots to compare\n\nmultiDiv(data = list(timetreeAM,timetreeAMb),\n\tplotMultCurves = TRUE)\n\nmultiDiv(data = list(timetreeAL,timetreeALb),\n\tplotMultCurves = TRUE)\n\n# we can see that the morphospecies datasets are identical\n\t# that's why we can only see one line\n# some very slight disagreement between the lineage datasets\n\t# around ~30-20 Ma\n\n#can also compare morphospecies and lineages diversity curves\n\nmultiDiv(data = list(timetreeAM,timetreeAL),\n\tplotMultCurves = TRUE)\n\n#they are similar, but some peaks are missing from lineages\n\t# particularly around ~20-10 Ma\n\n\n## End(No test)\n\n\n\n\n"} {"package":"paleotree","topic":"makePBDBtaxonTree","snippet":"### Name: makePBDBtaxonTree\n### Title: Creating a Taxon-Tree from Taxonomic Data Downloaded from the\n### Paleobiology Database\n### Aliases: makePBDBtaxonTree plotTaxaTreePBDB\n\n### ** Examples\n\n# Note that most examples here use argument \n # failIfNoInternet = FALSE so that functions do\n # not error out but simply return NULL if internet\n # connection is not available, and thus\n # fail gracefully rather than error out (required by CRAN).\n# Remove this argument or set to TRUE so functions DO fail\n # when internet resources (paleobiodb) are not available.\n\nset.seed(1)\n\n## No test: \n\n#get some example occurrence and taxonomic data\ndata(graptPBDB)\n\n#get the taxon tree: Linnean method\ngraptTreeLinnean <- makePBDBtaxonTree(\n taxaDataPBDB = graptTaxaPBDB,\n rankTaxon = \"genus\",\n method = \"Linnean\", \n failIfNoInternet = FALSE)\n\n#get the taxon tree: parentChild method\ngraptTreeParentChild <- makePBDBtaxonTree(\n taxaDataPBDB = graptTaxaPBDB,\n rankTaxon = \"genus\",\n method = \"parentChild\", \n failIfNoInternet = FALSE)\n \nif(!is.null(graptTreeParentChild) & \n !is.null(graptTreeLinnean)){\n # if those functions worked...\n # let's plot these and compare them! 
\n plotTaxaTreePBDB(graptTreeParentChild)\n plotTaxaTreePBDB(graptTreeLinnean)\n }\n\n\n# pause 3 seconds so we don't spam the API\nSys.sleep(3)\n\n####################################################\n# let's try some other groups\n\n###################################\n#conodonts\n\nconoData <- getCladeTaxaPBDB(\"Conodonta\", \n failIfNoInternet = FALSE)\n\nif(!is.null(conoData)){ \n \nconoTree <- makePBDBtaxonTree(\n taxaDataPBDB = conoData,\n rankTaxon = \"genus\",\n method = \"parentChild\")\n\n# if it worked, plot it!\nplotTaxaTreePBDB(conoTree)\n\n}\n\n# pause 3 seconds so we don't spam the API\nSys.sleep(3)\n\n#############################\n#asaphid trilobites\n\nasaData <- getCladeTaxaPBDB(\"Asaphida\", \n failIfNoInternet = FALSE)\n \nif(!is.null(asaData)){\n\nasaTree <- makePBDBtaxonTree(\n taxaDataPBDB = asaData,\n rankTaxon = \"genus\",\n method = \"parentChild\")\n\n# if it worked, plot it!\nplotTaxaTreePBDB(asaTree)\n\n}\n\n# pause 3 seconds so we don't spam the API\nSys.sleep(3)\n\n###############################\n#Ornithischia\n\nornithData <- getCladeTaxaPBDB(\"Ornithischia\", \n failIfNoInternet = FALSE)\n\nif(!is.null(ornithData)){\n\nornithTree <- makePBDBtaxonTree(\n taxaDataPBDB = ornithData,\n rankTaxon = \"genus\",\n method = \"parentChild\")\n\n# if it worked, plot it!\nplotTaxaTreePBDB(ornithTree)\n\n# pause 3 seconds so we don't spam the API\nSys.sleep(3)\n\n#try Linnean!\n\n#but first... need to drop repeated taxon first: Hylaeosaurus\n # actually this taxon seems to have been repaired \n # as of September 2019 !\n# findHylaeo <- ornithData$taxon_name == \"Hylaeosaurus\"\n# there's actually only one accepted ID number\n# HylaeoIDnum <- unique(ornithData[findHylaeo,\"taxon_no\"])\n# HylaeoIDnum \n# so, take which one has occurrences listed\n# dropThis <- which((ornithData$n_occs < 1) & findHylaeo)\n# ornithDataCleaned <- ornithData[-dropThis,]\n\nornithTree <- makePBDBtaxonTree(\n ornithData,\n rankTaxon = \"genus\",\n method = \"Linnean\", \n failIfNoInternet = FALSE)\n\n# if it worked, plot it!\nplotTaxaTreePBDB(ornithTree)\n\n}\n\n# pause 3 seconds so we don't spam the API\nSys.sleep(3)\n\n#########################\n# Rhynchonellida\n\nrhynchData <- getCladeTaxaPBDB(\"Rhynchonellida\", \n failIfNoInternet = FALSE)\n \nif(!is.null(rhynchData)){ \n\nrhynchTree <- makePBDBtaxonTree(\n taxaDataPBDB = rhynchData,\n rankTaxon = \"genus\",\n method = \"parentChild\")\n\n # if it worked, plot it!\n plotTaxaTreePBDB(rhynchTree)\n }\n\n#some of these look pretty messy!\n\n## End(No test)\n\n\n\n"} {"package":"paleotree","topic":"minBranchLength","snippet":"### Name: minBranchLength\n### Title: Scales Edge Lengths of a Phylogeny to a Minimum Branch Length\n### Aliases: minBranchLength minBranchLen minimumBranchLen\n### minimumBranchLength\n\n### ** Examples\n\n\n#simulation with an example non-ultrametric tree\n\ntree <- rtree(20)\n# randomly replace edges with ZLBs\n # similar to multi2di output\ntree <- degradeTree(tree,0.3,\n leave.zlb = TRUE) \t\n\ntree2 <- minBranchLength(tree,0.1)\n\nlayout(1:2)\n\nplot(tree)\naxisPhylo()\nplot(tree2)\naxisPhylo()\n\nlayout(1)\n\n\n#now let's try it with an ultrametric case\n\n# get a random tree\ntree <- rtree(30)\n# randomly replace edges with ZLBs\n # similar to multi2di output\ntree <- degradeTree(tree,0.5,leave.zlb = TRUE) \n# now randomly resolve\t\ntree <- di2multi(tree)\n# give branch lengths so its ultrametric\ntree <- compute.brlen(tree)\n\n# and we have an ultrametric tree with polytomies, yay!\nplot(tree) \n\n# now 
randomly resolve\ntree2 <- multi2di(tree)\n# get new branch lengths as would with real data\ntree2 <- minBranchLength(tree2,0.1)\n\nlayout(1:2)\nplot(tree,show.tip.label = FALSE)\naxisPhylo()\nplot(tree2,show.tip.label = FALSE)\naxisPhylo()\n\nlayout(1)\n\n# check that root ages aren't being left unmodified\n # create a tree with lots of ZBLs at the root\nx <- stree(10)\nx$edge.length <- runif(Nedge(x))\nx <- multi2di(x)\n# give it a root age\nx$root.time <- max(node.depth.edgelength(x))\n\nz <- minBranchLength(tree = x, mbl = 1)\nplot(z)\n\n\n\n"} {"package":"paleotree","topic":"minCharChange","snippet":"### Name: minCharChange\n### Title: Estimating the Minimum Number of Character Transitions Using\n### Maximum Parsimony\n### Aliases: minCharChange ancPropStateMat\n\n### ** Examples\n\n## No test: \n# let's write a quick & dirty ancestral trait plotting function\n\nquickAncPlotter <- function(tree, ancData, cex){\n ancCol <- (1:ncol(ancData))+1\n plot(tree,\n show.tip.label = FALSE,\n no.margin = TRUE, \n direction = \"upwards\")\n tiplabels(pch = 16,\n pie = ancData[(1:Ntip(tree)),],\n cex = cex,\n piecol = ancCol,\n col = 0)\n nodelabels(pie = ancData[-(1:Ntip(tree)),],\n cex = cex,\n piecol = ancCol)\t\n }\n\n# example with retiolitid graptolite data\n\ndata(retiolitinae)\n\n#unordered, MPR\nancMPR <- ancPropStateMat(retioTree, \n trait = retioChar[,2], \n type = \"MPR\")\nquickAncPlotter(retioTree,\n ancMPR, cex = 0.5)\ntext(x = 4,y = 5,\n \"type = 'MPR'\", cex = 1.5)\n\nminCharChange(retioTree,\n trait = retioChar[,2],\n type = \"MPR\")\n\n# with simulated data\n\nset.seed(444)\ntree <- rtree(50)\n#simulate under a likelihood model\nchar <- rTraitDisc(tree, \n k = 3, rate = 0.7)\ntree$edge.length <- NULL\ntree <- ladderize(tree)\n\n#unordered, MPR\nancMPR <- ancPropStateMat(tree, \n trait = char, \n type = \"MPR\")\n#unordered, ACCTRAN\nancACCTRAN <- ancPropStateMat(tree, \n trait = char, \n type = \"ACCTRAN\")\n#ordered, MPR\nancMPRord <- ancPropStateMat(tree, \n trait = char, \n orderedChar = TRUE, \n type = \"MPR\")\n\n#let's compare MPR versus ACCTRAN results\nlayout(1:2)\nquickAncPlotter(tree,\n ancMPR, cex = 0.3)\ntext(x = 8, y = 15,\n \"type = 'MPR'\", cex = 1.5)\nquickAncPlotter(tree,\n ancACCTRAN, cex = 0.3)\ntext(x = 9, y = 15,\n \"type = 'ACCTRAN'\",cex = 1.5)\n \n# MPR has much more uncertainty in node estimates\n\t # but that doesn't mean ACCTRAN is preferable\n\n#let's compare unordered versus ordered under MPR\nlayout(1:2)\nquickAncPlotter(tree,\n ancMPR, cex = 0.3)\ntext(x = 8, y = 15,\n \"unordered char\\nMPR\", cex = 1.5)\nquickAncPlotter(tree,\n ancMPRord,cex = 0.3)\ntext(x = 9, y = 15,\n \"ordered char\\nMPR\", cex = 1.5)\nlayout(1)\n\n\n## Not run: \n##D # what ancPropStateMat automates (with lots of checks):\n##D \n##D require(phangorn)\n##D char1 <- matrix(char,,1)\n##D rownames(char1) <- names(char)\n##D #translate into something for phangorn to read\n##D char1 <- phangorn::phyDat(char1,\n##D type = \"USER\",\n##D levels = sort(unique(char1))\n##D )\n##D x <- phangorn::ancestral.pars(tree,\n##D char1,type = \"MPR\")\n##D y <- phangorn::ancestral.pars(tree,\n##D char1,type = \"ACCTRAN\")\n## End(Not run)\n\n#estimating minimum number of transitions with MPR \nminCharChange(tree,\n trait = char,\n type = \"MPR\")\n\n # and now with ACCTRAN\n minCharChange(tree,\n trait = char,\n type = \"ACCTRAN\")\n\n#POLYMORPHISM IN CHARACTER DATA\n\n\n# example trait data with a polymorphic taxon\n # separated with '&' symbol\n# similar to polymorphic data output by 
ReadMorphNexus from package Claddis\ncharPoly <- as.character(\n c(1,2,NA,0,0,1,\"1&2\",\n 2,0,NA,0,2,1,1,\"1&2\")\n )\n#simulate a tree with 15 taxa\nset.seed(444)\ntree <- rtree(15)\ntree$edge.length <- NULL\ntree <- ladderize(tree)\nnames(charPoly) <- tree$tip.label\ncharPoly\n\n# need a contrast matrix that takes this into account\n #can build row by row, by hand\n\n#first, build contrast matrix for basic states\ncontrast012 <- rbind(c(1,0,0),\n c(0,1,0),\n c(0,0,1))\ncolnames(contrast012) <- rownames(contrast012) <- 0:2\ncontrast012\n\n#add polymorphic state and NA ambiguity as new rows\ncontrastPoly <- c(0,1,1)\ncontrastNA <- c(1,1,1)\ncontrastNew <- rbind(contrast012,\n '1&2' = contrastPoly,\n contrastNA)\nrownames(contrastNew)[5] <- NA\n\n#let's look at contrast\ncontrastNew\n\n# now try this contrast table we've assembled\n # default: unordered, MPR\nancPoly <- ancPropStateMat(tree, \n trait = charPoly, \n contrast = contrastNew)\n\n# but...!\n# we can also do it automatically, \n # by default, states with '&' are automatically treated\n # as polymorphic character codings by ancPropStateMat\nancPolyAuto <- ancPropStateMat(tree,\n trait = charPoly, \n polySymbol = \"&\")\n\n# but does this match the table we constructed?\nancPropStateMat(tree, \n trait = charPoly,\n\t\t polySymbol = \"&\", \n returnContrast = TRUE)\n\n# compare to contrastNew above!\n# only difference should be the default ambiguous\n\t# character '?' is added to the table\n\n#compare reconstructions\nlayout(1:2)\nquickAncPlotter(tree,\n ancPoly, cex = 0.5)\ntext(x = 3.5, y = 1.2,\n \"manually-constructed\\ncontrast\", cex = 1.3)\nquickAncPlotter(tree,\n ancPolyAuto, cex = 0.5)\ntext(x = 3.5, y = 1.2,\n \"auto-constructed\\ncontrast\", cex = 1.3)\nlayout(1)\n\n# look pretty similar!\n\n# i.e. the default polySymbol = \"&\", but could be a different symbol\n # such as \",\" or \"\\\"... it can only be *one* symbol, though
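\n\n# (hypothetical illustration, not from the original docs: re-code the\n# polymorphic entries with a different single symbol and pass it via polySymbol)\ncharPolyComma <- gsub(\"&\", \",\", charPoly, fixed = TRUE)\nancPropStateMat(tree, trait = charPolyComma, polySymbol = \",\")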
\n\n# all of this machinery should function just fine in minCharChange\n\t\t# again, by default polySymbol = \"&\" (included anyway here for kicks)\nminCharChange(tree, \n trait = charPoly, \n polySymbol = \"&\")\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"modelMethods","snippet":"### Name: modelMethods\n### Title: Model Function Methods: Parameter Names, Bounds and Initial\n### Values\n### Aliases: modelMethods parnames parbounds parbounds.constrained\n### parbounds.paleotreeFunc parInit parInit.constrained\n### parInit.paleotreeFunc parLower parLower.constrained\n### parLower.paleotreeFunc parnames.constrained parnames.paleotreeFunc\n### parUpper parUpper.constrained parUpper.paleotreeFunc parnames<-\n### parnames<-.constrained parnames<-.paleotreeFunc parbounds<-\n### parbounds<-.constrained parbounds<-.paleotreeFunc parLower<-\n### parLower<-.constrained parLower<-.paleotreeFunc parUpper<-\n### parUpper<-.constrained parUpper<-.paleotreeFunc\n\n### ** Examples\n\n#example with make_durationFreqCont\nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\nrangesCont <- sampleRanges(taxa,r = 0.5)\nlikFun <- make_durationFreqCont(rangesCont)\n\n#get parameter names\nparnames(likFun)\n\n#get the bounds for those parameters\nparbounds(likFun)\n\n#can also get these separately\nparLower(likFun)\nparUpper(likFun)\n\n#initial parameter values\nparInit(likFun) #arbitrary midway value between par bounds\n\n#can then use these in optimizers, such as optim with L-BFGS-B\n#see the example for make_durationFreqCont
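\n\n# (a minimal sketch of that optim call, assuming the likFun built above;\n# commented out just to keep this example quick)\n# optim(parInit(likFun), likFun,\n# lower = parLower(likFun), upper = parUpper(likFun),\n# method = \"L-BFGS-B\", control = list(maxit = 1000000))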
\n\n#renaming parameter names\nlikFun2 <- likFun\nparnames(likFun2) <- c(\"extRate\",\"sampRate\")\nparnames(likFun2)\n#test if reset correctly\nparnames(likFun2) == c(\"extRate\",\"sampRate\")\n#also works for constrained functions\nconstrainFun <- constrainParPaleo(likFun,q.1~r.1)\nparnames(constrainFun)\n#also modified the parameter bounds, see!\nparbounds(constrainFun)\nparInit(constrainFun)\n#but cannot rename parameters for a constrained function!\n\n\n\n"} {"package":"paleotree","topic":"modifyTerminalBranches","snippet":"### Name: modifyTerminalBranches\n### Title: Modify, Drop or Bind Terminal Branches of Various Types (Mainly\n### for Paleontological Phylogenies)\n### Aliases: modifyTerminalBranches dropZLB dropExtinct dropExtant\n### addTermBranchLength dropPaleoTip bindPaleoTip\n\n### ** Examples\n\n\nset.seed(444)\n# Simulate some fossil ranges with simFossilRecord\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1, \n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record \n # with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.5)\n# Now let's make a tree using taxa2phylo\ntree <- taxa2phylo(taxa,obs_time = rangesCont[,2])\n# compare the two trees\nlayout(1:2)\nplot(ladderize(tree))\nplot(ladderize(dropZLB(tree)))\n\n# reset\nlayout(1)\n\n\n# example using dropExtinct and dropExtant\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1, \n nTotalTaxa = c(30,40), \n nExtant = c(10,20)\n )\ntaxa <- fossilRecord2fossilTaxa(record)\ntree <- taxa2phylo(taxa)\nphyloDiv(tree)\ntree1 <- dropExtinct(tree)\nphyloDiv(tree1)\ntree2 <- dropExtant(tree)\nphyloDiv(tree2)\n\n\n# example using addTermBranchLength\nset.seed(444)\ntreeA <- rtree(10)\ntreeB <- addTermBranchLength(treeA,1)\ncompareTermBranches(treeA,treeB)\n\n#########################\n# test dropPaleoTip\n\t# (and fixRootTime by extension...)\n\n# simple example\ntree <- read.tree(text = \"(A:3,(B:2,(C:5,D:3):2):3);\")\ntree$root.time <- 10\nplot(tree, no.margin = FALSE)\naxisPhylo()\n\n# now a series of tests, dropping various tips\n(test <- dropPaleoTip(tree,\"A\")$root.time) # = 7\n(test[2] <- dropPaleoTip(tree,\"B\")$root.time) # = 10\n(test[3] <- dropPaleoTip(tree,\"C\")$root.time) # = 10\n(test[4] <- dropPaleoTip(tree,\"D\")$root.time) # = 10\n(test[5] <- dropPaleoTip(tree,c(\"A\",\"B\"))$root.time) # = 5\n(test[6] <- dropPaleoTip(tree,c(\"B\",\"C\"))$root.time) # = 10\n(test[7] <- dropPaleoTip(tree,c(\"A\",\"C\"))$root.time) # = 7\n(test[8] <- dropPaleoTip(tree,c(\"A\",\"D\"))$root.time) # = 7\n\n# is it all good? if not, fail so paleotree fails...\nif(!identical(test,c(7,10,10,10,5,10,7,7))){\n stop(\"fixRootTime fails!\")\n }\n\n\n##############\n# testing bindPaleoTip\n\n# simple example \ntree <- read.tree(text = \"(A:3,(B:2,(C:5,D:3):2):3);\")\ntree$root.time <- 20\nplot(tree, no.margin = FALSE)\naxisPhylo()\n\n## Not run: \n##D \n##D require(phytools)\n##D \n##D # bindPaleoTip effectively wraps bind.tip from phytools\n##D # using a conversion like below\n##D \n##D tipAge <- 5\n##D node <- 6\n##D \n##D # the new tree length (tip to root depth) should be:\n##D # new length = the root time - tipAge - nodeheight(tree,node)\n##D \n##D newLength <- tree$root.time-tipAge-nodeheight(tree,node)\n##D tree1 <- bind.tip(tree,\n##D \"tip.label\",\n##D where = node,\n##D edge.length = newLength)\n##D \n##D layout(1:2)\n##D plot(tree)\n##D axisPhylo()\n##D plot(tree1)\n##D axisPhylo()\n##D \n##D # reset\n##D layout(1)\n##D \n## End(Not run)\n\n# now with bindPaleoTip\n\ntree1 <- bindPaleoTip(tree,\"new\",nodeAttach = 6,tipAge = 5)\n\nlayout(1:2)\nplot(tree)\naxisPhylo()\nplot(tree1)\naxisPhylo()\n\n# reset\nlayout(1)\n\n#then the tip age of \"new\" should be 5\ntest <- dateNodes(tree1)[which(tree1$tip.label == \"new\")] == 5\nif(!test){\n stop(\"bindPaleoTip fails!\")\n }\n\n# with positionBelow\n\ntree1 <- bindPaleoTip(\n tree,\n \"new\",\n nodeAttach = 6,\n tipAge = 5,\n positionBelow = 1\n )\n\nlayout(1:2)\nplot(tree)\naxisPhylo()\nplot(tree1)\naxisPhylo()\n\n# reset\nlayout(1)\n\n# at the root\n\ntree1 <- bindPaleoTip(\n tree,\n \"new\", \n nodeAttach = 5,\n tipAge = 5)\n\nlayout(1:2)\nplot(tree)\naxisPhylo()\nplot(tree1)\naxisPhylo()\n\n# reset\nlayout(1)\n\n#then the tip age of \"new\" should be 5\ntest <- dateNodes(tree1)[which(tree1$tip.label == \"new\")] == 5\nif(!test){\n stop(\"bindPaleoTip fails!\")\n }\n\n# at the root with positionBelow\n\ntree1 <- bindPaleoTip(tree,\"new\",nodeAttach = 5,tipAge = 5,\n\tpositionBelow = 3)\n\nlayout(1:2)\nplot(tree)\naxisPhylo()\nplot(tree1)\naxisPhylo()\n\n# reset\nlayout(1)\n\n#then the tip age of \"new\" should be 5\ntest <- dateNodes(tree1)[which(tree1$tip.label == \"new\")] == 5\n#and the root age should be 23\ntest1 <- tree1$root.time == 23\nif(!test | !test1){\n stop(\"bindPaleoTip fails!\")\n }\n\n\n\n"} {"package":"paleotree","topic":"multiDiv","snippet":"### Name: multiDiv\n### Title: Calculating Diversity Curves Across Multiple Datasets\n### Aliases: multiDiv plotMultiDiv\n\n### ** Examples\n\n# let's look at this function\n # with some birth-death simulations\n\nset.seed(444)\n\n# multiDiv can take output from simFossilRecord\n # via fossilRecord2fossilTaxa\n\n# what do many simulations run under some set of\n # conditions 'look' 
like on average?\nset.seed(444)\nrecords <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 10,\n totalTime = 30, \n plot = TRUE\n )\n\ntaxa <- lapply(records, fossilRecord2fossilTaxa)\n\nmultiDiv(taxa)\n# increasing cone of diversity! \n\n# Its even better on a log scale:\nmultiDiv(taxa, plotLogRich = TRUE)\n\n#######################################\n# pure-birth example with simFossilRecord\n# note that conditioning is tricky\n\nset.seed(444)\nrecordsPB <- simFossilRecord(\n p = 0.1, \n q = 0, \n nruns = 10,\n totalTime = 30,\n plot = TRUE\n )\n \ntaxaPB <- lapply(recordsPB, fossilRecord2fossilTaxa)\nmultiDiv(taxaPB, plotLogRich = TRUE)\n\n#compare many discrete diversity curves\ndiscreteRanges <- lapply(taxaPB, function(x)\n binTimeData(\n sampleRanges(x, \n r = 0.5,\n min.taxa = 1\n ),\n int.length = 7)\n )\n\nmultiDiv(discreteRanges)\n\n#########################################\n# plotting a multi-diversity curve for\n # a sample of stochastic dated trees\n\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0)\n \ntaxa <- fossilRecord2fossilTaxa(record)\nrangesCont <- sampleRanges(taxa, r = 0.5)\nrangesDisc <- binTimeData(rangesCont,\n int.length = 1)\n# get the cladogram\ncladogram <- taxa2cladogram(taxa, plot = TRUE)\n\n#using multiDiv with samples of trees\nttrees <- timePaleoPhy(\n cladogram, \n rangesCont, \n type = \"basic\",\n randres = TRUE, \n ntrees = 10, \n add.term = TRUE\n )\n \nmultiDiv(ttrees)\n\n# uncertainty in diversity history is solely due to \n # the random resolution of polytomies\n\n######################################################### \n\n#using multiDiv to compare very different data types:\n # continuous ranges, discrete ranges, dated tree\n\n# get a single dated tree\nttree <- timePaleoPhy(\n cladogram, \n rangesCont, \n type = \"basic\", \n add.term = TRUE, \n plot = FALSE\n )\n \n# put them altogether in a list\ninput <- list(rangesCont, rangesDisc, ttree)\n\nmultiDiv(input, plot = TRUE)\n\n# what happens if we use fixed interval times?\nmultiDiv(input, \n int.times = rangesDisc[[1]], \n plot = TRUE)\n\nlayout(1)\n\n\n\n"} {"package":"paleotree","topic":"nearestNeighborDist","snippet":"### Name: nearestNeighborDist\n### Title: Nearest Neighbor Distances for Morphological Disparity Studies\n### Aliases: nearestNeighborDist\n\n### ** Examples\n\n#example using graptolite disparity data from Bapst et al. 
2012\n\n#load data\ndata(graptDisparity)\n\n#calculate mean NND\nNND <- nearestNeighborDist(graptDistMat)\nmean(NND)\n\n#calculate NND for different groups\n\n#group (clade/paraclade) coding\ngroupID <- graptCharMatrix[,54]+1\n\ngroupNND <- numeric(7)\nnames(groupNND) <- c(\"Normalo.\",\"Monogr.\",\"Climaco.\",\n \"Dicrano.\",\"Lasiogr.\",\"Diplogr.\",\"Retiol.\")\nfor(i in unique(groupID)){\n groupNND[i] <- mean(nearestNeighborDist(\n graptDistMat[groupID == i,groupID == i]))\n }\ngroupNND\n\n#the paraphyletic Normalograptids that survived the HME are most clustered\n #but this looks at all the species at once\n #and doesn't look for the nearest *co-extant* neighbor!\n #need to bring in temporal info to test that\n\n\n\n"} {"package":"paleotree","topic":"nodeDates2branchLengths","snippet":"### Name: nodeDates2branchLengths\n### Title: Obtaining Edge Lengths for Undated Phylogenies Using Known\n### Branching Node and Tip Ages\n### Aliases: nodeDates2branchLengths\n\n### ** Examples\n\nset.seed(444)\n\n# we'll do a number of tests, let's check at the end that all are TRUE\ntests <- logical()\n\n# with a non-ultrametric tree\nchrono <- rtree(10)\n# make an undated tree\nnotChrono <- chrono\nnotChrono$edge.length <- NULL\n\n# now lets try with dateNodes in paleotree\nnodeTimes <- dateNodes(chrono)\n# need to use allTipsModern = FALSE because tip ages are included\nchronoRedux <- nodeDates2branchLengths(tree = notChrono,\n nodeDates = nodeTimes, allTipsModern = FALSE)\n# test that its the same\n(tests <- c(tests,all.equal.numeric(chrono$edge.length,chronoRedux$edge.length)))\n\n######################################\n# modern ultrametric tree\nchrono <- rcoal(10)\n# make an undated tree\nnotChrono <- chrono\nnotChrono$edge.length <- NULL\n\n# with ultrametric trees, you could just use ape's compute.brtime \n\n# getting branching times with ape\nbranchingTimes <- branching.times(chrono)\t\n# setting those branching times with ape\nchronoRedux <- compute.brtime(notChrono, branchingTimes)\n# test that its the same\n(tests <- c(tests,all.equal.numeric(chrono$edge.length,chronoRedux$edge.length)))\n\n# lets do the same thing but with nodeDates2branchLengths\n\n# can use branching.times from ape \n\t# (but only for ultrametric trees!)\nchronoRedux <- nodeDates2branchLengths(tree = notChrono,\n nodeDates = branchingTimes, allTipsModern = TRUE)\n# test that its the same\n(tests <- c(tests,all.equal.numeric(chrono$edge.length,chronoRedux$edge.length)))\n\n# now lets try with dateNodes in paleotree\nnodeTimes <- dateNodes(chrono)\n# need to use allTipsModern = FALSE because tip ages are included\nchronoRedux <- nodeDates2branchLengths(tree = notChrono,\n nodeDates = nodeTimes, allTipsModern = FALSE)\n# test that its the same\n(tests <- c(tests,all.equal.numeric(chrono$edge.length,chronoRedux$edge.length)))\n\n# get just the node times (remove tip dates)\nnodeOnlyTimes <- nodeTimes[-(1:Ntip(chrono))]\n# let's use the allTipsModern = TRUE setting\nchronoRedux <- nodeDates2branchLengths(tree = notChrono,\n nodeDates = nodeOnlyTimes, allTipsModern = TRUE)\n# test that its the same\n(tests <- c(tests,all.equal.numeric(chrono$edge.length,chronoRedux$edge.length)))\n\n# did all tests come out as TRUE?\nif(!all(tests)){stop(\"nodeDates2branchLengths isn't functioning correctly\")}\n\n\n\n"} {"package":"paleotree","topic":"obtainDatedPosteriorTreesMrB","snippet":"### Name: obtainDatedPosteriorTreesMrB\n### Title: Get the Sample of Posterior Trees from a Dated Phylogenetic\n### Analysis with MrBayes (Or a Summary 
Tree, such as the MCCT)\n### Aliases: obtainDatedPosteriorTreesMrB\n\n### ** Examples\n\n## Not run: \n##D \n##D MCCT <- obtainDatedPosteriorTreesMrB(\n##D \trunFile = \"C:\\\\myTipDatingAnalysis\\\\MrB_run_fossil_05-10-17.nex.run1.t\",\n##D \tnRuns = 2, \n##D \tburnin = 0.5,\n##D \toutputTrees = \"MCCT\", \n##D \tfile = NULL)\n##D \n##D MAP <- obtainDatedPosteriorTreesMrB(\n##D \trunFile = \"C:\\\\myTipDatingAnalysis\\\\MrB_run_fossil_05-10-17.nex.run1.t\",\n##D \tnRuns = 2, \n##D \tburnin = 0.5, \n##D \tgetFixedTimes = TRUE,\n##D \toutputTrees = \"MAPosteriori\", \n##D \tfile = NULL)\n##D \n##D # get a root age from the fixed ages for tips\n##D setRootAge(tree = MAP)\n##D \n##D #pull a hundred trees randomly from the posterior\n##D hundredRandomlySelectedTrees <- obtainDatedPosteriorTreesMrB(\n##D \trunFile = \"C:\\\\myTipDatingAnalysis\\\\MrB_run_fossil_05-10-17.nex.run1.t\",\n##D \tnRuns = 2, \n##D \tburnin = 0.5, \n##D \tgetFixedTimes = TRUE,\n##D \tgetRootAges = TRUE,\n##D \toutputTrees = 100, \n##D \tfile = NULL)\n##D \n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"occData2timeList","snippet":"### Name: occData2timeList\n### Title: Converting Occurrences Data to a 'timeList' Data Object\n### Aliases: occData2timeList\n\n### ** Examples\n\ndata(graptPBDB)\n\ngraptOccSpecies <- taxonSortPBDBocc(\n data = graptOccPBDB,\n rank = \"species\",\n onlyFormal = FALSE)\ngraptTimeSpecies <- occData2timeList(occList = graptOccSpecies)\n\nhead(graptTimeSpecies[[1]])\nhead(graptTimeSpecies[[2]])\n\ngraptOccGenus <- taxonSortPBDBocc(\n data = graptOccPBDB,\n rank = \"genus\",\n onlyFormal = FALSE\n )\ngraptTimeGenus <- occData2timeList(occList = graptOccGenus)\n\nlayout(1:2)\ntaxicDivDisc(graptTimeSpecies)\ntaxicDivDisc(graptTimeGenus)\n\n# the default interval calculation is \"dateRange\"\n# let's compare to the other option, \"occRange\"\n # but now for graptolite *species*\n\ngraptOccRange <- occData2timeList(\n occList = graptOccSpecies, \n intervalType = \"occRange\"\n )\n\n#we would expect no change in the diversity curve\n #because there are only changes in the\n #earliest bound for the FAD\n #latest bound for the LAD\n#so if we are depicting ranges within maximal bounds\n #dateRange has no effect\nlayout(1:2)\ntaxicDivDisc(graptTimeSpecies)\ntaxicDivDisc(graptOccRange)\n#yep, identical!\n\n#so how much uncertainty was removed by using dateRange?\n\n# write a function for getting uncertainty in first and last\n # appearance dates from a timeList object\nsumAgeUncert <- function(timeList){\n fourDate <- timeList2fourDate(timeList)\n perOcc <- (fourDate[,1] - fourDate[,2]) +\n (fourDate[,3] - fourDate[,4])\n sum(perOcc)\n }\n\n#total amount of uncertainty in occRange dataset\nsumAgeUncert(graptOccRange)\n#total amount of uncertainty in dateRange dataset\nsumAgeUncert(graptTimeSpecies)\n#the difference\nsumAgeUncert(graptOccRange) - sumAgeUncert(graptTimeSpecies)\n#as a proportion\n1 - (sumAgeUncert(graptTimeSpecies) / sumAgeUncert(graptOccRange))\n\n#a different way of doing it\ndateChange <- timeList2fourDate(graptTimeSpecies) - \n timeList2fourDate(graptOccRange)\napply(dateChange, 2, sum)\n#total amount of uncertainty removed by dateRange algorithm\nsum(abs(dateChange))\n\nlayout(1)\n\n\n\n"} {"package":"paleotree","topic":"optimPaleo","snippet":"### Name: optimPaleo\n### Title: Simplified Optimizer for 'paleotree' Likelihood Functions\n### Aliases: optimPaleo\n\n### ** Examples\n\n\n# This function simply replicates optim() as shown below\n # where modelFun is the 
likelihood function\n\n#optim(parInit(modelFun),modelFun,\n#\t\tlower = parLower(modelFun),upper = parUpper(modelFun), \n#\t\tmethod = \"L-BFGS-B\",control = list(maxit = 1000000))\n\n\n"} {"package":"paleotree","topic":"paleotree-package","snippet":"### Name: paleotree-package\n### Title: paleotree: Paleontological and Phylogenetic Analyses of\n### Evolution\n### Aliases: paleotree-package paleotree\n\n### ** Examples\n\n\n# get the package version of paleotree\npackageVersion(\"paleotree\")\n\n# get the citation for paleotree\ncitation(\"paleotree\")\n\n## Simulate some fossil ranges with simFossilRecord\nset.seed(444);\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n\n# let's see what the 'true' diversity curve looks like in this case\n # plot the FADs and LADs with taxicDivCont()\ntaxicDivCont(taxa)\n\n# simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.5)\n\n# plot the diversity curve based on the sampled ranges\nlayout(1:2)\ntaxicDivCont(rangesCont)\n\n# Now let's use binTimeData to bin in intervals of 10 time units\nrangesDisc <- binTimeData(rangesCont,int.length = 10)\n\n# plot with taxicDivDisc\ntaxicDivDisc(rangesDisc)\n\n#compare to the continuous time diversity curve above!\n\nlayout(1)\n\n# taxa2phylo assumes we know speciation events perfectly... what if we don't?\n\n# first, let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\ncladogram <- taxa2cladogram(taxa,plot = TRUE)\n\n# Now let's try timePaleoPhy using the continuous range data\nttree <- timePaleoPhy(cladogram,rangesCont,type = \"basic\",plot = TRUE)\n\n# plot diversity curve\nphyloDiv(ttree,drop.ZLB = TRUE)\n\n# that tree lacked the terminal parts of ranges (tips stop at the taxon FADs)\n# let's add those terminal ranges back on with add.term\nttree <- timePaleoPhy(\n cladogram,\n rangesCont,\n type = \"basic\",\n add.term = TRUE,\n plot = TRUE\n )\n\n# plot diversity curve \nphyloDiv(ttree)\n\n\n\n"} {"package":"paleotree","topic":"parentChild2taxonTree","snippet":"### Name: parentChild2taxonTree\n### Title: Create a Taxonomy-Based Phylogeny ('Taxon Tree') from a Table of\n### Parent-Child Taxon Relationships\n### Aliases: parentChild2taxonTree\n\n### ** Examples\n\n\n#let's create a small, really cheesy example\npokexample <- rbind(\n cbind(\"Squirtadae\", c(\"Squirtle\",\"Blastoise\",\"Wartortle\")),\n c(\"Shelloidea\",\"Lapras\"), c(\"Shelloidea\",\"Squirtadae\"),\n c(\"Pokezooa\",\"Shelloidea\"), c(\"Pokezooa\",\"Parasect\"),\n c(\"Rodentapokemorpha\",\"Linoone\"), c(\"Rodentapokemorpha\",\"Sandshrew\"),\n c(\"Rodentapokemorpha\",\"Pikachu\"), c(\"Hirsutamona\",\"Ursaring\"),\n c(\"Hirsutamona\",\"Rodentapokemorpha\"), c(\"Pokezooa\",\"Hirsutamona\")\n )\n\n#Default: tipSet = 'nonParents'\npokeTree <- parentChild2taxonTree(\n parentChild = pokexample,\n tipSet = \"nonParents\")\nplot(pokeTree)\nnodelabels(pokeTree$node.label)\n\n#Get ALL taxa as tips with tipSet = 'all'\npokeTree <- parentChild2taxonTree(\n parentChild = pokexample,\n tipSet = \"all\")\nplot(pokeTree)\nnodelabels(pokeTree$node.label)\n\n\n## Not run: \n##D \n##D # let's try a dataset where not all the\n##D # taxon relationships lead to a common root\n##D \n##D pokexample_bad <- rbind(\n##D cbind(\"Squirtadae\", c(\"Squirtle\",\"Blastoise\",\"Wartortle\")),\n##D c(\"Shelloidea\",\"Lapras\"), c(\"Shelloidea\",\"Squirtadae\"),\n##D c(\"Pokezooa\",\"Shelloidea\"), 
c(\"Pokezooa\",\"Parasect\"),\n##D c(\"Rodentapokemorpha\",\"Linoone\"), c(\"Rodentapokemorpha\",\"Sandshrew\"),\n##D c(\"Rodentapokemorpha\",\"Pikachu\"), c(\"Hirsutamona\",\"Ursaring\"),\n##D c(\"Hirsutamona\",\"Rodentapokemorpha\"), c(\"Pokezooa\",\"Hirsutamona\"),\n##D c(\"Umbrarcheota\",\"Gengar\")\n##D )\n##D \n##D # this should return an error\n##D # as Gengar doesn't share common root\n##D pokeTree <- parentChild2taxonTree(parentChild = pokexample_bad)\n##D \n##D \n##D # another example, where a taxon is listed as both parent and child\n##D pokexample_bad2 <- rbind(\n##D cbind(\"Squirtadae\", c(\"Squirtle\",\"Blastoise\",\"Wartortle\")),\n##D c(\"Shelloidea\", c(\"Lapras\",\"Squirtadae\",\"Shelloidea\")),\n##D c(\"Pokezooa\",\"Shelloidea\"), c(\"Pokezooa\",\"Parasect\"),\n##D c(\"Rodentapokemorpha\",\"Linoone\"), c(\"Rodentapokemorpha\",\"Sandshrew\"),\n##D c(\"Rodentapokemorpha\",\"Pikachu\"), c(\"Hirsutamona\",\"Ursaring\"),\n##D c(\"Hirsutamona\",\"Rodentapokemorpha\"), c(\"Pokezooa\",\"Hirsutamona\"),\n##D c(\"Umbrarcheota\",\"Gengar\")\n##D )\n##D \n##D #this should return an error, as Shelloidea is its own parent\n##D pokeTree <- parentChild2taxonTree(parentChild = pokexample_bad2)\n##D \n## End(Not run)\n\n\n\n# note that we should even be able to do this\n # with ancestor-descendent pairs from\n # simulated datasets from simFossilRecord, like so:\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, nruns = 1,\n nTotalTaxa = c(30, 40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# need to reorder the columns so parents\n # (ancestors) first, then children \nparentChild2taxonTree(parentChild = taxa[,2:1])\n# now note that it issues a warning that\n # the input wasn't type character\n # and it will be coerced to be such\n\n\n\n"} {"package":"paleotree","topic":"perCapitaRates","snippet":"### Name: perCapitaRates\n### Title: Instantaneous _per-Capita_ Rates of Origination and Extinction\n### from the Fossil Record\n### Aliases: perCapitaRates\n\n### ** Examples\n\n\n## No test: \n\n#with the retiolinae dataset\ndata(retiolitinae)\nperCapitaRates(retioRanges)\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, nruns = 1, nTotalTaxa = c(80,100), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n\n#simulate a fossil record with imperfect sampling with sampleRanges()\nrangesCont <- sampleRanges(taxa,r = 0.5)\n\n#Now let's use binTimeData() to bin in intervals of 5 time units\nrangesDisc <- binTimeData(rangesCont,int.length = 5)\n\n#and get the per-capita rates\nperCapitaRates(rangesDisc)\n\n#on a log scale\nperCapitaRates(rangesDisc,logRates = TRUE)\n\n\n#get mean and median per-capita rates\nres <- perCapitaRates(rangesDisc,plot = FALSE)\n\napply(res[,c(\"pRate\",\"qRate\")],2,mean,na.rm = TRUE)\n\napply(res[,c(\"pRate\",\"qRate\")],2,median,na.rm = TRUE)\n\n##############################\n#with modern taxa\nset.seed(444)\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nExtant = c(10,50)\n )\n \ntaxa <- fossilRecord2fossilTaxa(record)\n\n#simulate a fossil record with imperfect sampling with sampleRanges()\nrangesCont <- sampleRanges(taxa,r = 0.5,,modern.samp.prob = 1)\n\n#Now let's use binTimeData() to bin in intervals of 5 time units\nrangesDisc <- binTimeData(rangesCont,int.length = 5)\n\n#and now get per-capita rates\nperCapitaRates(rangesDisc)\n\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"perfectParsCharTree","snippet":"### Name: 
\n\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"perfectParsCharTree","snippet":"### Name: perfectParsCharTree\n### Title: Simulate a Set of Parsimony-Informative Characters for a\n### Phylogeny\n### Aliases: perfectParsCharTree\n\n### ** Examples\n\ndata(retiolitinae)\n\n#fewer characters than nodes\nperfectParsCharTree(retioTree,nchar = 10)\n\n#same as number of nodes (minus root)\nperfectParsCharTree(retioTree,nchar = 12)\n\n#more characters than the number of nodes\nperfectParsCharTree(retioTree,nchar = 20)\n\n\n"} {"package":"paleotree","topic":"plotOccData","snippet":"### Name: plotOccData\n### Title: Plotting Occurrence Data Across Taxa\n### Aliases: plotOccData\n\n### ** Examples\n\n#load example graptolite PBDB occ dataset\ndata(graptPBDB)\n\n#get formal species\noccSpecies <- taxonSortPBDBocc(graptOccPBDB, rank = \"species\")\n\n#plot it!\nplotOccData(occSpecies)\n\n#this isn't too many occurrences, because there are so few\n #formal grapt species in the PBDB\n\n#genera are messier...\n\n#get formal genera\noccGenus <- taxonSortPBDBocc(graptOccPBDB, rank = \"genus\")\n\n#plot it!\nplotOccData(occGenus)\n\n#some of those genera have occurrences with very large\n #age uncertainties on them!\n\n\n\n"} {"package":"paleotree","topic":"plotPhyloPicTree","snippet":"### Name: plotPhyloPicTree\n### Title: Plot a Phylogeny with Organismal Silhouettes from PhyloPic,\n### Called Via the Paleobiology Database API\n### Aliases: plotPhyloPicTree\n\n### ** Examples\n\n# Note that some examples here use argument \n # failIfNoInternet = FALSE so that functions do\n # not error out but simply return NULL if internet\n # connection is not available, and thus\n # fail gracefully rather than error out (required by CRAN).\n# Remove this argument or set to TRUE so functions DO fail\n # when internet resources (paleobiodb) is not available.\n\n## No test: \n\nlibrary(paleotree)\n\ntaxaAnimals<-c(\"Archaeopteryx\", \"Eldredgeops\",\n \"Corvus\", \"Acropora\", \"Velociraptor\", \"Gorilla\", \n \"Olenellus\", \"Lingula\", \"Dunkleosteus\",\n \"Tyrannosaurus\", \"Triceratops\", \"Giraffa\",\n \"Megatheriidae\", \"Aedes\", \"Histiodella\",\n \"Rhynchotrema\", \"Pecten\", \"Homo\", \"Dimetrodon\",\n \"Nemagraptus\", \"Panthera\", \"Anomalocaris\")\n\nanimalData <-getSpecificTaxaPBDB(taxaAnimals, \n failIfNoInternet = FALSE)\n \nif(!is.null(animalData)){ \n \ntree <- makePBDBtaxonTree(\n animalData, \n rankTaxon = \"genus\", \n failIfNoInternet = FALSE\n ) \n\nplotPhyloPicTree(tree = tree, \n failIfNoInternet = FALSE)\n\n# let's plot upwards but at a funny size\ndev.new(height = 5, width = 10)\nplotPhyloPicTree(tree = tree,\n orientation = \"upwards\", \n failIfNoInternet = FALSE) \n\n# dated tree plotting\n\n#date the tree \ntimeTree <- dateTaxonTreePBDB(tree, minBranchLen = 10)\n\nplotPhyloPicTree(tree = timeTree)\n\n# plotting the dated tree with an axis\nplotPhyloPicTree(\n tree = timeTree,\n depthAxisPhylo = TRUE)\n\n# now upwards!\nplotPhyloPicTree(tree = timeTree,\n orientation = \"upwards\",\n depthAxisPhylo= TRUE)\n\n###################################\n\n# plotting a time tree with stratigraphic ranges\n\nplotPhyloPicTree(tree = timeTree,\n addTaxonStratDurations = TRUE)\n\nplotPhyloPicTree(tree = timeTree,\n addTaxonStratDurations = TRUE,\n orientation = \"upwards\",\n depthAxisPhylo= TRUE)\n\n################################################\n\n# adjusting a tree to ignore a very old root\n\n# let's pretend that metazoans are extremely old\ntreeOldRoot <- timeTree\nrootEdges <- timeTree$edge[,1] == (Ntip(timeTree)+1)\nrootEdgeLen <- 
timeTree$edge.length[rootEdges]\ntreeOldRoot$edge.length[rootEdges] <- rootEdgeLen + 1500\ntreeOldRoot$root.time <- NULL\n\n# plot it\nplot(treeOldRoot)\naxisPhylo()\n# yep, that's really old\n\n# let's plot it now with the PhyloPic\nplotPhyloPicTree(tree = treeOldRoot,\n depthAxisPhylo = TRUE)\n\n# let's crop that old lineage\nplotPhyloPicTree(tree = treeOldRoot,\n maxAgeDepth = 500,\n depthAxisPhylo = TRUE)\n# cool!\n\n##################################\n# playing with colors\nplotPhyloPicTree(tree = tree,\n taxaColor = \"green\")\n\n# inverting the colors\npar(bg=\"black\")\ntaxaColors <- rep(\"white\",Ntip(tree))\n# making a red giraffe\ntaxaColors[4] <- \"red\"\nplotPhyloPicTree(\n tree = tree, \n orientation = \"upwards\",\n edge.color = \"white\",\n taxaColor=taxaColors)\n\n\n} # end if to test if animalData was NULL\n## End(No test) # end donttest segment\n\n######################################\n## Not run: \n##D \n##D # let's try some different phylopics\n##D # like a nice tree of commonly known tetrapods\n##D \n##D tetrapodList<-c(\"Archaeopteryx\", \"Columba\", \"Ectopistes\",\n##D \"Corvus\", \"Velociraptor\", \"Baryonyx\", \"Bufo\",\n##D \"Rhamphorhynchus\", \"Quetzalcoatlus\", \"Natator\",\n##D \"Tyrannosaurus\", \"Triceratops\", \"Gavialis\",\n##D \"Brachiosaurus\", \"Pteranodon\", \"Crocodylus\",\n##D \"Alligator\", \"Giraffa\", \"Felis\", \"Ambystoma\",\n##D \"Homo\", \"Dimetrodon\", \"Coleonyx\", \"Equus\",\n##D \"Sphenodon\", \"Amblyrhynchus\")\n##D \n##D tetrapodData <-getSpecificTaxaPBDB(tetrapodList)\n##D \n##D tree <- makePBDBtaxonTree(tetrapodData, rankTaxon = \"genus\")\n##D \n##D plotPhyloPicTree(tree = tree)\n##D \n##D ####################################\n##D \n##D # let's check our speed increase from caching!\n##D # can try this on your own machine\n##D \n##D #first time\n##D system.time(plotPhyloPicTree(tree = tree))\n##D # second time\n##D system.time(plotPhyloPicTree(tree = tree))\n##D \n##D ##################################\n##D # make a pretty plot\n##D \n##D taxaSeventyEight <- c(\n##D \"Archaeopteryx\", \"Pinus\", \"Procoptodon\", \"Olenellus\", \"Eldredgeops\",\n##D \"Quetzalcoatlus\", \"Homo\", \"Tyrannosaurus\", \"Triceratops\", \"Giraffa\",\n##D \"Bolivina\", \"Cancer\", \"Dicellograptus\", \"Dunkleosteus\", \"Solanum\",\n##D \"Anomalocaris\", \"Climacograptus\", \"Halysites\", \"Cyrtograptus\", \n##D \"Procoptodon\", \"Megacerops\", \"Moropus\", \"Dimetrodon\", \"Lingula\",\n##D \"Rhynchosaurus\", \"Equus\", \"Megaloceros\", \"Rhynchotrema\", \"Pecten\",\n##D \"Echinaster\", \"Eocooksonia\", \"Neospirifer\", # \"Prototaxites\", \n##D \"Cincinnaticrinus\", \"Nemagraptus\", \"Monograptus\", \"Pongo\", \"Acropora\",\n##D \"Histiodella\", \"Agathiceras\", \"Juramaia\", \"Opabinia\", \"Arandaspis\",\n##D \"Corvus\", \"Plethodon\", \"Latimeria\", \"Phrynosoma\", \"Araucarioxylon\",\n##D \"Velociraptor\", \"Hylonomus\", \"Elginerpeton\", \"Rhyniognatha\",\n##D \"Tyto\", \"Dromaius\", \"Solenopsis\", \"Gorilla\", \"Ginkgo\", \"Terebratella\", \n##D \"Caretta\", \"Crocodylus\", \"Rosa\", \"Prunus\", \"Lycopodium\", \"Meganeura\",\n##D \"Diplodocus\", \"Brachiosaurus\", \"Hepaticae\", \"Canadaspis\", \"Pikaia\",\n##D \"Smilodon\", \"Mammuthus\", \"Exaeretodon\", \"Redondasaurus\", \"Dimetrodon\",\n##D \"Megatheriidae\", \"Metasequoia\", \"Aedes\", \"Panthera\", \"Megalonyx\")\n##D \n##D dataSeventyEight <-getSpecificTaxaPBDB(taxaSeventyEight)\n##D tree <- makePBDBtaxonTree(dataSeventyEight, rankTaxon = \"genus\") \n##D \n##D timeTree <- 
dateTaxonTreePBDB(tree,\n##D minBranchLen = 10)\n##D \n##D date <- format(Sys.time(), \"%m-%d-%y\")\n##D file <- paste0(\n##D \"tree_taxa78_phylopic_stratTree_\",\n##D date, \".png\")\n##D \n##D png(file = file,\n##D height = 5, width = 12, \n##D units = \"in\", res = 300)\n##D par(bg=\"black\")\n##D par(mar=c(0,0,3,0))\n##D taxaColors <- rep(\"white\", Ntip(timeTree))\n##D taxaColors[4] <- \"red\"\n##D \n##D plotPhyloPicTree(\n##D tree = timeTree, \n##D orientation = \"upwards\",\n##D addTaxonStratDurations = TRUE,\n##D edge.color = \"white\",\n##D maxAgeDepth = 700,\n##D taxaColor=taxaColors,\n##D depthAxisPhylo = TRUE,\n##D colorAxisPhylo = \"white\")\n##D dev.off()\n##D shell.exec(file)\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"plotTraitgram","snippet":"### Name: plotTraitgram\n### Title: Plot a Traitgram for Continuous Traits\n### Aliases: plotTraitgram\n\n### ** Examples\n\n\nset.seed(444)\ntree <- rtree(10)\ntrait <- rTraitCont(tree)\n\n#first, traitgram without conf intervals\nplotTraitgram(trait,tree,conf.int = FALSE)\n\n#now, with\nplotTraitgram(trait,tree)\n#not much confidence, eh?\n\n# plotting simulated data\n # with values for ancestral nodes as input\ntrait <- rTraitCont(tree, ancestor = TRUE)\nplotTraitgram(tree = tree,trait = trait)\n\n\n\n"} {"package":"paleotree","topic":"pqr2Ps","snippet":"### Name: pqr2Ps\n### Title: Joint Probability of A Clade Surviving Infinitely or Being\n### Sampled Once\n### Aliases: pqr2Ps\n\n### ** Examples\n\n#with exact solution\npqr2Ps(\n p = 0.1,\n q = 0.1,\n r = 0.1,\n useExact = TRUE\n )\n\n#with inexact solution\npqr2Ps(\n p = 0.1,\n q = 0.1,\n r = 0.1,\n useExact = FALSE\n )\n\n\n\n"} {"package":"paleotree","topic":"probAnc","snippet":"### Name: probAnc\n### Title: Probability of being a sampled ancestor of another sampled taxon\n### Aliases: probAnc\n\n### ** Examples\n\n# examples, run at very low nrep for sake of speed (examples need to be fast)\n\n# default options \n # probability of sampling a direct descendant\nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"budding\", \n analysis = \"directDesc\",\n nrep = 100)\n\n# other modes\nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"bifurcating\", \n analysis = \"directDesc\",\n nrep = 100)\nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"anagenesis\", \n analysis = \"directDesc\",\n nrep = 100)\n\n# probability of having sampled indirect descendants of a taxon\n\n# first, the default\nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"budding\", \n analysis = \"indirectDesc\",\n nrep = 100)\n \t\nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"bifurcating\", \n analysis = \"indirectDesc\", \n nrep = 100)\n \nprobAnc(p = 0.1, q = 0.1, R = 0.5, \n mode = \"anagenesis\", \n analysis = \"indirectDesc\",\n nrep = 100)
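\n\n# a hedged sketch (not in the original example): how strongly does\n# this probability depend on sampling? vary R while holding the\n# other parameters fixed\nsapply(c(0.25, 0.5, 0.75),\n function(R) probAnc(p = 0.1, q = 0.1, R = R,\n mode = \"budding\", analysis = \"directDesc\",\n nrep = 100))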
\n\n\n\n"} {"package":"paleotree","topic":"resolveTreeChar","snippet":"### Name: resolveTreeChar\n### Title: Resolve Polytomies Using Parsimony-Based Reconstruction of a\n### Discrete Character\n### Aliases: resolveTreeChar\n\n### ** Examples\n\n\n## No test: \n\n# let's write a quick&dirty ancestral trait plotting function\n\nquickAncPlot <- function(tree, trait, cex, orderedChar = FALSE, type = \"MPR\", cost = NULL){\n\t ancData <- ancPropStateMat(tree = tree, trait = trait, orderedChar = orderedChar)\n\t ancCol <- (1:ncol(ancData))+1\n \tplot(tree,show.tip.label = FALSE,no.margin = TRUE,direction = \"upwards\")\n \ttiplabels(pch = 16,pie = ancData[(1:Ntip(tree)),],cex = cex,piecol = ancCol,\n\t\tcol = 0)\n \tnodelabels(pie = ancData[-(1:Ntip(tree)),],cex = cex,piecol = ancCol)\t\n \t}\n\n##########\n\n# examples with simulated data\n\nset.seed(2)\ntree <- rtree(50)\n#simulate under a likelihood model\ntrait <- rTraitDisc(tree,k = 3,rate = 0.7)\ntree <- degradeTree(tree,prop_collapse = 0.6)\ntree <- ladderize(tree,right = FALSE)\n\n#a bunch of type = MPR (default) examples\ntreeUnord <- resolveTreeChar(tree,trait,orderedChar = FALSE)\ntreeOrd <- resolveTreeChar(tree,trait,orderedChar = TRUE,stateBias = NULL)\ntreeOrdPrim <- resolveTreeChar(tree,trait,orderedChar = TRUE,stateBias = \"primitive\")\ntreeOrdDer <- resolveTreeChar(tree,trait,orderedChar = TRUE,stateBias = \"derived\")\n\n#compare number of nodes\nNnode(tree)\t\t\t#original\nNnode(treeUnord)\t\t#unordered, stateBias = NULL, MPR\nNnode(treeOrd)\t\t#ordered, stateBias = NULL\nNnode(treeOrdPrim)\t#ordered, stateBias = 'primitive'\nNnode(treeOrdDer)\t#ordered, stateBias = 'derived'\n\n#let's compare original tree with unordered-resolved tree\nlayout(1:2)\nquickAncPlot(tree,trait,orderedChar = FALSE,cex = 0.3)\ntext(x = 43,y = 10,\"Original\",cex = 1.5)\nquickAncPlot(treeUnord,trait,orderedChar = FALSE,cex = 0.3)\ntext(x = 43,y = 10,\"orderedChar = FALSE\",cex = 1.5)\n#some resolution gained\n\n#now let's compare the original and ordered, both stateBias = NULL\nlayout(1:2)\nquickAncPlot(tree,trait,orderedChar = FALSE,cex = 0.3)\ntext(x = 43,y = 10,\"Original\",cex = 1.5)\nquickAncPlot(treeOrd,trait,orderedChar = TRUE,cex = 0.3)\ntext(x = 43,y = 10,\"orderedChar = TRUE\",cex = 1.5)\n\n#now let's compare the three ordered trees\nlayout(1:3)\nquickAncPlot(treeOrd,trait,orderedChar = TRUE,cex = 0.3)\ntext(x = 41,y = 8,\"ordered, stateBias = NULL\",cex = 1.5)\nquickAncPlot(treeOrdPrim,trait,orderedChar = TRUE,cex = 0.3)\ntext(x = 41.5,y = 8,\"ordered, stateBias = 'primitive'\",cex = 1.5)\nquickAncPlot(treeOrdDer,trait,orderedChar = TRUE,cex = 0.3)\ntext(x = 42,y = 8,\"ordered, stateBias = 'derived'\",cex = 1.5)\n\n#let's compare unordered with ordered, stateBias = 'primitive'\nlayout(1:2)\nquickAncPlot(treeUnord,trait,orderedChar = FALSE,cex = 0.3)\ntext(x = 41,y = 8,\"orderedChar = FALSE\",cex = 1.5)\nquickAncPlot(treeOrdPrim,trait,orderedChar = TRUE,cex = 0.3)\ntext(x = 40,y = 11,\"orderedChar = TRUE\",cex = 1.5)\ntext(x = 40,y = 4,\"stateBias = 'primitive'\",cex = 1.5)\n\n\n#these comparisons will differ greatly between datasets\n\t# need to try them on your own\n\nlayout(1)\n\n## End(No test)\n\n\n\n"} {"package":"paleotree","topic":"retiolitinae","snippet":"### Name: retiolitinae\n### Title: Cladogram and Range Data for the Retiolitinae\n### Aliases: retiolitinae retioRanges retioTree retioChar\n### Keywords: datasets\n\n### ** Examples\n\n\n#load data\ndata(retiolitinae)\n\n#Can plot discrete time interval diversity curve with retioRanges\ntaxicDivDisc(retioRanges)\n\n#Can plot the unscaled cladogram\nplot(retioTree)\n#Can plot the determinant growth character on the cladogram\ntiplabels(pch = 16, col = (retioChar[,2]+1),adj = 0.25)\n\n#Use basic time-scaling (terminal branches only go to FADs)\nttree <- bin_timePaleoPhy(tree = retioTree,\n timeList = retioRanges,\n type = \"basic\",\n\t ntrees = 1,plot = TRUE)\n\n#Note that this function creates stochastic time-scaled trees...\n\t#A sample of 1 is not representative!\n\n#phylogenetic diversity curve\nphyloDiv(ttree)
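\n\n# a hedged sketch (extending the example): a sample of one\n# stochastically time-scaled tree is not representative, so draw\n# many trees and summarize their diversity curves together\nttrees <- bin_timePaleoPhy(tree = retioTree,\n timeList = retioRanges,\n type = \"basic\",\n ntrees = 100, plot = FALSE)\nmultiDiv(ttrees)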
\n\n\n\n\n"} {"package":"paleotree","topic":"reverseList","snippet":"### Name: reverseList\n### Title: Reverse List Structure\n### Aliases: reverseList\n\n### ** Examples\n\n\nlist1 <- list(list(1:3),list(1:3),list(1:3))\nreverseList(list1,simplify = FALSE)\nreverseList(list1,simplify = TRUE)\n\n\n\n"} {"package":"paleotree","topic":"rootSplit","snippet":"### Name: rootSplit\n### Title: Split Tip Taxa by Root Divergence\n### Aliases: rootSplit\n\n### ** Examples\n\n\ntree <- rtree(100)\nrootSplit(tree)\n\n\n"} {"package":"paleotree","topic":"sampleRanges","snippet":"### Name: sampleRanges\n### Title: Sampling Taxon Ranges\n### Aliases: sampleRanges\n\n### ** Examples\n\n\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n\t nTotalTaxa = c(30,40), \n\t nExtant = 0\n\t )\ntaxa <- fossilRecord2fossilTaxa(record)\n\n# let's see what the 'true' diversity curve looks like in this case\nlayout(1:2)\ntaxicDivCont(taxa)\n# simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa,r = 0.5)\n# plot the diversity curve based on the sampled ranges\ntaxicDivCont(rangesCont)\n\n# compare the true history to what we might observe!\n\n#let's try more complicated models!\n\n# a pull-to-the-recent model with x5 increase over time\n # similar to Liow et al.'s incP\nlayout(1:2)\nrangesCont1 <- sampleRanges(taxa,\n r = 0.5,\n rTimeRatio = 5,\n plot = TRUE\n )\ntaxicDivCont(rangesCont1)\n\n# a hat-shaped model\nlayout(1:2)\nrangesCont1 <- sampleRanges(taxa,\n r = 0.5,\n alpha = 4,\n beta = 4,\n plot = TRUE\n )\ntaxicDivCont(rangesCont1)\n\n# a combination of these\nlayout(1:2)\nrangesCont1 <- sampleRanges(taxa,\n r = 0.5,\n alpha = 4,\n beta = 4,\n rTimeRatio = 5,\n plot = TRUE\n )\ntaxicDivCont(rangesCont1)\n\n# testing with cryptic speciation\nlayout(1)\nrecordCrypt <- simFossilRecord(p = 0.1, q = 0.1, \n prop.cryptic = 0.5, \n nruns = 1,\n nTotalTaxa = c(20,30), \n nExtant = 0\n )\ntaxaCrypt <- fossilRecord2fossilTaxa(recordCrypt)\nrangesCrypt <- sampleRanges(taxaCrypt,r = 0.5)\ntaxicDivCont(rangesCrypt)\n\n#an example of hat-shaped models (beta distributions) when there are live taxa\nset.seed(444)\nrecordLive <- simFossilRecord(p = 0.1, \n q = 0.05, \n nruns = 1,\n nTotalTaxa = c(5,100),\n nExtant = c(10,100)\n )\ntaxaLive <- fossilRecord2fossilTaxa(recordLive)\n#with end-points of live taxa at random points in the hat\nrangesLive <- sampleRanges(taxaLive,\n r = 0.1,\n alpha = 4,\n beta = 4,\n randLiveHat = TRUE,\n plot = TRUE\n )\n#with all taxa end-points at end-point of hat\nrangesLive <- sampleRanges(taxaLive,\n r = 0.1,\n alpha = 4,\n beta = 4,\n randLiveHat = FALSE,\n plot = TRUE\n )\n\n\n## No test: \n#simulate a model where sampling rate evolves under Brownian motion\ntree <- taxa2phylo(taxa,obs = taxa[,3])\nsampRateBM <- rTraitCont(tree)\nsampRateBM <- sampRateBM-min(sampRateBM)\nlayout(1:2)\nrangesCont1 <- sampleRanges(taxa,r = sampRateBM,plot = TRUE)\ntaxicDivCont(rangesCont1)\n\n#evolving sampling rate, hat model and pull of the recent\nlayout(1:2)\nrangesCont1 <- sampleRanges(taxa,\n r = sampRateBM,\n alpha = 4,\n beta = 4,\n rTimeRatio = 5,\n plot = TRUE\n )\ntaxicDivCont(rangesCont1)\nlayout(1)\n\n#the simpler model is simulated by pulling waiting times from an exponential\n#more complicated models are simulated by discretizing time into small intervals\n#are these two methods comparable?\n\n#let's look at the number of taxa sampled under both methods\n\nsummary(replicate(100,sum(!is.na(\n sampleRanges(taxa,\n r = 0.5,\n alt.method = FALSE\n )\n [,1]))))\n\nsummary(replicate(100,sum(!is.na(\n sampleRanges(taxa,\n r = 0.5,\n alt.method = TRUE\n )\n [,1]))))\n \n#they look pretty similar!\n## End(No 
test)\n\n\n"} {"package":"paleotree","topic":"seqTimeList","snippet":"### Name: seqTimeList\n### Title: Construct a Stochastic Sequenced Time-List from an Unsequenced\n### Time-List\n### Aliases: seqTimeList\n\n### ** Examples\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(60,80), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# simulate a fossil record with imperfect sampling with sampleRanges()\nrangesCont <- sampleRanges(taxa,r = 0.1)\n\n# Now let's use binTimeData to get ranges in discrete overlapping intervals\n # via pre-set intervals input\npresetIntervals <- cbind(\n c(1000, 995, 990, 980, 970, 975, 960, 950, 940, 930, 900, 890, 888, 879, 875),\n c(995, 989, 960, 975, 960, 950, 930, 930, 930, 900, 895, 888, 880, 875, 870)\n )\nrangesDisc1 <- binTimeData(rangesCont, int.times = presetIntervals)\n\nseqLists <- seqTimeList(rangesDisc1, nruns = 10)\nseqLists$nTaxa\nseqLists$nIntervals\n\n#apply freqRat as an example analysis\nsapply(seqLists$timeLists, freqRat)\n\n# notice the zero and infinite freqRat estimates? What's going on?\n\nfreqRat(seqLists$timeLists[[4]], plot = TRUE)\n\n# too few taxa of two or three interval durations for the ratio to work properly\n # perhaps ignore these estimates\n\n# with weighted selection of intervals\nseqLists <- seqTimeList(rangesDisc1, nruns = 10, weightSampling = TRUE)\n\nseqLists$nTaxa\nseqLists$nIntervals\nsapply(seqLists$timeLists, freqRat)\n\n# didn't have much effect in this simulated example\n\n\n"} {"package":"paleotree","topic":"setRootAge","snippet":"### Name: setRootAge\n### Title: Place a Non-Ultrametric Tree of Fossil Taxa on Absolute Time\n### Aliases: setRootAge setRootAges\n\n### ** Examples\n\n\nset.seed(444)\ntree <- rtree(10)\ntipAges <- cbind(c(\"t1\",\"t2\"), c(15,10))\n\nabsTimeTree <- setRootAge(tree = tree,tipAges)\n\nplot(absTimeTree)\naxisPhylo()\n\n\n\n"} {"package":"paleotree","topic":"simFossilRecord","snippet":"### Name: simFossilRecord\n### Title: Full-Scale Simulations of the Fossil Record with Birth, Death\n### and Sampling of Morphotaxa\n### Aliases: simFossilRecord\n\n### ** Examples\n\n\nset.seed(2)\n\n# quick birth-death-sampling run\n # with 1 run, 50 taxa\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1,\n nTotalTaxa = 50, \n plot = TRUE\n )\n \n################\n## No test: \n\n# Now let's examine with multiple runs of simulations\n\n# example of repeated pure birth simulations over 50 time-units\nrecords <- simFossilRecord(\n p = 0.1, \n q = 0, \n nruns = 10,\n totalTime = 50, \n plot = TRUE\n )\n\n# plot multiple diversity curves on a log scale\nrecords <- lapply(records, \n fossilRecord2fossilTaxa)\nmultiDiv(records,\n plotMultCurves = TRUE,\n plotLogRich = TRUE\n )\n\n# histogram of total number of taxa\nhist(sapply(records, nrow))\n\n##############################################\n# example of repeated birth-death-sampling\n # simulations over 50 time-units\n \nrecords <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 10,\n totalTime = 50, \n plot = TRUE)\n \nrecords <- lapply(records,\n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE)\n\n# like above...\n # but conditioned instead on having 10 extant taxa\n # between 1 and 100 time-units\n \nset.seed(4)\n \nrecords <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 10,\n totalTime = c(1,300), \n nExtant = 10, \n plot = TRUE\n )\n \nrecords <- 
lapply(records, \n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE\n )\n\n################################################\n\n# How probable were the runs I accepted?\n # The effect of conditions\n\nset.seed(1)\n\n# Let's look at an example of a birth-death process\n # with high extinction relative to branching\n\n# notes: \n # a) use default run conditions (barely any conditioning)\n # b) use print.runs to look at acceptance probability\n \nrecords <- simFossilRecord(\n p = 0.1, \n q = 0.8, \n nruns = 10,\n print.runs = TRUE, \n plot = TRUE\n )\n \n# 10 runs accepted from a total of 10 !\n\n# now let's give much more stringent run conditions\n # require 3 extant taxa at minimum, 5 taxa total minimum\n \nrecords <- simFossilRecord(\n p = 0.1, \n q = 0.8, \n nruns = 10,\n nExtant = c(3,100), \n nTotalTaxa = c(5,100),\n print.runs = TRUE, \n plot = TRUE\n )\n \n# thousands of simulations to just obtain 10 acceptable runs!\n # most ended in extinction before minimums were hit\n\n# beware analysis of simulated data where acceptance conditions \n # are too stringent: your data will be a 'special case'\n # of the simulation parameters\n# it will also take you a long time to generate reasonable\n # numbers of replicates for whatever analysis you are doing\n\n# TLDR: You should look at print.runs = TRUE\n\n##################################################################\n\n# Using the rate equation-input for complex diversification models\n\n# First up... Diversity Dependent Models!\n# Let's try Diversity-Dependent Branching over 50 time-units\n\n# first, let's write the rate equation\n# We'll use the diversity dependent rate equation model\n # from Etienne et al. 2012 as an example here\n # Under this equation, p = q at carrying capacity K\n# Many others are possible!\n# Note that we don't need to use max(0,rate) as negative rates\n # are converted to zero by default, as controlled by\n # the argument negRatesAsZero\n\n# From Etienne et al.\n # lambda = lambda0 - (lambda0 - mu)*(n/K)\n# lambda and mu are branching rate and extinction rate\n # lambda and mu == p and q in paleotree (i.e. Foote convention)\n# lambda0 is the branching rate at richness = 0\n# K is the carrying capacity\n# n is the richness\n\n# 'N' is the algebra symbol for standing taxonomic richness \n # for simFossilRecord's simulation capabilities\n# also branching rate cannot reference extinction rate\n# we'll have to set lambda0, mu and K in the rate equation directly\n\nlambda0 <- 0.3 # branching rate at 0 richness in Ltu\nK <- 40 # carrying capacity \nmu <- 0.1 # extinction rate will be 0.1 Ltu ( = 1/3 of lambda0 )\n\n# technically, mu here represents the lambda at richness = K\n # i.e. lambdaK\n
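# a quick sketch (not in the original example): evaluate the\n# diversity-dependent rate at its endpoints to check the algebra\nddRate <- function(N) lambda0 - (lambda0 - mu)*(N/K)\nddRate(0) # equals lambda0, 0.3\nddRate(K) # equals mu, 0.1 -- so lambda == mu at carrying capacity\n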
\n# Etienne et al. are just implicitly saying that the carrying capacity\n # is the richness at which lambda == mu\n\n# construct the equation programmatically using paste0\nbranchingRateEq <- paste0(lambda0, \"-(\", lambda0, \"-\", mu, \")*(N/\", K, \")\")\n# and take a look at it...\nbranchingRateEq\n# it's a thing of beauty, folks!\n\n# now let's try it\nrecords <- simFossilRecord(\n p = branchingRateEq, \n q = mu, \n nruns = 3,\n totalTime = 100, \n plot = TRUE, \n print.runs = TRUE\n )\n \nrecords <- lapply(records,\n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE)\n \n# those are some happy little diversity plateaus!\n\n\n# now let's do diversity-dependent extinction\n\n# let's slightly modify the model from Etienne et al.\n # mu = mu0 - (mu0 - muK)*(n/K)\n\nmu0 <- 0.001 # mu at n = 0\nmuK <- 0.1 # mu at n = K (should be equal to lambda at K)\nK <- 40 # carrying capacity (like above)\nlambda <- muK # equal to muK\n\n# construct the equation programmatically using paste0\nextRateEq <- paste0(mu0, \"-(\", mu0, \"-\", muK, \")*(N/\" ,K, \")\")\nextRateEq\n\n# now let's try it\nrecords <- simFossilRecord(\n p = lambda, \n q = extRateEq, \n nruns = 3,\n totalTime = 100, \n plot = TRUE, \n print.runs = TRUE)\n \nrecords <- lapply(records,\n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE)\n\n# these plateaus look a little more spiky \n #( maybe there is more turnover at K? )\n# also, it took longer for the rapid rise to occur\n\n##########################################################\n\n# Now let's try an example with time-dependent origination\n # and extinction constrained to equal origination\n\n# Note! Use of time-dependent parameters \"D\" and \"T\" may\n# result in slower than normal simulation run times\n# as the time-scale has to be discretized; see\n# info for argument maxTimeStep above\n\n# First, let's define a time-dependent rate equation\n # \"T\" is the symbol for time passed\ntimeEquation <- \"0.4-(0.007*T)\"\n\n#in this equation, 0.4 is the rate at time = 0\n # and it will decrease by 0.007 with every time-unit\n # at time = 50, the final rate will be 0.05\n# We can easily make it so extinction\n # is always equal to branching rate\n# \"P\" is the algebraic equivalent for\n # \"branching rate\" in simFossilRecord\n\n# now let's try it\nrecords <- simFossilRecord(\n p = timeEquation, \n q = \"P\", \n nruns = 3,\n totalTime = 50, \n plot = TRUE, \n print.runs = TRUE\n )\n \nrecords <- lapply(records,\n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE)\n \n# high variability that seems to then smooth out as turnover decreases\n\n# And what about duration-dependent processes?\n # let's do a duration-dependent extinction equation:\ndurDepExt <- \"0.01+(0.01*D)\"\n\n# okay, let's take it for a spin\nrecords <- simFossilRecord(\n p = 0.1, \n q = durDepExt, \n nruns = 3,\n totalTime = 50,\n plot = TRUE, \n print.runs = TRUE\n )\n \nrecords <- lapply(records,\n fossilRecord2fossilTaxa)\n \nmultiDiv(records,\n plotMultCurves = TRUE)\n \n# creates runs full of short-lived taxa\n\n# Some more stuff to do with rate formulae!\n\n# The formulae input method for rates allows\n\t# for the rate to be a random variable\n\n# For example, we could constantly redraw\n \t\t# the branching rate from an exponential\n\nrecord <- simFossilRecord(\n p = \"rexp(n = 1,rate = 10)\",\n q = 0.1, r = 0.1, nruns = 1,\n\t nTotalTaxa = 50, plot = TRUE)\n\n# Setting up specific time-variable rates can be laborious though\n # e.g. one rate during this 10 unit interval, \n # another during this interval, etc\n\t# The problem is setting this up within a fixed function\n
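\n# a minimal sketch (not in the original example): one approach is a\n# rate vector indexed by elapsed time T; the values and the 10-unit\n# windows here are made up purely for illustration\nstepRates <- c(0.1, 0.2, 0.3)\nsapply(c(0, 5, 10, 15, 25),\n function(T) stepRates[1 + (T %/% 10)])\n# 0.1 0.1 0.2 0.2 0.3 -- a new rate every 10 time-units\n# the worked example below builds exactly this sort of lookup\n# into a rate equation for simFossilRecord\n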
\n#############################################################\n# Worked Example\n# What if we want to draw a new rate from a\n # lognormal distribution every 10 time units?\n\n# Need to randomly draw these rates *before* running simFossilRecord\n# This means also that we will need to individually do each simFossilRecord run\n # since the rates are drawn outside of simFossilRecord\n\n# Get some reasonable lognormal rates:\nrates <- 0.1+rlnorm(100,meanlog = 1,sdlog = 1)/100\n\n# Now paste it into a formula that describes a function that\n # will change the rate output every 10 time units\nrateEquation <- paste0(\n \"c(\",\n paste0(rates,collapse = \",\"),\n \")[1+(T%/%10)]\"\n )\n\n# and let's run it\nrecord <- simFossilRecord(\n p = rateEquation, \n q = 0.1, \n r = 0.1, \n nruns = 1,\n totalTime = c(30,40), \n plot = TRUE\n )\n \n#####################################################################\n\n# Speciation Modes\n\n# Some examples of varying the 'speciation modes' in simFossilRecord\n\n# The default is pure budding cladogenesis\n # anag.rate = prop.bifurc = prop.cryptic = 0\n# let's just set those for the moment anyway\nrecord <- simFossilRecord(p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0, prop.bifurc = 0, prop.cryptic = 0,\n nruns = 1, nTotalTaxa = c(20,30) ,nExtant = 0, plot = TRUE)\n\n#convert and plot phylogeny\n # note this will not reflect the 'budding' pattern\n # branching events will just appear like bifurcation\n # it's a typical convention for phylogeny plotting\nconverted <- fossilRecord2fossilTaxa(record)\ntree <- taxa2phylo(converted,plot = TRUE)\n\n#now, an example of pure bifurcation\nrecord <- simFossilRecord(p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0, prop.bifurc = 1, prop.cryptic = 0,\n nruns = 1, nTotalTaxa = c(20,30) ,nExtant = 0)\ntree <- taxa2phylo(fossilRecord2fossilTaxa(record),plot = TRUE)\n\n# all the short branches are due to ancestors that terminate\n # via pseudoextinction at bifurcation events\n\n# an example with anagenesis = branching\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0.1, \n prop.bifurc = 0, \n prop.cryptic = 0,\n nruns = 1, \n nTotalTaxa = c(20,30),\n nExtant = 0\n )\ntree <- taxa2phylo(fossilRecord2fossilTaxa(record),\n plot = TRUE)\n# lots of pseudoextinction\n\n# an example with anagenesis, pure bifurcation\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0.1, \n prop.bifurc = 1, \n prop.cryptic = 0,\n nruns = 1, \n nTotalTaxa = c(20,30) ,\n nExtant = 0\n )\ntree <- taxa2phylo(\n fossilRecord2fossilTaxa(record),\n plot = TRUE\n )\n# lots and lots of pseudoextinction\n\n# an example with half cryptic speciation\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 0.5,\n nruns = 1, \n nTotalTaxa = c(20,30), \n nExtant = 0\n )\n\ntree <- taxa2phylo(\n fossilRecord2fossilTaxa(record),\n plot = TRUE)\n\n# notice that the tree has many more than the maximum of 30 tips:\n # that's because the cryptic taxa are not counted as\n # separate taxa by default, as controlled by count.cryptic\n\n# an example with anagenesis, bifurcation, cryptic speciation\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0.1, \n prop.bifurc = 0.5, \n prop.cryptic = 0.5,\n nruns = 1, \n nTotalTaxa = c(20,30), \n nExtant = 0\n )\n\ntree <- taxa2phylo(\n fossilRecord2fossilTaxa(record),\n plot = TRUE)\n\n# note in this case, 50% of branching is cryptic\n # 25% is bifurcation, 25% is budding
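\n\n# a hedged sketch (not in the original example, and assuming the\n# output columns are taxon.id first and 'looks.like' sixth, as used\n# elsewhere in these examples): count how many lineages are cryptic\n# variants of an earlier morphotaxon\ntaxa <- fossilRecord2fossilTaxa(record)\nsum(taxa[,6] != taxa[,1]) # rough count of cryptic lineages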
\n\n# an example with anagenesis, pure cryptic speciation\n # morphotaxon identity will thus be entirely independent of branching!\n # I wonder if this is what is really going on, sometimes...\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0.1, \n prop.bifurc = 0, \n prop.cryptic = 1,\n nruns = 1, \n nTotalTaxa = c(20,30), \n nExtant = 0\n )\ntree <- taxa2phylo(fossilRecord2fossilTaxa(record),\n plot = TRUE)\n\n# merging cryptic taxa when all speciation is cryptic\nset.seed(1)\nrecord <- simFossilRecord(\n p = 0.1,\n q = 0.1, \n r = 0.1,\n prop.cryptic = 1,\n totalTime = 50, \n plot = TRUE\n )\n\n# it looks like there is only a single taxon, but...\nlength(record)\t\n\n#the above is the *actual* number of cryptic lineages\n\n#########################################################################\n\n# playing with count.cryptic with simulations of pure cryptic speciation\n # what if we had fossil records with NO morphological differentiation?\n\n# We can choose to condition on total morphologically-distinguishable\n # taxa (count.cryptic = FALSE, the default) or on total taxa\n # including cryptic lineages (count.cryptic = TRUE)\n\n# an example with pure cryptic speciation with count.cryptic = TRUE\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 1,\n nruns = 1, \n totalTime = 50, \n nTotalTaxa = c(10,100), \n count.cryptic = TRUE\n )\n \ntree <- taxa2phylo(fossilRecord2fossilTaxa(record))\n\n# plot the tree\nplot(tree)\naxisPhylo()\n\n# notice how the tip labels indicate all are the same morphotaxon?\n\n#################\n# an example with pure cryptic speciation with count.cryptic = FALSE\n # Need to be careful with this!\n\n# We'll have to replace the # of taxa constraints with a time constraint\n # or else the count.cryptic = FALSE simulation will never end!\n\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 1,\n nruns = 1, \n totalTime = 50, \n count.cryptic = FALSE\n )\ntree <- taxa2phylo(fossilRecord2fossilTaxa(record))\n\n# plot it\nplot(tree)\naxisPhylo()\n\n###########################################\n# let's look at numbers of taxa returned when varying count.cryptic\n # with prop.cryptic = 0.5\n\n# Count Cryptic Example Number One\n# simple simulation going for 50 total taxa\t\n\n# first, count.cryptic = FALSE (default)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 0.5,\n nruns = 1, \n nTotalTaxa = 50, \n count.cryptic = FALSE\n )\n \ntaxa <- fossilRecord2fossilTaxa(record)\n\n#### Count the taxa/lineages !\n# number of lineages (inc. cryptic)\nnrow(taxa) \n\n# number of morph-distinguishable taxa\nlength(unique(taxa[,6])) 
\n\n###################\n\n# Count Cryptic Example Number Two\n# Now let's try with count.cryptic = TRUE\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 0.5,\n nruns = 1, \n nTotalTaxa = 50, \n count.cryptic = TRUE\n )\n \ntaxa <- fossilRecord2fossilTaxa(record)\n\n### Count the taxa/lineages !\n# number of lineages (inc. cryptic)\nnrow(taxa) \n\n# number of morph-distinguishable taxa\nlength(unique(taxa[,6])) \n# okay...\n\n###########\n\n# Count Cryptic Example Number Three \n# now let's try cryptic speciation *with* 10 extant taxa!\n\n# first, count.cryptic = FALSE (default)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 0.5,\n nruns = 1, \n nExtant = 10, \n totalTime = c(1,100), \n count.cryptic = FALSE\n )\n \ntaxa <- fossilRecord2fossilTaxa(record)\n\n### Count the taxa/lineages !\n# number of still-living lineages (inc. cryptic)\nsum(taxa[,5]) \n\n# number of still-living morph-dist. taxa\nlength(unique(taxa[taxa[,5] == 1,6]))\t\n\n##############\n\n# Count Cryptic Example Number Four\n# like above with count.cryptic = TRUE\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1,\n anag.rate = 0, \n prop.bifurc = 0, \n prop.cryptic = 0.5,\n nruns = 1, \n nExtant = 10, \n totalTime = c(1,100), \n count.cryptic = TRUE\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n\n### Count the taxa/lineages !\n# number of still-living lineages (inc. cryptic)\nsum(taxa[,5]) \n# number of still-living morph-dist. taxa\nlength(unique(taxa[taxa[,5] == 1,6]))\t\n\n#################################################\n\n# Specifying Number of Initial Taxa\n # Example using startTaxa to have more initial taxa\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1,\n nTotalTaxa = 100, \n startTaxa = 20, \n plot = TRUE\n )\n\n######################################################\n\n# Specifying Combinations of Simulation Conditions\n\n# Users can generate datasets that meet multiple conditions:\n # such as time, number of total taxa, extant taxa, sampled taxa\n# These can be set as point conditions or ranges\n\n# let's set time = 10-100 units, total taxa = 30-40, extant = 10\n #and look at acceptance rates with print.runs\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1, \n totalTime = c(10,100), \n nTotalTaxa = c(30,40), \n nExtant = 10,\n print.runs = TRUE, \n plot = TRUE\n )\n\n# let's make the constraints on nTotalTaxa a little tighter\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1, \n totalTime = c(50,100), \n nTotalTaxa = 30, \n nExtant = 10,\n print.runs = TRUE, \n plot = TRUE\n )\n \n# still okay acceptance rates\n\n# alright, now let's add a constraint on sampled taxa\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1, \n totalTime = c(50,100), \n nTotalTaxa = 30, \n nExtant = 10,\n nSamp = 15, \n print.runs = TRUE, \n plot = TRUE\n )\n\n# still okay acceptance rates\n\n# we can be really odd and instead condition on having a single taxon\nset.seed(1)\n\nrecord <- simFossilRecord(\n p = 0.1,\n q = 0.1, \n r = 0.1, \n nTotalTaxa = 1,\n totalTime = c(10,20), \n plot = TRUE\n )\n\n########################################################\n\n# Simulations of Entirely Extinct Taxa\n\n# Typically, a user may want to condition on a precise\n # number of sampled taxa in an all-extinct simulation\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 1, \n nTotalTaxa = c(1,100), \n nExtant = 0, \n nSamp = 20,\n print.runs = TRUE, \n plot = TRUE\n )\n\n# Note that when simulations don't include\n # sampling or extant taxa, the plot \n # functionality changes\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0, \n nruns = 1, \n nExtant = 0, \n print.runs = TRUE, \n plot = TRUE\n )\n\n# Something similar happens when there is 
no sampling\n # and there are extant taxa but they aren't sampled\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0, \n nruns = 1, \n nExtant = 10, \n nTotalTaxa = 100, \n modern.samp.prob = 0,\n print.runs = TRUE, \n plot = TRUE\n )\n\n########################################################\n# Retaining Rejected Simulations\n\n# sometimes we might want to look at all the simulations\n # that don't meet acceptability criteria\n\n# In particular, look at simulated clades that go extinct\n # rather than surviving long enough to satisfy \n # conditioning on temporal duration.\n\n# Let's look for 10 simulations with following conditioning:\n # that are exactly 10 time-units in duration\n # that have between 10 and 30 total taxa\n # and have 1 to 30 extant taxa after 10 time-units\n\nset.seed(4)\n\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n r = 0.1, \n nruns = 10, \n totalTime = 10, \n nTotalTaxa = c(10,30), \n nExtant = c(1,30),\n returnAllRuns = TRUE,\n print.runs = TRUE, \n plot = TRUE\n )\n\n# when returnAllRuns = TRUE, the length of record is 2\n # named 'accepted' and 'rejected'\n\n# all the accepted runs (all 10) are in 'accepted'\nlength(record$accepted) \n\n# all the rejected runs are in 'rejected'\nlength(record$rejected) \n\n# probably many more than 10! \n # (I got 1770!)\n\n# how many taxa are in each rejected simulation run?\ntotalTaxa_rej <- sapply(record$rejected, length)\n\n# plot as a histogram\nhist(totalTaxa_rej)\n# a very nice exponential distribution...\n\n# plot the rejected simulation with the most taxa\n\ndivCurveFossilRecordSim(\n fossilRecord = record$rejected[[\n which(max(totalTaxa_rej) == totalTaxa_rej)[1]\n ]]\n )\n\n# we can plot all of these too...\nresult <- sapply(record$rejected, \n divCurveFossilRecordSim)\n\n# let's look at the temporal duration of rejected clades\n\n# need to write a function\ngetDuration <- function(record){\n taxa <- fossilRecord2fossilTaxa(record)\n maxAge <- max(taxa[,\"orig.time\"], na.rm = TRUE)\n minAge <- min(taxa[,\"ext.time\"], na.rm = TRUE)\n cladeDuration <- maxAge - minAge\n return(cladeDuration)\n }\n\n# all the accepted simulations should have\n # identical durations (10 time-units)\nsapply(record$accepted, getDuration)\n\n# now the rejected set\ndurations_rej <- sapply(record$rejected, getDuration)\n# plot as a histogram\nhist(durations_rej)\n\n# Most simulations hit the max time without\n # satisfying the other specified constraints\n # (probably they didn't have the min of 10 taxa total)\n\n## End(No test) \n\n\n"} {"package":"paleotree","topic":"simFossilRecordMethods","snippet":"### Name: simFossilRecordMethods\n### Title: Methods for Editing or Converting Output from Simulated Fossil\n### Record Objects\n### Aliases: simFossilRecordMethods timeSliceFossilRecord\n### fossilRecord2fossilTaxa fossilRecord2fossilRanges\n### fossilTaxa2fossilRecord\n\n### ** Examples\n\n\nset.seed(44)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, r = 0.1, \n nruns = 1,\n nTotalTaxa = c(20,30),\n nExtant = 0, \n plot = TRUE\n )\n\n##################################################\n# time-slicing simulations at particular dates\n\n# let's try slicing this record at 940 time-units\nslicedRecord <- timeSliceFossilRecord(\n fossilRecord = record, \n sliceTime = 940\n )\n# and let's plot it\ndivCurveFossilRecordSim(slicedRecord)\n\n# now with shiftRoot4TimeSlice = TRUE to shift the root age\nslicedRecord <- timeSliceFossilRecord(\n fossilRecord = record, \n sliceTime = 940,\n shiftRoot4TimeSlice = TRUE\n )\n# and let's 
plot it\ndivCurveFossilRecordSim(slicedRecord)\n\n# the last two plots look a little different\n # due to how axis limits are treated...\n# notice that in both, 'modern' (extant) taxa\n # are sampled with probability = 1\n \n########\n# let's try it again, make that probability = 0\n# now with shiftRoot4TimeSlice = TRUE\n \nslicedRecord <- timeSliceFossilRecord(\n fossilRecord = record, \n sliceTime = 940,\n shiftRoot4TimeSlice = TRUE, \n modern.samp.prob = 0\n )\n \n# and let's plot it\ndivCurveFossilRecordSim(slicedRecord)\n\n############################\n\n# converting to taxa objects and observed ranges\n\n# convert to taxa data\ntaxa <- fossilRecord2fossilTaxa(record)\n# convert to ranges\nranges <- fossilRecord2fossilRanges(record)\n\n# plot diversity curves with multiDiv\nmultiDiv(list(taxa,ranges),\n plotMultCurves = TRUE)\n# should look a lot like what we got earlier\n\n# get the cladogram we'd obtain for these taxa with taxa2cladogram\ncladogram <- taxa2cladogram(taxa,\n plot = TRUE)\n\n# now get the time-scaled phylogenies with taxa2phylo\n\n# first, with tips extending to the true times of extinction\ntreeExt <- taxa2phylo(taxa,\n plot = TRUE)\n\n# now, with tips extending to the first appearance dates (FADs) of taxa\n\t# get the FADs from the ranges\nFADs <- ranges[,1]\ntreeFAD <- taxa2phylo(taxa,\n FADs,plot = TRUE)\n\n\n\n"} {"package":"paleotree","topic":"taxa2cladogram","snippet":"### Name: taxa2cladogram\n### Title: Convert Simulated Taxon Data into a Cladogram\n### Aliases: taxa2cladogram\n\n### ** Examples\n\n## No test: \nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\n#let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\nlayout(1:2)\ncladogram <- taxa2cladogram(taxa,plot = TRUE)\n#compare the \"real\" time-scaled tree of taxon last occurrences (taxa2phylo) \n #to the 'ideal' cladogram\ntree <- taxa2phylo(taxa,plot = TRUE)\n\n#testing with cryptic speciation\nrecordCrypt <- simFossilRecord(p = 0.1, q = 0.1, prop.cryptic = 0.5, nruns = 1,\n\tnTotalTaxa = c(30,40), nExtant = 0)\ntaxaCrypt <- fossilRecord2fossilTaxa(recordCrypt)\nlayout(1:2)\nparOrig <- par(no.readonly = TRUE)\npar(mar = c(0,0,0,0))\ncladoCrypt1 <- taxa2cladogram(taxaCrypt,drop.cryptic = FALSE)\nplot(cladoCrypt1)\ncladoCrypt2 <- taxa2cladogram(taxaCrypt,drop.cryptic = TRUE)\nplot(cladoCrypt2)\n\n#reset plotting\npar(parOrig)\nlayout(1) \n## End(No test)\n\n\n"} {"package":"paleotree","topic":"taxa2phylo","snippet":"### Name: taxa2phylo\n### Title: Convert Simulated Taxon Data into a Phylogeny\n### Aliases: taxa2phylo\n\n### ** Examples\n\n\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n# let's use taxa2phylo to get the 'true' time-scaled tree of the taxa\ntree <- taxa2phylo(taxa)\nphyloDiv(tree)\n\n# now a phylogeny with tips placed at\n # the apparent time of extinction for each taxon\nrangesCont <- sampleRanges(taxa,r = 0.5)\ntree <- taxa2phylo(taxa,obs_time = rangesCont[,2])\nphyloDiv(tree,drop.ZLB = FALSE)\n#note that it drops taxa which were never sampled!
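\n\n# a hedged sketch (assuming, as in the sampleRanges examples, that\n# unsampled taxa get NA entries in the output range matrix):\n# list which taxa were never sampled at all\nrownames(rangesCont)[is.na(rangesCont[,1])]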
\n\n#testing with cryptic speciation\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, \n q = 0.1, \n prop.cryptic = 0.5, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0, \n count.cryptic = TRUE\n )\ntaxaCrypt <- fossilRecord2fossilTaxa(record)\ntreeCrypt <- taxa2phylo(taxaCrypt)\nlayout(1)\nplot(treeCrypt)\naxisPhylo()\n\n\n\n"} {"package":"paleotree","topic":"taxonSortPBDBocc","snippet":"### Name: taxonSortPBDBocc\n### Title: Sorting Unique Taxa of a Given Rank from Paleobiology Database\n### Occurrence Data\n### Aliases: taxonSortPBDBocc\n\n### ** Examples\n\n# Note that most examples here using getPBDBocc()\n # use the argument 'failIfNoInternet = FALSE'\n # so that functions do not error out \n # but simply return NULL if internet\n # connection is not available, and thus\n # fail gracefully rather than error out (required by CRAN).\n# Remove this argument or set to TRUE so functions DO fail\n # when internet resources (paleobiodb) is not available.\n\n## No test: \n\n# getting occurrence data for a genus, sorting it\n# first example: Dicellograptus\n\ndicelloData <- getPBDBocc(\"Dicellograptus\", \n failIfNoInternet = FALSE)\n \nif(!is.null(dicelloData)){ \n\ndicelloOcc2 <- taxonSortPBDBocc(\n data = dicelloData, \n rank = \"species\",\n onlyFormal = FALSE\n )\nnames(dicelloOcc2)\n\n}\n\n# try a PBDB API download with lots of synonymization\n\t#this should have only 1 species\n# *old* way, using v1.1 of PBDB API:\n# acoData <- read.csv(paste0(\n#\t\"https://paleobiodb.org/data1.1/occs/list.txt?\",\n#\t\"base_name = Acosarina%20minuta&show=ident,phylo\"))\n#\n# *new* method - with getPBDBocc, using v1.2 of PBDB API:\nacoData <- getPBDBocc(\"Acosarina minuta\", \n failIfNoInternet = FALSE)\n \nif(!is.null(acoData)){ \n\nacoOcc <- taxonSortPBDBocc(\n data = acoData, \n rank = \"species\", \n onlyFormal = FALSE\n )\nnames(acoOcc)\n\n}\n\n## End(No test)\n\n###########################################\n\n#load example graptolite PBDB occ dataset\ndata(graptPBDB)\n\n#get formal genera\noccGenus <- taxonSortPBDBocc(\n data = graptOccPBDB,\n rank = \"genus\"\n )\nlength(occGenus)\n\n#get formal species\noccSpeciesFormal <- taxonSortPBDBocc(\n data = graptOccPBDB,\n rank = \"species\")\nlength(occSpeciesFormal)\n\n#yes, there are fewer 'formal'\n # graptolite species in the PBDB than genera\n\n#get formal and informal species\noccSpeciesInformal <- taxonSortPBDBocc(\n data = graptOccPBDB, \n rank = \"species\",\n onlyFormal = FALSE\n )\nlength(occSpeciesInformal)\n\n#way more graptolite species are 'informal' in the PBDB\n\n#get formal and informal species \n\t#including from occurrences with uncertain taxonomy\n\t#basically everything and the kitchen sink\noccSpeciesEverything <- taxonSortPBDBocc(\n data = graptOccPBDB, \n rank = \"species\",\n onlyFormal = FALSE, \n cleanUncertain = FALSE)\nlength(occSpeciesEverything)\n\n\n\n\n\n"} {"package":"paleotree","topic":"taxonTable2taxonTree","snippet":"### Name: taxonTable2taxonTree\n### Title: Create a Taxonomy-Based Phylogeny ('Taxon Tree') from a\n### Hierarchical Table of Taxonomy Memberships\n### Aliases: taxonTable2taxonTree\n\n### ** Examples\n\n\n# let's create a small, really cheesy example\npokeTable <- rbind(cbind(\"Pokezooa\",\"Shelloidea\",\"Squirtadae\",\n c(\"Squirtle\",\"Blastoise\",\"Wartortle\")),\n c(\"Pokezooa\",\"Shelloidea\",\"\",\"Lapras\"),\n c(\"Pokezooa\",\"\",\"\",\"Parasect\"),\n cbind(\"Pokezooa\",\"Hirsutamona\",\"Rodentapokemorpha\",\n c(\"Linoone\",\"Sandshrew\",\"Pikachu\")),\n c(\"Pokezooa\",\"Hirsutamona\",NA,\"Ursaring\"))\n\npokeTree <- taxonTable2taxonTree(pokeTable)\n\nplot(pokeTree)\nnodelabels(pokeTree$node.label)\n\n\n\n"} {"package":"paleotree","topic":"termTaxa","snippet":"### Name: termTaxa\n### Title: Simulating Extinct Clades of Monophyletic Taxa\n### 
Aliases: termTaxa simTermTaxa candleTaxa simCandleTaxa trueCandle\n### simTermTaxaAdvanced trueTermTaxaTree deadTree\n\n### ** Examples\n\n\nset.seed(444)\n# example for 20 taxa\ntermTaxaRes <- simTermTaxa(20)\n\n# let's look at the taxa...\ntaxa <- termTaxaRes$taxonRanges\ntaxicDivCont(taxa)\n# because ancestors don't even exist as taxa\n\t# the true diversity curve can go to zero\n\t# kinda bizarre!\n\n# the tree should give a better idea\ntree <- termTaxaRes$tree\nphyloDiv(tree)\n# well, okay, it's a tree. \n\n# get the 'ideal cladogram' a la taxa2cladogram\n # much easier with terminal-taxa simulations\n # as no paraphyletic taxa\ncladogram <- tree\ncladogram$edge.length <- NULL\nplot(cladogram)\n\n# trying out trueTermTaxaTree\n# random times of observation: uniform distribution\ntime.obs <- apply(taxa,1,\n function(x) runif(1,x[2],x[1])\n )\ntree1 <- trueTermTaxaTree(\n termTaxaRes,\n time.obs\n )\nlayout(1:2)\nplot(tree)\nplot(tree1)\nlayout(1)\n\n########################################### \n# let's look at the change in the terminal branches\nplot(tree$edge.length,\n tree1$edge.length)\n# can see some edges are shorter on the new tree, cool\n\n# let's now simulate sampling and use FADs\nlayout(1:2)\nplot(tree)\naxisPhylo()\n\nFADs <- sampleRanges(\n termTaxaRes$taxonRanges,\n r = 0.1)[,1]\ntree1 <- trueTermTaxaTree(termTaxaRes, FADs)\n\nplot(tree1)\naxisPhylo()\n\n################################################\n# can condition on sampling some average number of taxa\n# analogous to deprecated function simFossilTaxa_SRcond\nr <- 0.1\navgtaxa <- 50\nsumRate <- 0.2\n\n# avg number necessary for an avg number sampled\nntaxa_orig <- avgtaxa / (r / (r + sumRate))\t\ntermTaxaRes <- simTermTaxa(\n ntaxa = ntaxa_orig,\n sumRate = sumRate)\n\n# note that conditioning must be conducted using full sumRate\n# this is because durations are functions of both rates\n# just like in bifurcation\n\n# now, use advanced version of simTermTaxa: simTermTaxaAdvanced\n # allows for extant taxa in a term-taxa simulation\n\n#with min.cond\ntermTaxaRes <- simTermTaxaAdvanced(\n p = 0.1,\n q = 0.1,\n mintaxa = 50,\n maxtaxa = 100,\n maxtime = 100,\n minExtant = 10,\n maxExtant = 20,\n min.cond = TRUE\n )\n \n# notice that arguments are similar to simFossilRecord\n\t# and even more similar to deprecated function simFossilTaxa\n\t\nplot(termTaxaRes$tree)\nNtip(termTaxaRes$tree)\n\n# without min.cond\ntermTaxaRes <- simTermTaxaAdvanced(\n p = 0.1,\n q = 0.1,\n mintaxa = 50,\n maxtaxa = 100,\n maxtime = 100,\n minExtant = 10,\n maxExtant = 20,\n min.cond = FALSE\n )\n \nplot(termTaxaRes$tree)\nNtip(termTaxaRes$tree)\n\nlayout(1)\n\n\n"} {"package":"paleotree","topic":"testEdgeMat","snippet":"### Name: testEdgeMat\n### Title: Test the Edge Matrix of a \"phylo\" Phylogeny Object for\n### Inconsistencies\n### Aliases: testEdgeMat cleanNewPhylo cleanTree\n\n### ** 
Examples\n\n## No test: \nset.seed(444)\nrecord <- simFossilRecord(p = 0.1, q = 0.1, nruns = 1,\n\tnTotalTaxa = c(100,200))\ntaxa <- fossilRecord2fossilTaxa(record)\ntree <- taxa2cladogram(taxa)\nranges <- sampleRanges(taxa,r = 0.5)\ntree1 <- timeLadderTree(tree,ranges)\nlayout(1:2)\nplot(ladderize(tree),show.tip.label = FALSE)\nplot(ladderize(tree1),show.tip.label = FALSE)\n\n#an example applying timeLadderTree to discrete time data\nrangeData <- binTimeData(ranges,int.len = 5)\t#sim discrete range data\ntree2 <- bin_timePaleoPhy(tree,timeList = rangeData,timeres = TRUE)\nplot(ladderize(tree),show.tip.label = FALSE)\nplot(ladderize(tree2),show.tip.label = FALSE)\naxisPhylo() \n\nlayout(1)\n## End(No test)\n\n\n"} {"package":"paleotree","topic":"timeList2fourDate","snippet":"### Name: timeList2fourDate\n### Title: Converting Datasets of Taxon Ranges in Intervals Between\n### 'timeList' format and 'fourDate' format\n### Aliases: timeList2fourDate fourDateFunctions fourDate2timeList\n\n### ** Examples\n\n# timeList object from the retiolinae dataset\ndata(retiolitinae)\n\nstr(retioRanges)\n\ntaxicDivDisc(retioRanges)\n\nfourDateRet <- timeList2fourDate(retioRanges)\n\n# total uncertainty in retio first and last appearances?\nsum(\n (fourDateRet[,1] - fourDateRet[,2]) + \n (fourDateRet[,3]-fourDateRet[,4])\n )\n\n#convert back\nnewTimeList <- fourDate2timeList(fourDateRet)\ntaxicDivDisc(newTimeList)\n\n\n\n"} {"package":"paleotree","topic":"timePaleoPhy","snippet":"### Name: timePaleoPhy\n### Title: Simplistic _a posteriori_ Dating Approaches For Paleontological\n### Phylogenies\n### Aliases: timePaleoPhy bin_timePaleoPhy\n\n### ** Examples\n\n\n# examples with empirical data\n\n#load data\ndata(retiolitinae)\n\n#Can plot the unscaled cladogram\nplot(retioTree)\n#Can plot discrete time interval diversity curve with retioRanges\ntaxicDivDisc(retioRanges)\n\n#Use basic time-scaling (terminal branches only go to FADs)\nttree <- bin_timePaleoPhy(\n tree = retioTree,\n timeList = retioRanges,\n type = "basic",\n ntrees = 1,\n plot = TRUE\n )\n\n#Use basic time-scaling (terminal branches go to LADs)\nttree <- bin_timePaleoPhy(\n tree = retioTree,\n timeList = retioRanges,\n type = "basic",\n add.term = TRUE,\n ntrees = 1, \n plot = TRUE\n )\n\n#minimum branch length time-scaling (terminal branches only go to FADs)\nttree <- bin_timePaleoPhy(\n tree = retioTree,\n timeList = retioRanges,\n type = "mbl",\n vartime = 1, \n ntrees = 1, \n plot = TRUE\n )\n\n###################\n\n# examples with simulated data\n\n# Simulate some fossil ranges with simFossilRecord\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, \n nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0\n )\ntaxa <- fossilRecord2fossilTaxa(record)\n \n#simulate a fossil record with imperfect sampling with sampleRanges\nrangesCont <- sampleRanges(taxa, r = 0.5)\n#let's use taxa2cladogram to get the 'ideal' cladogram of the taxa\ncladogram <- taxa2cladogram(taxa, \n plot = TRUE)\n \n#Now let's try timePaleoPhy using the continuous range data\nttree <- timePaleoPhy(\n cladogram,\n rangesCont, \n type = "basic",\n plot = TRUE\n )\n \n#plot diversity curve \nphyloDiv(ttree)\n\n\n################################################\n# that tree lacked the terminal parts of ranges \n # (tips stop at the taxon FADs)\n# let's add those terminal ranges back on with add.term\nttree <- timePaleoPhy(\n cladogram,\n rangesCont,\n type = "basic",\n add.term = TRUE,\n plot = TRUE\n )\n \n#plot diversity curve 
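\n# (phyloDiv plots a lineage-through-time diversity curve for the dated tree)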
\nphyloDiv(ttree)\n\n\n#################################################\n# that tree didn't look very resolved, did it? \n # (See Wagner and Erwin 1995 to see why)\n# can randomly resolve trees using the argument randres\n# each resulting tree will have polytomies\n # randomly resolved stochastically using ape::multi2di\nttree <- timePaleoPhy(\n cladogram,\n rangesCont,\n type = "basic",\n ntrees = 1,\n randres = TRUE,\n add.term = TRUE,\n plot = TRUE\n )\n \n# Notice the warning it prints! PAY ATTENTION!\n# We would need to set ntrees to a large number\n # to get a fair sample of trees\n\n# if we set ntrees > 1, timePaleoPhy will make multiple time-trees\nttrees <- timePaleoPhy(\n cladogram,\n rangesCont,\n type = "basic",\n ntrees = 9,\n randres = TRUE,\n add.term = TRUE,\n plot = TRUE)\n#let's compare nine of them at once in a plot\nlayout(matrix(1:9, 3, 3))\nparOrig <- par(no.readonly = TRUE)\npar(mar = c(1, 1, 1, 1))\nfor(i in 1:9){\n plot(\n ladderize(ttrees[[i]]),\n show.tip.label = FALSE,\n no.margin = TRUE\n )\n }\n#they are all a bit different!\n\n\n############################################## \n# we can also resolve the polytomies in the tree\n # according to time of first appearance via the function timeLadderTree\n # by setting the argument 'timeres = TRUE'\nttree <- timePaleoPhy(\n cladogram,\n rangesCont,\n type = "basic",\n ntrees = 1,\n timeres = TRUE,\n add.term = TRUE,\n plot = TRUE\n )\n\n#can plot the median diversity curve with multiDiv\nlayout(1)\npar(parOrig)\nmultiDiv(ttrees)\n\n#compare different methods of timePaleoPhy\nlayout(matrix(1:6, 3, 2))\nparOrig <- par(no.readonly = TRUE)\npar(mar = c(3, 2, 1, 2))\nplot(ladderize(timePaleoPhy(\n cladogram,\n rangesCont,\n type = "basic",\n vartime = NULL,\n add.term = TRUE\n )))\naxisPhylo()\ntext(x = 50,y = 23,\n "type = basic",\n adj = c(0,0.5),\n cex = 1.2)\n#\nplot(ladderize(timePaleoPhy(\n cladogram,\n rangesCont,\n type = "equal",\n vartime = 10,\n add.term = TRUE\n )))\naxisPhylo()\ntext(x = 55,y = 23,\n "type = equal",\n adj = c(0,0.5),\n cex = 1.2)\n#\nplot(\n ladderize(\n timePaleoPhy(\n cladogram,\n rangesCont,\n type = "aba",\n vartime = 1,\n add.term = TRUE\n )\n )\n )\n \naxisPhylo()\ntext(x = 55,y = 23,\n "type = aba",\n adj = c(0,0.5),\n cex = 1.2)\n \n \n#\nplot(\n ladderize(\n timePaleoPhy(\n cladogram,\n rangesCont,\n type = "zlba",\n vartime = 1,\n add.term = TRUE\n )\n )\n )\n \naxisPhylo()\ntext(x = 55, \n y = 23, \n "type = zlba",\n adj = c(0,0.5), \n cex = 1.2\n )\n \n \n#\nplot(\n ladderize(\n timePaleoPhy(\n cladogram,\n rangesCont,\n type = "mbl",\n vartime = 1,\n add.term = TRUE\n )\n )\n )\n \naxisPhylo()\ntext(x = 55,y = 23,\n "type = mbl",\n adj = c(0,0.5),\n cex = 1.2\n )\nlayout(1)\npar(parOrig)\n\n\n##############################################\n#using node.mins\n#let's say we have (molecular??) 
evidence that\n # node #5 is at least 1200 time-units ago\n#to use node.mins, we first need to drop any unshared taxa\n\ndroppers <- cladogram$tip.label[is.na(\n match(cladogram$tip.label,\n names(which(!is.na(rangesCont[,1])))\n )\n )]\ncladoDrop <- drop.tip(cladogram, droppers)\n\n# now make vector same length as number of nodes\nnodeDates <- rep(NA, Nnode(cladoDrop))\nnodeDates[5] <- 1200\n\nttree1 <- timePaleoPhy(\n cladoDrop,rangesCont,\n type = "basic",\n randres = FALSE,\n node.mins = nodeDates,\n plot = TRUE)\n \nttree2 <- timePaleoPhy(\n cladoDrop,\n rangesCont,\n type = "basic",\n randres = TRUE,\n node.mins = nodeDates,\n plot = TRUE)\n\n\n####################################################\n###################################################\n####################################################\n#Using bin_timePaleoPhy to time-scale with discrete interval data\n\n#first let's use binTimeData() to bin in intervals of 1 time unit\nrangesDisc <- binTimeData(rangesCont,int.length = 1)\n\nttreeB1 <- bin_timePaleoPhy(\n cladogram,\n rangesDisc,\n type = "basic",\n ntrees = 1,\n randres = TRUE,\n add.term = TRUE,\n plot = FALSE\n )\n\n#notice the warning it prints!\nphyloDiv(ttreeB1)\n\n#with time-order resolving via timeLadderTree\nttreeB2 <- bin_timePaleoPhy(\n cladogram,\n rangesDisc,\n type = "basic",\n ntrees = 1,\n timeres = TRUE,\n add.term = TRUE,\n plot = FALSE\n )\n\nphyloDiv(ttreeB2)\n\n\n#can also force the appearance timings not to be chosen stochastically\nttreeB3 <- bin_timePaleoPhy(\n cladogram,\n rangesDisc,\n type = "basic",\n ntrees = 1,\n nonstoch.bin = TRUE,\n randres = TRUE,\n add.term = TRUE,\n plot = FALSE\n )\n\nphyloDiv(ttreeB3)\n\n \n# testing node.mins in bin_timePaleoPhy\nttree <- bin_timePaleoPhy(\n cladoDrop,\n rangesDisc,\n type = "basic",\n ntrees = 1,\n add.term = TRUE,\n randres = FALSE,\n node.mins = nodeDates,\n plot = TRUE\n )\n \n# with randres = TRUE\nttree <- bin_timePaleoPhy(\n cladoDrop,\n rangesDisc,\n type = "basic",\n ntrees = 1,\n add.term = TRUE,\n randres = TRUE,\n node.mins = nodeDates,\n plot = TRUE\n )\n\n## No test: \n#simple three taxon example for testing inc.term.adj\nranges1 <- cbind(c(3, 4, 5), c(2, 3, 1))\nrownames(ranges1) <- paste("t", 1:3, sep = "")\n\nclado1 <- read.tree(file = NA,\n text = "(t1,(t2,t3));")\n \nttree1 <- timePaleoPhy(\n clado1,\n ranges1,\n type = "mbl",\n vartime = 1\n )\n \nttree2 <- timePaleoPhy(\n clado1,\n ranges1,\n type = "mbl",\n vartime = 1,\n add.term = TRUE\n )\n \nttree3 <- timePaleoPhy(\n clado1,\n ranges1,\n type = "mbl",\n vartime = 1,\n add.term = TRUE,\n inc.term.adj = TRUE\n )\n\n# see differences in root times\nttree1$root.time\nttree2$root.time\nttree3$root.time\n\n-apply(ranges1, 1, diff)\n\nlayout(1:3)\n\nplot(ttree1)\naxisPhylo()\n\nplot(ttree2)\naxisPhylo()\n\nplot(ttree3)\naxisPhylo()\n\n## End(No test)\n\n\n\n"} {"package":"paleotree","topic":"timeSliceTree","snippet":"### Name: timeSliceTree\n### Title: Time-Slicing a Phylogeny\n### Aliases: timeSliceTree\n\n### ** Examples\n\n\n# a neat example of using phyloDiv with timeSliceTree \n # to simulate doing extant-only phylogeny studies \n # of diversification...in the past!\nset.seed(444)\nrecord <- simFossilRecord(\n p = 0.1, q = 0.1, nruns = 1,\n nTotalTaxa = c(30,40), \n nExtant = 0)\ntaxa <- fossilRecord2fossilTaxa(record)\ntaxicDivCont(taxa)\n\n# that's the whole diversity curve\n # now let's do it for a particular time-slice\ntree <- taxa2phylo(taxa)\n# use timeSliceTree to make tree of relationships\n 
# up until time = 950 \ntree950 <- timeSliceTree(\n tree,\n sliceTime = 950,\n plot = TRUE,\n drop.extinct = FALSE\n )\n\n# compare tip labels when we use tipLabels = "allDesc"\ntree950_AD <- timeSliceTree(\n tree,\n sliceTime = 950,\n plot = TRUE,\n tipLabel = "allDesc",\n drop.extinct = FALSE\n )\n \n# look for the differences! \ncbind(tree950$tip.label, tree950_AD$tip.label)\n\n# with timeSliceTree we could\n # look at the lineage accumulation curve \n # we would recover from the species extant\n # at that point in time\n\n# use drop.extinct = TRUE to only get the\n # tree of lineages extant at time = 950\ntree950 <- timeSliceTree(\n tree,\n sliceTime = 950,\n plot = FALSE,\n drop.extinct = TRUE\n )\n# now it's an ultrametric tree with many fewer tips...\n # let's plot the lineage accumulation plot on a log scale\nphyloDiv(tree950,\n plotLogRich = TRUE\n )\n\n\n\n"} {"package":"paleotree","topic":"tipDatingCompatabilitySummaryMrB","snippet":"### Name: tipDatingCompatabilitySummaryMrB\n### Title: Get the Compatibility Summary Topology From a Tip-Dating\n### Analysis with MrBayes\n### Aliases: tipDatingCompatabilitySummaryMrB\n\n### ** Examples\n\n## Not run: \n##D #pull post-burn-in trees from the posterior\n##D # and get the half-compatibility summary (majority-rule consensus)\n##D # by setting 'compatibilityThreshold = 0.5'\n##D \n##D halfCompatTree <- tipDatingCompatabilitySummaryMrB(\n##D \trunFile = "C:\\\myTipDatingAnalysis\\\MrB_run_fossil_05-10-17.nex.run1.t",\n##D \tnRuns = 2, burnin = 0.5, \n##D \tcompatibilityThreshold = 0.5,\n##D \tlabelPostProb = TRUE\n##D \t)\n##D \n##D # let's try plotting it with posterior probabilities as node labels\n##D plot(halfCompatTree)\n##D nodelabels(halfCompatTree$node.label)\n##D \n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"treeContradiction","snippet":"### Name: treeContradiction\n### Title: Measure the Contradiction Difference Between Two Phylogenetic\n### Topologies\n### Aliases: treeContradiction\n\n### ** Examples\n\n\n# let's simulate two trees\n\nset.seed(1)\ntreeA <- rtree(30,br = NULL)\ntreeB <- rtree(30,br = NULL)\n\n## Not run: \n##D \n##D # visualize the difference between these two trees\n##D library(phytools)\n##D plot(cophylo(treeA,treeB))\n##D \n##D # what is the Robinson-Foulds (RF) distance between these trees?\n##D library(phangorn)\n##D treedist(treeA,treeB)\n##D \n## End(Not run)\n\n# The RF distance is less intuitive when \n # we consider a tree that isn't well-resolved\n\n# let's simulate the worst resolved tree possible: a star tree\ntreeC <- stree(30)\n\n## Not run: \n##D # plot the tanglegram between A and C\n##D plot(cophylo(treeA,treeC))\n##D \n##D # however the RF distance is *not* zero\n##D # even though the only difference is a difference in resolution\n##D treedist(treeA,treeC)\n## End(Not run)\n\n# the contradiction difference (CD) ignores differences in resolution\n\n# Tree C (the star tree) has zero CD between it and trees A and B\nidentical(treeContradiction(treeA,treeC),0) # should be zero distance\nidentical(treeContradiction(treeB,treeC),0) # should be zero distance\n\n# two identical trees also have zero CD between them (as you'd hope) \nidentical(treeContradiction(treeA,treeA),0) # should be zero distance\n\n# and here's the CD between A and B\ntreeContradiction(treeA,treeB) # should be non-zero distance\n\n# a less ideal property of the CD is that swapping two taxa on opposite \n# ends of an otherwise identical topology\n# will return the maximum contradiction difference possible (i.e., ` = 1`)\n\n# an example\ntreeAA <- read.tree(text = "(A,(B,(C,(D,(E,F)))));")\ntreeBB <- read.tree(text = "(E,(B,(C,(D,(A,F)))));")\n\n## Not run: \n##D plot(cophylo(treeAA,treeBB))\n## End(Not run)\n\ntreeContradiction(treeAA,treeBB)\n\n## Not run: \n##D # Note, however, that this is a property of the RF distance too:\n##D treedist(treeAA,treeBB)\n## End(Not run)\n\n\n\n"} {"package":"paleotree","topic":"twoWayEcologyCluster","snippet":"### Name: twoWayEcologyCluster\n### Title: R-Mode vs Q-Mode Two-Way Cluster Analyses and Abundance Plot for\n### Community Ecology Data\n### Aliases: twoWayEcologyCluster\n\n### ** Examples\n\nset.seed(1)\n\n# generate random community ecology data\n # using a Poisson distribution\ndata<-matrix(rpois(5*7,1),5,7)\n\n# get relative abundance, distance matrices\npropAbundMat<-t(apply(data,1,function(x) x/sum(x)))\nrownames(propAbundMat)<-paste0("site ", 1:nrow(propAbundMat))\ncolnames(propAbundMat)<-paste0("taxon ", 1:ncol(propAbundMat))\n\n# for simplicity, let's calculate\n # the pairwise square chord distance\n # between sites and taxa\n\nsquareChordDist<-function(mat){\n res<-apply(mat,1,function(x)\n apply(mat,1,function(y)\n sum((sqrt(x)-sqrt(y))^2)\n )\n )\n #\n res<-as.dist(res)\n return(res)\n }\n\n# it's not a very popular distance metric\n # but it will do\n # quite popular in palynology\n\nsiteDist<-squareChordDist(propAbundMat)\ntaxaDist<-squareChordDist(t(propAbundMat))\n\ndev.new(width=10) \n\ntwoWayEcologyCluster(\n xDist = siteDist, \n yDist = taxaDist,\n propAbund = propAbundMat\n )\n\n## Not run: \n##D \n##D # now let's try an example with the example kanto dataset\n##D # and use bray-curtis distance from vegan\n##D \n##D library(vegan)\n##D \n##D data(kanto)\n##D \n##D # get distance matrices for sites and taxa\n##D # based on bray-curtis dist\n##D # standardized to total abundance\n##D \n##D # standardize site matrix to relative abundance\n##D siteStandKanto <- decostand(kanto, method = "total")\n##D \n##D # calculate site distance matrix (Bray-Curtis)\n##D siteDistKanto <- vegdist(siteStandKanto, "bray")\n##D \n##D # calculate taxa distance matrix (Bray-Curtis)\n##D # from transposed standardized site matrix \n##D taxaDistKanto <- vegdist(t(siteStandKanto), "bray")\n##D \n##D dev.new(width=10) \n##D \n##D twoWayEcologyCluster(\n##D xDist = siteDistKanto,\n##D yDist = taxaDistKanto,\n##D propAbund = siteStandKanto,\n##D cex.axisLabels = 0.8\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"paleotree","topic":"unitLengthTree","snippet":"### Name: unitLengthTree\n### Title: Scale Tree to Unit-Length\n### Aliases: unitLengthTree\n\n### ** Examples\n\n\nset.seed(444)\ntree <- rtree(10)\n\nlayout(1:2)\nplot(tree)\nplot(unitLengthTree(tree))\nlayout(1)\n\n\n\n"} {"package":"diathor","topic":"diaThorAll","snippet":"### Name: diaThorAll\n### Title: Runs all the DiaThor functions in a pipeline\n### Aliases: diaThorAll\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata("diat_sampleData")\n# In the example, a temporary directory will be used in resultsPath\nallResults <- diaThorAll(diat_sampleData, resultsPath = tempdir())\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_cemfgs_rb","snippet":"### Name: diat_cemfgs_rb\n### Title: Calculate the combined classification of ecological guilds and\n### size classes for diatoms\n### Aliases: diat_cemfgs_rb\n### Keywords: bioindicator biotic 
diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nguildsResults <- diat_cemfgs_rb(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_des","snippet":"### Name: diat_des\n### Title: Calculates the Descy Index (DES)\n### Aliases: diat_des\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\ndesResults <- diat_des(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_disp","snippet":"### Name: diat_disp\n### Title: Calculates the Diatom Index for Soda Pans (DISP)\n### Aliases: diat_disp\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\ndispResults <- diat_disp(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_epid","snippet":"### Name: diat_epid\n### Title: Calculates the EPID index (EPID)\n### Aliases: diat_epid\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nepidResults <- diat_epid(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_guilds","snippet":"### Name: diat_guilds\n### Title: Calculate ecological guilds for diatoms\n### Aliases: diat_guilds\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nguildsResults <- diat_guilds(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_idap","snippet":"### Name: diat_idap\n### Title: Calculates the Indice 
Diatomique Artois-Picardie (IDAP)\n### Aliases: diat_idap\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nidapResults <- diat_idap(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_idch","snippet":"### Name: diat_idch\n### Title: Calculates the Swiss Diatom Index (IDCH)\n### Aliases: diat_idch\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nidchResults <- diat_idch(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_idp","snippet":"### Name: diat_idp\n### Title: Calculates the Pampean Diatom Index (IDP)\n### Aliases: diat_idp\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nidpResults <- diat_idp(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_ilm","snippet":"### Name: diat_ilm\n### Title: Calculates the ILM Index (ILM)\n### Aliases: diat_ilm\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nilmResults <- diat_ilm(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_ips","snippet":"### Name: diat_ips\n### Title: Calculates the Specific Polluosensitivity Index (IPS) index\n### Aliases: diat_ips\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nipsResults <- diat_ips(df)\n## End(No test)\n\n\n"} 
{"package":"diathor","topic":"diat_lobo","snippet":"### Name: diat_lobo\n### Title: Calculates the Lobo Index (LOBO)\n### Aliases: diat_lobo\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nloboResults <- diat_lobo(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_morpho","snippet":"### Name: diat_morpho\n### Title: Calculate morphological parameters for diatoms\n### Aliases: diat_morpho\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nmorphoResults <- diat_morpho(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_pbidw","snippet":"### Name: diat_pbidw\n### Title: Calculates the PBIDW Index (PBIDW)\n### Aliases: diat_pbidw\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\npbidwResults <- diat_pbidw(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_size","snippet":"### Name: diat_size\n### Title: Calculate size classes for diatoms\n### Aliases: diat_size\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nsizeResults <- diat_size(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_sla","snippet":"### Name: diat_sla\n### Title: Calculates the Sladecek Index (SLA)\n### Aliases: diat_sla\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath 
= tempdir())\nslaResults <- diat_sla(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_spear","snippet":"### Name: diat_spear\n### Title: Calculates the SPEAR(herbicides) Index (SPEAR)\n### Aliases: diat_spear\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\nspearResults <- diat_spear(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_tdi","snippet":"### Name: diat_tdi\n### Title: Calculates the Trophic (TDI) index\n### Aliases: diat_tdi\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## No test: \n# Example using sample data included in the package (sampleData):\ndata(\"diat_sampleData\")\n# First, the diat_loadData() function has to be called to read the data\n# The data will be stored into a list (loadedData)\n# And an output folder will be selected through a dialog box if resultsPath is empty\n# In the example, a temporary directory will be used in resultsPath\ndf <- diat_loadData(diat_sampleData, resultsPath = tempdir())\ntdiResults <- diat_tdi(df)\n## End(No test)\n\n\n"} {"package":"diathor","topic":"diat_vandam","snippet":"### Name: diat_vandam\n### Title: Calculates ecological information for diatoms based on the Van\n### Dam classification\n### Aliases: diat_vandam\n### Keywords: bioindicator biotic diatom ecology\n\n### ** Examples\n\n## Not run: \n##D # Example using sample data included in the package (sampleData):\n##D data(\"diat_sampleData\")\n##D # First, the diat_loadData() function has to be called to read the data\n##D # The data will be stored into a list (loadedData)\n##D # And an output folder will be selected through a dialog box if resultsPath is empty\n##D # In the example, a temporary directory will be used in resultsPath\n##D df <- diat_loadData(diat_sampleData, resultsPath = tempdir())\n##D vandamResults <- diat_vandam(df)\n## End(Not run)\n\n\n"} {"package":"PLFD","topic":"plfd","snippet":"### Name: plfd\n### Title: PLFD\n### Aliases: plfd\n\n### ** Examples\n\nset.seed(2023)\nrDim <- 20\ncDim <- 20\n\nn <- 100\ny <- sample(1:2, n, TRUE, c(0.5, 0.5))\nx <- array(rnorm(rDim*cDim*n), dim=c(rDim, cDim, n))\nx[, , y==2] <- (x[, , y==2] + 1.0)\n\nntest <- 200\nytest <- sample(1:2, ntest, TRUE, c(0.5, 0.5))\nxtest <- array(rnorm(rDim*cDim*ntest), dim=c(rDim, cDim, ntest))\nxtest[, , ytest==2] <- (xtest[, , ytest==2] + 1.0)\n\n## Uniform partition\nprint( plfd(x, y, r0=5, c0=5) )\n\n## Pre-specify feature blocks\nblockList <- list(list(rIdx=1:5, cIdx=1:5), \n list(rIdx=6:10, cIdx=1:5), \n list(rIdx=3:9, cIdx=2:8))\nprint( plfd.model <- plfd(x, y, blockList=blockList) )\n\n## Predict\npredict(plfd.model, xtest, ytest)\n\n\n\n"} {"package":"UpSetVP","topic":"baima","snippet":"### Name: baima\n### Title: EcM Fungal Data with Environmental Variables\n### Aliases: baima baima.fun baima.env\n### Keywords: datasets\n\n### ** Examples\n\ndata(baima.fun)\ndata(baima.env)\n\n\n"} {"package":"UpSetVP","topic":"barplot_hp","snippet":"### Name: barplot_hp\n### Title: Visualization of HP Using Column Diagram\n### Aliases: barplot_hp\n\n### ** Examples\n\n## No test: 
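\n# note: vegdist() used below comes from the vegan package; if it is not\n# attached automatically along with rdacca.hp, run library(vegan) first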
\nlibrary(rdacca.hp)\n\n## A simple example of partial dbRDA\ndata(baima.fun)\ndata(baima.env)\n\n# Bray-Curtis index was used to calculate community composition dissimilarity\nbaima.fun.bray <- vegdist(baima.fun, method = "bray")\n\n# Quantify the individual effects of soil properties on EcM fungal community composition\nsoil <- baima.env[c("pH", "TP", "TK", "AN", "AP", "AK")]\nbaima.soil.vp <- rdacca.hp(baima.fun.bray, soil, method = "dbRDA", type = "adjR2")\n\n# Plot individual effects\nbarplot_hp(baima.soil.vp, col.fill = "var", \n col.color = c("#8DD3C7", "#FFFFB3", "#BEBADA", "#FB8072", "#80B1D3", "#FDB462", "#B3DE69"))\n## End(No test)\n\n\n"} {"package":"UpSetVP","topic":"upset_vp","snippet":"### Name: upset_vp\n### Title: Visualization of VPA and HP Using UpSetVP Diagram\n### Aliases: upset_vp\n\n### ** Examples\n\n## No test: \nlibrary(rdacca.hp)\n\n## A simple example of partial dbRDA\ndata(baima.fun)\ndata(baima.env)\n\n# Bray-Curtis index was used to calculate community composition dissimilarity\nbaima.fun.bray <- vegdist(baima.fun, method = "bray")\n\n# The relative importance of individual soil properties on EcM fungal community composition\nsoil <- baima.env[c("pH", "TP", "TK", "AN", "AP", "AK")]\nbaima.soil.vp <- rdacca.hp(baima.fun.bray, soil,\n method = "dbRDA", var.part = TRUE, type = "adjR2")\n\n# Plot unique, common, as well as individual effects\nupset_vp(baima.soil.vp, plot.hp = TRUE)\n\n## Example adapted from Gong et al. (2022)\nif(requireNamespace("adespatial", quietly = TRUE)) {\n\n# Distance-based Moran's eigenvector maps (dbMEM) was used to extract spatial relationships\nspace.dbmem <- adespatial::dbmem(baima.env[c("latitude", "lontitude")])\n \n # The relative importance of groups of environmental factors on EcM fungal community composition\n env.list <- list(\n elevation = baima.env["altitude"],\n season = baima.env["season"],\n space = data.frame(space.dbmem)[1:2],\n host = baima.env[c("em.GR", "em.abun")],\n climate = baima.env["sea.MT"],\n soil = baima.env[c("pH", "TP", "TK", "AN", "AP", "AK")]\n )\n baima.env.vp <- rdacca.hp(baima.fun.bray, env.list,\n method = "dbRDA", var.part = TRUE, type = "adjR2")\n \n # Plot unique, common, as well as individual effects\n upset_vp(baima.env.vp, plot.hp = TRUE, order.part = "degree")\n}\n## End(No test)\n\n\n"} {"package":"magic","topic":"Frankenstein","snippet":"### Name: Frankenstein\n### Title: A perfect magic cube due to Frankenstein\n### Aliases: Frankenstein\n### Keywords: datasets\n\n### ** Examples\n\ndata(Frankenstein)\nis.perfect(Frankenstein)\n\n\n"} {"package":"magic","topic":"Ollerenshaw","snippet":"### Name: Ollerenshaw\n### Title: A most perfect square due to Ollerenshaw\n### Aliases: Ollerenshaw\n### Keywords: datasets\n\n### ** Examples\n\ndata(Ollerenshaw)\nis.mostperfect(Ollerenshaw)\n\n\n"} {"package":"magic","topic":"adiag","snippet":"### Name: adiag\n### Title: Binds arrays corner-to-corner\n### Aliases: adiag\n### Keywords: array\n\n### ** Examples\n\n a <- array( 1,c(2,2))\n b <- array(-1,c(2,2))\n adiag(a,b)\n\n ## dropped dimensions can count:\n\n b2 <- b1 <- b\n dim(a) <- c(2,1,2)\n dim(b1) <- c(2,2,1)\n dim(b2) <- c(1,2,2)\n\n dim(adiag(a,b1))\n dim(adiag(a,b2))\n\n## dimnames are preserved if not null:\n\na <- matrix(1,2,2,dimnames=list(col=c("red","blue"),size=c("big","small"))) \nb <- 8\ndim(b) <- c(1,1)\ndimnames(b) <- list(col=c("green"),size=c("tiny"))\nadiag(a,b) #dimnames 
preserved\nadiag(a,8) #dimnames lost because second argument has none.\n\n## non scalar values for pad can be confusing:\nq <- matrix(0,3,3)\nadiag(q,q,pad=1:4)\n\n## following example should make the pattern clear:\nadiag(q,q,pad=1:36)\n\n\n# Now, a use for arrays with dimensions of zero extent:\nz <- array(dim=c(0,3))\ncolnames(z) <- c(\"foo\",\"bar\",\"baz\")\n\nadiag(a,z) # Observe how this has\n # added no (ie zero) rows to \"a\" but\n # three extra columns filled with the pad value\n\nadiag(a,t(z))\nadiag(z,t(z)) # just the pad value\n\n\n\n"} {"package":"magic","topic":"allsubhypercubes","snippet":"### Name: allsubhypercubes\n### Title: Subhypercubes of magic hypercubes\n### Aliases: allsubhypercubes\n### Keywords: array\n\n### ** Examples\n\n a <- magichypercube.4n(1,d=4)\n allsubhypercubes(a)\n\n\n"} {"package":"magic","topic":"allsums","snippet":"### Name: allsums\n### Title: Row, column, and two diagonal sums of arrays\n### Aliases: allsums\n### Keywords: array\n\n### ** Examples\n\nallsums(magic(7))\nallsums(magic(7),func=max)\n\nallsums(magic(7),func=range)\nallsums(magic(7),func=function(x){x[1:2]})\n\n\nallsums(magic(7),sort)\n # beware! compare apply(magic(7),1,sort) and apply(magic(7),2,sort)\n\n\n\n"} {"package":"magic","topic":"apad","snippet":"### Name: apad\n### Title: Pad arrays\n### Aliases: apad\n### Keywords: array\n\n### ** Examples\n\n\napad(1:10,4,method=\"mirror\")\n\n\na <- matrix(1:30,5,6)\n\napad(a,c(4,4))\napad(a,c(4,4),post=FALSE)\n\napad(a,1,5)\n\napad(a,c(5,6),method=\"mirror\")\napad(a,c(5,6),method=\"mirror\",post=FALSE)\n\n\n"} {"package":"magic","topic":"apl","snippet":"### Name: apl\n### Title: Replacements for APL functions take and drop\n### Aliases: apl take apldrop apltake apldrop<- apltake<-\n### Keywords: array\n\n### ** Examples\n\na <- magichypercube.4n(m=1)\napltake(a,c(2,3,2))\napldrop(a,c(1,1,2))\n\nb <- matrix(1:30,5,6)\napldrop(b,c(1,-2)) <- -1\n\nb <- matrix(1:110,10,11)\napltake(b,2) <- -1\napldrop(b,c(5,-7)) <- -2\nb\n\n\n\n"} {"package":"magic","topic":"aplus","snippet":"### Name: aplus\n### Title: Generalized array addition\n### Aliases: aplus\n### Keywords: array\n\n### ** Examples\n\n\naplus(rbind(1:9),cbind(1:9))\n\na <- matrix(1:8,2,4)\nb <- matrix(1:10,5,2)\naplus(a*100,b,b)\n\n\n\n\n\n"} {"package":"magic","topic":"arev","snippet":"### Name: arev\n### Title: Reverses some dimensions; a generalization of rev\n### Aliases: arev\n### Keywords: array\n\n### ** Examples\n\na <- matrix(1:42,6,7)\narev(a) #Note swap defaults to TRUE\n\nb <- magichypercube.4n(1,d=4)\narev(b,c(TRUE,FALSE,TRUE,FALSE))\n\n\n"} {"package":"magic","topic":"arot","snippet":"### Name: arot\n### Title: Rotates an array about two specified dimensions\n### Aliases: arot\n### Keywords: array\n\n### ** Examples\n\na <- array(1:16,rep(2,4))\narot(a)\n\n\n"} {"package":"magic","topic":"arow","snippet":"### Name: arow\n### Title: Generalized row and col\n### Aliases: arow\n### Keywords: array\n\n### ** Examples\n\na <- array(0,c(3,3,2,2))\narow(a,2)\n(arow(a,1)+arow(a,2)+arow(a,3)+arow(a,4))%%2\n\n\n\n"} {"package":"magic","topic":"as.standard","snippet":"### Name: as.standard\n### Title: Standard form for magic squares\n### Aliases: as.standard is.standard is.standard.toroidal\n### Keywords: array\n\n### ** Examples\n\nis.standard(magic.2np1(4))\nas.standard(magic.4n(3))\n\nas.standard(magichypercube.4n(1,5))\n\n##non-square arrays:\nas.standard(magic(7)[1:3,])\n\n\n## Toroidal transforms preserve pandiagonalness:\nis.pandiagonal(as.standard(hudson(11)))\n\n\n## 
but not magicness:\nis.magic(as.standard(magic(10),TRUE))\n\n\n\n\n"} {"package":"magic","topic":"cilleruelo","snippet":"### Name: cilleruelo\n### Title: A class of multiplicative magic squares due to Cilleruelo and\n### Luca\n### Aliases: cilleruelo\n\n### ** Examples\n\nis.magic(cilleruelo(5,6))\nis.magic(cilleruelo(5,6),func=prod)\n\n\nf <- function(n){\n jj <-\n sapply(\n seq(from=5,len=n),\n function(i){cilleruelo(i,i-4)}\n )\n xM <- apply(jj,2,max)\n xm <- apply(jj,2,min)\n\n cbind(xM-xm , 5^(5/12)*xm^0.5 , 6*xm^0.5)\n}\n \nmatplot(f(200),type='l',log='xy',xlab='n',ylab='')\nlegend(x=\"topleft\",legend=c(\"xM-xm\",\"5^(5/12).xm^(1/2)\",\"6xm^(1/2)\"),\n lty=1:3,col=1:3)\n\n\n\n\n\n"} {"package":"magic","topic":"circulant","snippet":"### Name: circulant\n### Title: Circulant matrices of any order\n### Aliases: circulant is.circulant\n### Keywords: array\n\n### ** Examples\n\ncirculant(5)\ncirculant(2^(0:4))\nis.circulant(circulant(5))\n\n a <- outer(1:3,1:3,\"+\")%%3\n is.circulant(a)\n is.circulant(a,c(1,2))\n\n is.circulant(array(c(1:4,4:1),rep(2,3)))\n\n is.circulant(magic(5)%%5,c(1,-2))\n\n\n\n"} {"package":"magic","topic":"cube2","snippet":"### Name: cube2\n### Title: A pantriagonal magic cube\n### Aliases: cube2\n### Keywords: datasets\n\n### ** Examples\n\ndata(cube2)\nis.magichypercube(cube2)\nis.perfect(cube2)\n\n\n"} {"package":"magic","topic":"diag.off","snippet":"### Name: diag.off\n### Title: Extracts broken diagonals\n### Aliases: diag.off\n### Keywords: array\n\n### ** Examples\n\ndiag.off(magic(10),nw.se=FALSE,offset=0)\ndiag.off(magic(10),nw.se=FALSE,offset=1)\n\n\n"} {"package":"magic","topic":"do.index","snippet":"### Name: do.index\n### Title: Apply a function to array element indices\n### Aliases: do.index\n### Keywords: math\n\n### ** Examples\n\na <- array(0,c(2,3,4))\nb <- array(rpois(60,1),c(3,4,5))\n\nf1 <- function(x){sum(x)}\nf2 <- function(x){sum((x-1)^2)}\nf3 <- function(x){b[t(x)]}\nf4 <- function(x){sum(x)%%2}\nf5 <- function(x,u){x[u]}\n\ndo.index(a,f1) # should match arow(a,1)+arow(a,2)+arow(a,3)\ndo.index(a,f2)\ndo.index(a,f3) # same as apltake(b,dim(a))\ndo.index(a,f4) # Male/female toilets at NOC\ndo.index(a,f5,2) # same as arow(a,2)\n\n\n"} {"package":"magic","topic":"eq","snippet":"### Name: eq\n### Title: Comparison of two magic squares\n### Aliases: eq ne gt lt ge le %eq% %ne% %gt% %lt% %ge% %le%\n### Keywords: array\n\n### ** Examples\n\nmagic(4) %eq% magic.4n(1)\neq(magic(4) , magic.4n(1))\n\n\n"} {"package":"magic","topic":"fnsd","snippet":"### Name: fnsd\n### Title: First non-singleton dimension\n### Aliases: fnsd\n### Keywords: array\n\n### ** Examples\n\na <- array(1:24,c(1,1,1,1,2,1,3,4))\nfnsd(a)\nfnsd(a,2)\n\n\n"} {"package":"magic","topic":"force.integer","snippet":"### Name: force.integer\n### Title: Integerize array elements\n### Aliases: force.integer\n### Keywords: array\n\n### ** Examples\n\na <- matrix(rep(1,4),2,2)\nforce.integer(a)\nas.integer(a)\n\n\n"} {"package":"magic","topic":"hadamard","snippet":"### Name: hadamard\n### Title: Hadamard matrices\n### Aliases: hadamard is.hadamard sylvester\n### Keywords: array\n\n### ** Examples\n\nis.hadamard(sylvester(4))\nimage(sylvester(5))\n\n\n\n"} {"package":"magic","topic":"hendricks","snippet":"### Name: hendricks\n### Title: A perfect magic cube due to Hendricks\n### Aliases: hendricks\n### Keywords: datasets array\n\n### ** Examples\n\ndata(hendricks)\nis.perfect(hendricks)\n\n\n"} {"package":"magic","topic":"hudson","snippet":"### Name: hudson\n### Title: Pandiagonal magic 
squares due to Hudson\n### Aliases: hudson\n### Keywords: array\n\n### ** Examples\n\nhudson(n=11)\nmagicplot(hudson(n=11))\nis.associative(hudson(n=13))\nhudson(a=(2*1:13)%%13 , b=(8*1:13)%%13)\nall(replicate(10,is.magic(hudson(a=sample(13),b=sample(13)))))\n\n\n"} {"package":"magic","topic":"is.magic","snippet":"### Name: is.magic\n### Title: Various tests for the magicness of a square\n### Aliases: is.magic is.panmagic is.pandiagonal is.semimagic\n### is.semimagic.default is.associative is.regular is.ultramagic\n### is.normal is.sparse is.mostperfect is.2x2.correct is.bree.correct\n### is.latin is.antimagic is.totally.antimagic is.heterosquare\n### is.totally.heterosquare is.sam is.stam\n### Keywords: array\n\n### ** Examples\n\nis.magic(magic(4))\n\nis.magic(diag(7),func=max) # TRUE\nis.magic(diag(8),func=max) # FALSE\n\nstopifnot(is.magic(magic(3:8)))\n\nis.panmagic(panmagic.4())\nis.panmagic(panmagic.8())\n\ndata(Ollerenshaw)\nis.mostperfect(Ollerenshaw)\n\nproper.magic <- function(m){is.magic(m) & is.normal(m)}\nproper.magic(magic(20))\n\n\n"} {"package":"magic","topic":"is.semimagichypercube","snippet":"### Name: is.magichypercube\n### Title: magic hypercubes\n### Aliases: is.semimagichypercube is.magichypercube is.nasik\n### is.alicehypercube is.perfect is.diagonally.correct is.latinhypercube\n### Keywords: array\n\n### ** Examples\n\nlibrary(abind)\nis.semimagichypercube(magiccube.2np1(1))\nis.semimagichypercube(magichypercube.4n(1,d=4))\n\nis.perfect(magichypercube.4n(1,d=4))\n\n# Now try an array with minmax(dim(a))==FALSE:\na <- abind(magiccube.2np1(1),magiccube.2np1(1),along=2)\nis.semimagichypercube(a,g=TRUE)$rook.sums\n\n# is.semimagichypercube() takes further arguments:\nmymax <- function(x,UP){max(c(x,UP))}\nnot_mag <- array(1:81,rep(3,4))\nis.semimagichypercube(not_mag,func=mymax,UP=80) # FALSE\nis.semimagichypercube(not_mag,func=mymax,UP=81) # TRUE\n\n\na2 <- magichypercube.4n(m=1,d=4)\nis.diagonally.correct(a2)\nis.diagonally.correct(a2,g=TRUE)$diag.sums\n\n## To extract corner elements (note func(1:n) != func(n:1)):\nis.diagonally.correct(a2,func=function(x){x[1]},g=TRUE)$diag.sums \n\n\n#Now for a subhypercube of a magic hypercube that is not semimagic:\nis.magic(allsubhypercubes(magiccube.2np1(1))[[10]])\n\ndata(hendricks)\nis.perfect(hendricks)\n\n\n#note that Hendricks's magic cube also has many broken diagonals summing\n#correctly:\n\na <- allsubhypercubes(hendricks)\nld <- function(a){length(dim(a))}\n\njj <- unlist(lapply(a,ld))\nf <- function(i){is.perfect(a[[which(jj==2)[i]]])}\nall(sapply(1:sum(jj==2),f))\n\n#but this is NOT enough to ensure that it is pandiagonal (but I\n#think hendricks is pandiagonal).\n\n\nis.alicehypercube(magichypercube.4n(1,d=5),4,give.answers=TRUE)\n\n\n\n"} {"package":"magic","topic":"is.ok","snippet":"### Name: is.ok\n### Title: does a vector have the sum required to be a row or column of a\n### magic square?\n### Aliases: is.ok\n### Keywords: array\n\n### ** Examples\n\n is.ok(magic(5)[1,])\n\n\n\n"} {"package":"magic","topic":"is.square.palindromic","snippet":"### Name: is.square.palindromic\n### Title: Is a square matrix square palindromic?\n### Aliases: is.square.palindromic is.centrosymmetric is.persymmetric\n### Keywords: array\n\n### ** Examples\n\nis.square.palindromic(magic(3))\nis.persymmetric(matrix(c(1,0,0,1),2,2))\n\n#now try a circulant:\na <- matrix(0,5,5)\nis.square.palindromic(circulant(10)) #should be TRUE\n\n\n"} {"package":"magic","topic":"latin","snippet":"### Name: latin\n### Title: Random latin squares\n### 
Aliases: latin incidence is.incidence is.incidence.improper unincidence\n### inc_to_inc another_latin another_incidence rlatin\n### Keywords: array\n\n### ** Examples\n\n\nrlatin(5)\nrlatin(n=2, size=4, burnin=10)\n\n# An example that allows one to optimize an objective function\n# [here f()] over latin squares:\ngr <- function(x){ another_latin(matrix(x,7,7)) }\nset.seed(0)\nindex <- sample(49,20)\nf <- function(x){ sum(x[index])}\njj <- optim(par=as.vector(latin(7)), fn=f, gr=gr, method=\"SANN\", control=list(maxit=10))\nbest_latin <- matrix(jj$par,7,7)\nprint(best_latin)\nprint(f(best_latin))\n\n#compare starting value:\nf(circulant(7))\n\n\n\n\n"} {"package":"magic","topic":"lozenge","snippet":"### Name: lozenge\n### Title: Conway's lozenge algorithm for magic squares\n### Aliases: lozenge\n### Keywords: array\n\n### ** Examples\n\nlozenge(4)\nall(sapply(1:10,function(n){is.magic(lozenge(n))}))\n\n\n"} {"package":"magic","topic":"magic-package","snippet":"### Name: magic-package\n### Title: Create and Investigate Magic Squares\n### Aliases: magic-package\n### Keywords: package\n\n### ** Examples\n\n\nmagic(6)\n\nmagicplot(magic(8))\n\nmagichypercube.4n(1)\n\nis.alicehypercube(magichypercube.4n(1,d=5),4,give.answers=TRUE)\n\n\n\n"} {"package":"magic","topic":"magic.2np1","snippet":"### Name: magic.2np1\n### Title: Magic squares of odd order\n### Aliases: magic.2np1\n### Keywords: array\n\n### ** Examples\n\nmagic.2np1(1)\nf <- function(n){is.magic(magic.2np1(n))}\nall(sapply(1:20,f))\n\nis.panmagic(magic.2np1(5,ord.vec=c(2,1),break.vec=c(1,3)))\n\n\n"} {"package":"magic","topic":"magic.4n","snippet":"### Name: magic.4n\n### Title: Magic squares of order 4n\n### Aliases: magic.4n\n### Keywords: array\n\n### ** Examples\n\nmagic.4n(4)\nis.magic(magic.4n(5))\n\n\n"} {"package":"magic","topic":"magic.4np2","snippet":"### Name: magic.4np2\n### Title: Magic squares of order 4n+2\n### Aliases: magic.4np2\n### Keywords: array\n\n### ** Examples\n\nmagic.4np2(1)\nis.magic(magic.4np2(3))\n\n\n"} {"package":"magic","topic":"magic.8","snippet":"### Name: magic.8\n### Title: Regular magic squares of order 8\n### Aliases: magic.8\n### Keywords: array\n\n### ** Examples\n\n\nh <- magic.8()\nh[,,1]\n\nstopifnot(apply(h,3,is.magic))\n\n\n"} {"package":"magic","topic":"magic","snippet":"### Name: magic\n### Title: Creates magic squares\n### Aliases: magic\n### Keywords: array\n\n### ** Examples\n\nmagic(6)\nall(is.magic(magic(3:10)))\n\n## The first eigenvalue of a magic square is equal to the magic constant:\neigen(magic(10),FALSE,TRUE)$values[1] - magic.constant(10)\n\n## The sum of the eigenvalues of a magic square after the first is zero:\nsum(eigen(magic(10),FALSE,TRUE)$values[2:10])\n\n\n\n\n\n"} {"package":"magic","topic":"magic.constant","snippet":"### Name: magic.constant\n### Title: Magic constant of a magic square or hypercube\n### Aliases: magic.constant\n### Keywords: array\n\n### ** Examples\n\nmagic.constant(4)\n\n\n"} {"package":"magic","topic":"magic.prime","snippet":"### Name: magic.prime\n### Title: Magic squares prime order\n### Aliases: magic.prime\n### Keywords: array\n\n### ** Examples\n\nmagic.prime(7)\nf <- function(n){is.magic(magic.prime(n))}\nall(sapply(6*1:30+1,f))\nall(sapply(6*1:30-1,f))\n\nis.magic(magic.prime(9,i=2,j=4),give.answers=TRUE)\nmagic.prime(7,i=2,j=4)\n\n\n"} {"package":"magic","topic":"magic.product","snippet":"### Name: magic.product\n### Title: Product of two magic squares\n### Aliases: magic.product magic.product magic.product.fast\n### Keywords: 
array\n\n### ** Examples\n\nmagic.product(magic(3),magic(4))\nmagic.product(3,4)\n\nmat <- matrix(0,3,3)\na <- magic.product(3,4,mat=mat)\nmat[1,1] <- 1\nb <- magic.product(3,4,mat=mat)\n\na==b\n\n\n"} {"package":"magic","topic":"magiccube.2np1","snippet":"### Name: magiccube.2np1\n### Title: Magic cubes of order 2n+1\n### Aliases: magiccube.2np1\n### Keywords: array\n\n### ** Examples\n\n\n#try with m=3, n=2*3+1=7:\n\nm <- 3\nn <- 2*m+1\n\n\napply(magiccube.2np1(m),c(1,2),sum)\napply(magiccube.2np1(m),c(1,3),sum)\napply(magiccube.2np1(m),c(2,3),sum)\n\n#major diagonal checks out:\nsum(magiccube.2np1(m)[matrix(1:n,n,3)])\n\n#now other diagonals:\nb <- c(-1,1)\nf <- function(dir,v){if(dir>0){return(v)}else{return(rev(v))}}\ng <- function(jj){sum(magiccube.2np1(m)[sapply(jj,f,v=1:n)])}\napply(expand.grid(b,b,b),1,g) #each diagonal twice, once per direction.\n\n\n"} {"package":"magic","topic":"magiccubes","snippet":"### Name: magiccubes\n### Title: Magic cubes of order 3\n### Aliases: magiccubes\n### Keywords: datasets\n\n### ** Examples\n\ndata(magiccubes)\nmagiccubes$a1\nsapply(magiccubes,is.magichypercube)\n\n\n"} {"package":"magic","topic":"magichypercube.4n","snippet":"### Name: magichypercube.4n\n### Title: Magic hypercubes of order 4n\n### Aliases: magichypercube.4n\n### Keywords: array\n\n### ** Examples\n\nmagichypercube.4n(1,d=4)\nmagichypercube.4n(2,d=3)\n\n\n"} {"package":"magic","topic":"magicplot","snippet":"### Name: magicplot\n### Title: Joins consecutive numbers of a magic square.\n### Aliases: magicplot\n### Keywords: array\n\n### ** Examples\n\nmagicplot(magic.4n(2))\n\n\n"} {"package":"magic","topic":"minmax","snippet":"### Name: minmax\n### Title: are all elements of a vector identical?\n### Aliases: minmax\n### Keywords: array\n\n### ** Examples\n\ndata(Ollerenshaw)\nminmax(subsums(Ollerenshaw,2)) #should be TRUE, as per is.2x2.correct()\n\n\n"} {"package":"magic","topic":"notmagic.2n","snippet":"### Name: notmagic.2n\n### Title: An unmagic square\n### Aliases: notmagic.2n\n### Keywords: array\n\n### ** Examples\n\n notmagic.2n(4)\n is.magic(notmagic.2n(4))\n is.semimagic(notmagic.2n(4))\n\n\n\n"} {"package":"magic","topic":"nqueens","snippet":"### Name: nqueens\n### Title: N queens problem\n### Aliases: nqueens bernhardsson bernhardssonA bernhardssonB\n### Keywords: array\n\n### ** Examples\n\nbernhardsson(7)\n\na <-\n matrix(\n c(3,6,2,7,1,4,8,5,\n 2,6,8,3,1,4,7,5,\n 6,3,7,2,4,8,1,5,\n 3,6,8,2,4,1,7,5,\n 4,8,1,3,6,2,7,5,\n 7,2,6,3,1,4,8,5,\n 2,6,1,7,4,8,3,5,\n 1,6,8,3,7,4,2,5,\n 1,5,8,6,3,7,2,4,\n 2,4,6,8,3,1,7,5,\n 6,3,1,8,4,2,7,5,\n 4,6,8,2,7,1,3,5)\n ,8,12)\n\nout <- array(0L,c(8,8,12))\nfor(i in 1:12){\n out[cbind(seq_len(8),a[,i],i)] <- 1L\n}\n\n\n\n\n"} {"package":"magic","topic":"panmagic.4","snippet":"### Name: panmagic.4\n### Title: Panmagic squares of order 4\n### Aliases: panmagic.4\n### Keywords: array\n\n### ** Examples\n\npanmagic.4()\npanmagic.4(2^c(1,3,2,0))\npanmagic.4(10^(0:3))\n\n\n"} {"package":"magic","topic":"panmagic.4n","snippet":"### Name: panmagic.6npm1\n### Title: Panmagic squares of order 4n, 6n+1 and 6n-1\n### Aliases: panmagic.4n panmagic.6npm1 panmagic.6np1 panmagic.6nm1\n### Keywords: array\n\n### ** Examples\n\npanmagic.6np1(1)\npanmagic.6npm1(13)\n\nall(sapply(panmagic.6np1(1:3),is.panmagic))\n\n\n\n"} {"package":"magic","topic":"panmagic.8","snippet":"### Name: panmagic.8\n### Title: Panmagic squares of order 8\n### Aliases: panmagic.8\n### Keywords: array\n\n### ** 
Examples\n\nis.panmagic(panmagic.8(chosen=2:7))\nis.normal(panmagic.8(chosen=2:7))\nis.normal(panmagic.8(chosen=c(1,2,3,6,7,8)))\n\n#to see the twelve basis magic carpets, set argument 'chosen' to each\n#integer from 1 to 12 in turn, with vals=1:\n\npanmagic.8(chosen=1,vals=1)-1\nimage(panmagic.8(chosen=12,vals=1))\n\n\n\n"} {"package":"magic","topic":"perfectcube5","snippet":"### Name: perfectcube5\n### Title: A perfect magic cube of order 5\n### Aliases: perfectcube5\n### Keywords: datasets\n\n### ** Examples\n\ndata(perfectcube5)\nis.perfect(perfectcube5)\n\n\n"} {"package":"magic","topic":"perfectcube6","snippet":"### Name: perfectcube6\n### Title: A perfect cube of order 6\n### Aliases: perfectcube6\n### Keywords: datasets\n\n### ** Examples\n\ndata(perfectcube6)\nis.perfect(perfectcube6)\nis.magichypercube(perfectcube6[2:5,2:5,2:5])\n\n\n"} {"package":"magic","topic":"process","snippet":"### Name: process\n### Title: Force index arrays into range\n### Aliases: process\n### Keywords: array\n\n### ** Examples\n\n# extract the broken diagonal of magic.2np1(4) that passes\n# through element [1,5]:\n\na <- magic.2np1(4)\nindex <- t(c(1,5)+rbind(1:9,1:9))\na[process(index,9)]\n\n\n"} {"package":"magic","topic":"recurse","snippet":"### Name: recurse\n### Title: Recursively apply a permutation\n### Aliases: recurse\n### Keywords: array\n\n### ** Examples\n\n\nn <- 15\nnoquote(recurse(start=letters[1:n],perm=shift(1:n),i=0))\nnoquote(recurse(start=letters[1:n],perm=shift(1:n),i=1))\nnoquote(recurse(start=letters[1:n],perm=shift(1:n),i=2))\n\nnoquote(recurse(start=letters[1:n],perm=sample(n),i=1))\nnoquote(recurse(start=letters[1:n],perm=sample(n),i=2))\n\n\n\n"} {"package":"magic","topic":"sam","snippet":"### Name: sam\n### Title: Sparse antimagic squares\n### Aliases: sam\n### Keywords: array\n\n### ** Examples\n\nsam(6,2)\n\njj <- matrix(c(\n 5, 2, 3, 4, 1,\n 3, 5, 4, 1, 2,\n 2, 3, 1, 5, 4,\n 4, 1, 2, 3, 5, \n 1, 4, 5, 2, 3),5,5)\n\nis.sam(sam(5,2,B=jj))\n\n\n\n"} {"package":"magic","topic":"shift","snippet":"### Name: shift\n### Title: Shift origin of arrays and vectors\n### Aliases: shift ashift\n### Keywords: array\n\n### ** Examples\n\nshift(1:10,3)\nm <- matrix(1:100,10,10)\nashift(m,c(1,1))\nashift(m,c(0,1)) #note columns shifted by 1, rows unchanged.\nashift(m,dim(m)) #m unchanged (Mnemonic).\n\n\n"} {"package":"magic","topic":"strachey","snippet":"### Name: strachey\n### Title: Strachey's algorithm for magic squares\n### Aliases: strachey\n### Keywords: array\n\n### ** Examples\n\n strachey(3)\n strachey(2,square=magic(5))\n\n strachey(2,square=magic(5)) %eq% strachey(2,square=t(magic(5)))\n #should be FALSE\n\n #Show which numbers have been swapped:\n strachey(2,square=matrix(0,5,5))\n\n #It's still magic, but not normal:\n is.magic(strachey(2,square=matrix(0,5,5)))\n\n\n"} {"package":"magic","topic":"subsums","snippet":"### Name: subsums\n### Title: Sums of submatrices\n### Aliases: subsums\n### Keywords: array\n\n### ** Examples\n\n data(Ollerenshaw)\n subsums(Ollerenshaw,c(2,2))\n subsums(Ollerenshaw[,1:10],c(2,2))\n subsums(Ollerenshaw, matrix(c(0,6),2,2)) # effectively, is.bree.correct()\n\n # multidimensional example. 
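\n # (a scalar p is recycled across every dimension of the array;\n # window sums wrap around toroidally unless wrap = FALSE)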
\n a <- array(1,c(3,4,2))\n subsums(a,2) # note that p=2 is equivalent to p=c(2,2,2);\n # all elements should be identical\n\n subsums(a,2,wrap=FALSE) #note \"middle\" elements > \"outer\" elements\n\n\n #Example of nondefault function:\n x <- matrix(1:42,6,7)\n subsums(x,2,func=\"max\",pad=Inf,wrap=TRUE) \n subsums(x,2,func=\"max\",pad=Inf,wrap=FALSE)\n\n\n"} {"package":"magic","topic":"transf","snippet":"### Name: transf\n### Title: Frenicle's equivalent magic squares\n### Aliases: transf\n### Keywords: array\n\n### ** Examples\n\na <- magic(3)\nidentical(transf(a,0),a)\n\ntransf(a,1)\ntransf(a,2)\n\ntransf(a,1) %eq% transf(a,7)\n\n\n\n"} {"package":"ltm","topic":"Abortion","snippet":"### Name: Abortion\n### Title: Attitude Towards Abortion\n### Aliases: Abortion\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for Abortion data\ndsc <- descript(Abortion)\ndsc\nplot(dsc)\n\n\n\n"} {"package":"ltm","topic":"Environment","snippet":"### Name: Environment\n### Title: Attitude to the Environment\n### Aliases: Environment\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for Environment data\ndescript(Environment)\n\n\n\n"} {"package":"ltm","topic":"GoF.gpcm","snippet":"### Name: GoF\n### Title: Goodness of Fit for Rasch Models\n### Aliases: GoF.gpcm GoF.rasch\n### Keywords: multivariate\n\n### ** Examples\n\n\n## GoF for the Rasch model for the LSAT data:\nfit <- rasch(LSAT)\nGoF.rasch(fit)\n\n\n\n"} {"package":"ltm","topic":"LSAT","snippet":"### Name: LSAT\n### Title: The Law School Admission Test (LSAT), Section VI\n### Aliases: LSAT\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for LSAT data\ndsc <- descript(LSAT)\ndsc\nplot(dsc)\n\n\n\n"} {"package":"ltm","topic":"Mobility","snippet":"### Name: Mobility\n### Title: Women's Mobility\n### Aliases: Mobility\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for Mobility data\ndescript(Mobility)\n\n\n\n"} {"package":"ltm","topic":"Science","snippet":"### Name: Science\n### Title: Attitude to Science and Technology\n### Aliases: Science\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for Science data\ndescript(Science)\n\n\n\n"} {"package":"ltm","topic":"WIRS","snippet":"### Name: WIRS\n### Title: Workplace Industrial Relation Survey Data\n### Aliases: WIRS\n### Keywords: datasets\n\n### ** Examples\n\n\n## Descriptive statistics for Wirs data\ndescript(WIRS)\n\n\n\n"} {"package":"ltm","topic":"anova.gpcm","snippet":"### Name: anova\n### Title: Anova method for fitted IRT models\n### Aliases: anova.gpcm anova.grm anova.ltm anova.rasch anova.tpm\n### Keywords: methods\n\n### ** Examples\n\n\n## LRT between the constrained and unconstrained GRMs \n## for the Science data:\nfit0 <- grm(Science[c(1,3,4,7)], constrained = TRUE)\nfit1 <- grm(Science[c(1,3,4,7)])\nanova(fit0, fit1)\n\n\n## LRT between the one- and two-factor models \n## for the WIRS data:\nanova(ltm(WIRS ~ z1), ltm(WIRS ~ z1 + z2))\n\n\n## An LRT between the Rasch and a constrained \n## two-parameter logistic model for the WIRS data: \nfit0 <- rasch(WIRS)\nfit1 <- ltm(WIRS ~ z1, constraint = cbind(c(1, 3, 5), 2, 1))\nanova(fit0, fit1)\n\n\n## An LRT between the constrained (discrimination \n## parameter equals 1) and the unconstrained Rasch\n## model for the LSAT data: \nfit0 <- rasch(LSAT, constraint = rbind(c(6, 1)))\nfit1 <- rasch(LSAT)\nanova(fit0, fit1)\n\n\n## An LRT between the Rasch and the two-parameter \n## logistic model for the LSAT data: 
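\n## (these are nested models: the Rasch model is the two-parameter logistic\n## model constrained to a common discrimination, so the LRT is appropriate)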
\nanova(rasch(LSAT), ltm(LSAT ~ z1))\n\n\n\n"} {"package":"ltm","topic":"biserial.cor","snippet":"### Name: biserial.cor\n### Title: Point-Biserial Correlation\n### Aliases: biserial.cor\n### Keywords: multivariate\n\n### ** Examples\n\n\n# the point-biserial correlation between\n# the total score and the first item, using\n# '0' as the reference level\nbiserial.cor(rowSums(LSAT), LSAT[[1]])\n\n# and using '1' as the reference level\nbiserial.cor(rowSums(LSAT), LSAT[[1]], level = 2)\n\n\n\n"} {"package":"ltm","topic":"coef.gpcm","snippet":"### Name: coef\n### Title: Extract Estimated Loadings\n### Aliases: coef.gpcm coef.grm coef.ltm coef.rasch coef.tpm\n### Keywords: methods\n\n### ** Examples\n\n\nfit <- grm(Science[c(1,3,4,7)])\ncoef(fit)\n\nfit <- ltm(LSAT ~ z1)\ncoef(fit, TRUE, TRUE)\n\nfit <- rasch(LSAT)\ncoef(fit, TRUE, TRUE)\n\n\n\n"} {"package":"ltm","topic":"cronbach.alpha","snippet":"### Name: cronbach.alpha\n### Title: Cronbach's alpha\n### Aliases: cronbach.alpha\n### Keywords: multivariate\n\n### ** Examples\n\n\n# Cronbach's alpha for the LSAT data-set\n# with a Bootstrap 95% CI\ncronbach.alpha(LSAT, CI = TRUE, B = 500)\n\n\n\n"} {"package":"ltm","topic":"descript","snippet":"### Name: descript\n### Title: Descriptive Statistics\n### Aliases: descript\n### Keywords: multivariate\n\n### ** Examples\n\n\n## Descriptives for LSAT data:\ndsc <- descript(LSAT, 3)\ndsc\nplot(dsc, type = \"b\", lty = 1, pch = 1:5)\nlegend(\"topleft\", names(LSAT), pch = 1:5, col = 1:5, lty = 1, bty = \"n\")\n\n\n\n"} {"package":"ltm","topic":"factor.scores","snippet":"### Name: factor.scores\n### Title: Factor Scores - Ability Estimates\n### Aliases: factor.scores factor.scores.gpcm factor.scores.grm\n### factor.scores.ltm factor.scores.rasch factor.scores.tpm\n### Keywords: methods\n\n### ** Examples\n\n\n## Factor Scores for the Rasch model\nfit <- rasch(LSAT)\nfactor.scores(fit) # Empirical Bayes\n\n\n## Factor scores for all subjects in the\n## original dataset LSAT\nfactor.scores(fit, resp.patterns = LSAT)\n\n\n## Factor scores for specific patterns,\n## including NA's, can be obtained by \nfactor.scores(fit, resp.patterns = rbind(c(1,0,1,0,1), c(NA,1,0,NA,1)))\n\n\n## Not run: \n##D ## Factor Scores for the two-parameter logistic model\n##D fit <- ltm(Abortion ~ z1)\n##D factor.scores(fit, method = \"MI\", B = 20) # Multiple Imputation\n##D \n##D ## Factor Scores for the graded response model\n##D fit <- grm(Science[c(1,3,4,7)])\n##D factor.scores(fit, resp.patterns = rbind(1:4, c(NA,1,2,3)))\n## End(Not run)\n\n\n"} {"package":"ltm","topic":"fitted.gpcm","snippet":"### Name: fitted\n### Title: Fitted Values for IRT model\n### Aliases: fitted.gpcm fitted.grm fitted.ltm fitted.rasch fitted.tpm\n### Keywords: methods\n\n### ** Examples\n\nfit <- grm(Science[c(1,3,4,7)])\nfitted(fit, resp.patterns = matrix(1:4, nr = 4, nc = 4))\n\nfit <- rasch(LSAT)\nfitted(fit, type = \"conditional-probabilities\")\n\n\n"} {"package":"ltm","topic":"gpcm","snippet":"### Name: gpcm\n### Title: Generalized Partial Credit Model - Polytomous IRT\n### Aliases: gpcm\n### Keywords: multivariate regression\n\n### ** Examples\n\n\n## The Generalized Partial Credit Model for the Science data:\ngpcm(Science[c(1,3,4,7)])\n\n## The Generalized Partial Credit Model for the Science data,\n## assuming equal discrimination parameters across items:\ngpcm(Science[c(1,3,4,7)], constraint = \"1PL\")\n\n## The Generalized Partial Credit Model for the Science data,\n## assuming equal discrimination parameters across items\n## fixed at 
1:\ngpcm(Science[c(1,3,4,7)], constraint = \"rasch\")\n\n## more examples can be found at:\n## http://wiki.r-project.org/rwiki/doku.php?id=packages:cran:ltm#sample_analyses\n\n\n\n"} {"package":"ltm","topic":"grm","snippet":"### Name: grm\n### Title: Graded Response Model - Polytomous IRT\n### Aliases: grm\n### Keywords: multivariate regression\n\n### ** Examples\n\n\n## The Graded Response model for the Science data:\ngrm(Science[c(1,3,4,7)])\n\n## The Graded Response model for the Science data,\n## assuming equal discrimination parameters across items:\ngrm(Science[c(1,3,4,7)], constrained = TRUE)\n\n## The Graded Response model for the Environment data\ngrm(Environment)\n\n\n\n"} {"package":"ltm","topic":"information","snippet":"### Name: information\n### Title: Area under the Test or Item Information Curves\n### Aliases: information\n### Keywords: multivariate\n\n### ** Examples\n\n\nfit <- rasch(LSAT)\ninformation(fit, c(-2, 0))\ninformation(fit, c(0, 2), items = c(3, 5))\n\n\n\n"} {"package":"ltm","topic":"item.fit","snippet":"### Name: item.fit\n### Title: Item-Fit Statistics and P-values\n### Aliases: item.fit\n### Keywords: multivariate\n\n### ** Examples\n\n\n# item-fit statistics for the Rasch model\n# for the Abortion data-set\nitem.fit(rasch(Abortion))\n\n# Yen's Q1 item-fit statistic (i.e., 10 latent ability groups; the\n# mean ability in each group is used to compute fitted proportions) \n# for the two-parameter logistic model for the LSAT data-set\nitem.fit(ltm(LSAT ~ z1), FUN = mean)\n\n\n\n"} {"package":"ltm","topic":"ltm","snippet":"### Name: ltm\n### Title: Latent Trait Model - Latent Variable Model for Binary Data\n### Aliases: ltm\n### Keywords: multivariate regression\n\n### ** Examples\n\n## The two-parameter logistic model for the WIRS data\n## with the constraint that (i) the easiness parameter \n## for the 1st item equals 1 and (ii) the discrimination\n## parameter for the 6th item equals -0.5\n\nltm(WIRS ~ z1, constr = rbind(c(1, 1, 1), c(6, 2, -0.5)))\n\n\n## One-factor and a quadratic term\n## using the Mobility data\nltm(Mobility ~ z1 + I(z1^2))\n\n## Two-factor model with an interaction term\n## using the WIRS data\nltm(WIRS ~ z1 * z2)\n\n\n## The two-parameter logistic model for the Abortion data \n## with 20 quadrature points and 20 EM iterations;\n## report results under the usual IRT parameterization\nltm(Abortion ~ z1, control = list(GHk = 20, iter.em = 20))\n\n\n\n"} {"package":"ltm","topic":"margins","snippet":"### Name: margins\n### Title: Fit of the model on the margins\n### Aliases: margins margins.gpcm margins.grm margins.ltm margins.rasch\n### margins.tpm\n### Keywords: methods\n\n### ** Examples\n\n\n## Two- and Three-way residuals for the Rasch model\nfit <- rasch(LSAT)\nmargins(fit)\nmargins(fit, \"three\")\n\n\n## Two- and Three-way residuals for the one-factor model\nfit <- ltm(WIRS ~ z1)\nmargins(fit)\nmargins(fit, \"three\")\n\n\n## Two- and Three-way residuals for the graded response model\nfit <- grm(Science[c(1,3,4,7)])\nmargins(fit)\nmargins(fit, \"three\")\n\n\n\n"} {"package":"ltm","topic":"mult.choice","snippet":"### Name: mult.choice\n### Title: Multiple Choice Items to Binary Responses\n### Aliases: mult.choice\n### Keywords: multivariate\n\n### ** Examples\n\n\ndat <- data.frame(It1 = sample(4, 100, TRUE),\n It2 = sample(4, 100, TRUE),\n It3 = sample(5, 100, TRUE),\n It4 = sample(5, 100, TRUE),\n It5 = sample(4, 100, TRUE),\n It6 = sample(5, 100, TRUE))\ndat[] <- lapply(dat, function (x) { x[sample(100, 4)] <- NA; x })\ncrct <- 
c(3, 2, 5, 3, 4, 5)\n####################\nmult.choice(dat, crct)\n\n\n\n"} {"package":"ltm","topic":"person.fit","snippet":"### Name: person.fit\n### Title: Person-Fit Statistics and P-values\n### Aliases: person.fit\n### Keywords: multivariate\n\n### ** Examples\n\n\n# person-fit statistics for the Rasch model\n# for the Abortion data-set\nperson.fit(rasch(Abortion))\n\n# person-fit statistics for the two-parameter logistic model\n# for the LSAT data-set\nperson.fit(ltm(LSAT ~ z1), simulate.p.value = TRUE, B = 100)\n\n\n\n"} {"package":"ltm","topic":"plot.gpcm","snippet":"### Name: plot IRT\n### Title: Plot method for fitted IRT models\n### Aliases: plot.gpcm plot.grm plot.ltm plot.rasch plot.tpm\n### Keywords: methods\n\n### ** Examples\n\n\n# Examples for plot.grm()\n\nfit <- grm(Science[c(1,3,4,7)])\n\n## Item Response Category Characteristic Curves for \n## the Science data\nop <- par(mfrow = c(2, 2))\nplot(fit, lwd = 2, legend = TRUE, ncol = 2)\n# re-set par()\npar(op)\n\n## Item Characteristic Curves for the 2nd category,\n## and items 1 and 3\nplot(fit, category = 2, items = c(1, 3), lwd = 2, legend = TRUE, cx = \"right\")\n\n## Item Information Curves for the Science data;\nplot(fit, type = \"IIC\", legend = TRUE, cx = \"topright\", lwd = 2, cex = 1.4)\n\n## Test Information Function for the Science data;\nplot(fit, type = \"IIC\", items = 0, lwd = 2)\n\n\n###################################################\n\n\n# Examples for plot.ltm()\n\n## Item Characteristic Curves for the two-parameter logistic\n## model; plot only items 1, 2, 4 and 6; take the range of the\n## latent ability to be (-2.5, 2.5):\nfit <- ltm(WIRS ~ z1)\nplot(fit, items = c(1, 2, 4, 6), zrange = c(-2.5, 2.5), lwd = 3, cex = 1.4)\n\n## Test Information Function under the two-parameter logistic\n## model for the LSAT data\nfit <- ltm(LSAT ~ z1)\nplot(fit, type = \"IIC\", items = 0, lwd = 2, cex.lab = 1.2, cex.main = 1.3)\ninfo <- information(fit, c(-3, 0))\ntext(x = 2, y = 0.5, labels = paste(\"Total Information:\", round(info$InfoTotal, 3), \n \"\\n\\nInformation in (-3, 0):\", round(info$InfoRange, 3), \n paste(\"(\", round(100 * info$PropRange, 2), \"%)\", sep = \"\")), cex = 1.2)\n\n## Item Characteristic Surfaces for the interaction model:\nfit <- ltm(WIRS ~ z1 * z2)\nplot(fit, ticktype = \"detailed\", theta = 30, phi = 30, expand = 0.5, d = 2, \n cex = 0.7, col = \"lightblue\")\n\n###################################################\n\n\n# Examples for plot.rasch()\n\n## Item Characteristic Curves for the WIRS data;\n## plot only items 1, 3 and 5:\nfit <- rasch(WIRS)\nplot(fit, items = c(1, 3, 5), lwd = 3, cex = 1.4)\nabline(v = -4:4, h = seq(0, 1, 0.2), col = \"lightgray\", lty = \"dotted\")\n\nfit <- rasch(LSAT)\n\n## Item Characteristic Curves for the LSAT data;\n## plot all items plus a legend and use only black:\nplot(fit, legend = TRUE, cx = \"right\", lwd = 3, cex = 1.4, \n cex.lab = 1.6, cex.main = 2, col = 1, lty = c(1, 1, 1, 2, 2),\n pch = c(16, 15, 17, 0, 1))\nabline(v = -4:4, h = seq(0, 1, 0.2), col = \"lightgray\", lty = \"dotted\")\n\n## Item Information Curves, for the first 3 items; include a legend\nplot(fit, type = \"IIC\", items = 1:3, legend = TRUE, lwd = 2, cx = \"topright\")\n\n## Test Information Function\nplot(fit, type = \"IIC\", items = 0, lwd = 2, cex.lab = 1.1, \n sub = paste(\"Call: \", deparse(fit$call)))\n\n## Total information in (-2, 0) based on all the items\ninfo.Tot <- information(fit, c(-2, 0))$InfoRange\n## Information in (-2, 0) based on items 2 and 4\ninfo.24 <- 
information(fit, c(-2, 0), items = c(2, 4))$InfoRange\ntext(x = 2, y = 0.5, labels = paste(\"Total Information in (-2, 0):\", \n round(info.Tot, 3), \n \"\\n\\nInformation in (-2, 0) based on\\n Items 2 and 4:\", round(info.24, 3), \n paste(\"(\", round(100 * info.24 / info.Tot, 2), \"%)\", sep = \"\")), \n cex = 1.2)\n\n## The Standard Error of Measurement can be plotted by\nvals <- plot(fit, type = \"IIC\", items = 0, plot = FALSE)\nplot(vals[, \"z\"], 1 / sqrt(vals[, \"info\"]), type = \"l\", lwd = 2,\n xlab = \"Ability\", ylab = \"Standard Error\", \n main = \"Standard Error of Measurement\")\n\n###################################################\n\n\n# Examples for plot.tpm()\n\n## Compare the Item Characteristic Curves for the LSAT data,\n## under the constrained Rasch model, the unconstrained Rasch model,\n## and the three parameter model assuming equal discrimination\n## across items\npar(mfrow = c(2, 2))\npl1 <- plot(rasch(LSAT, constr = cbind(length(LSAT) + 1, 1)))\ntext(2, 0.35, \"Rasch model\\nDiscrimination = 1\")\npl2 <- plot(rasch(LSAT))\ntext(2, 0.35, \"Rasch model\")\npl3 <- plot(tpm(LSAT, type = \"rasch\", max.guessing = 1))\ntext(2, 0.35, \"Rasch model\\nwith Guessing parameter\")\n\n## Compare the Item Characteristic Curves for Item 4\n## (you have to run the above first)\nplot(range(pl1[, \"z\"]), c(0, 1), type = \"n\", xlab = \"Ability\", \n ylab = \"Probability\", main = \"Item Characteristic Curves - Item 4\")\nlines(pl1[, c(\"z\", \"Item 4\")], lwd = 2, col = \"black\")\nlines(pl2[, c(\"z\", \"Item 4\")], lwd = 2, col = \"red\")\nlines(pl3[, c(\"z\", \"Item 4\")], lwd = 2, col = \"blue\")\nlegend(\"right\", c(\"Rasch model Discrimination = 1\", \"Rasch model\", \n \"Rasch model with\\nGuessing parameter\"), lwd = 2, col = c(\"black\", \n \"red\", \"blue\"), bty = \"n\")\n\n\n\n"} {"package":"ltm","topic":"plot.descript","snippet":"### Name: plot descript\n### Title: Descriptive Statistics Plot method\n### Aliases: plot.descript\n### Keywords: methods\n\n### ** Examples\n\n\n## Descriptives for WIRS data:\ndsc <- descript(WIRS)\ndsc\nplot(dsc, includeFirstLast = TRUE, type = \"b\", lty = 1, pch = 1:6)\nlegend(\"topleft\", names(WIRS), pch = 1:6, col = 1:6, lty = 1, bty = \"n\")\n\n\n\n"} {"package":"ltm","topic":"plot.fscores","snippet":"### Name: plot fscores\n### Title: Factor Scores - Ability Estimates Plot method\n### Aliases: plot.fscores\n### Keywords: methods\n\n### ** Examples\n\n\n## Factor Scores for LSAT data:\nfsc <- factor.scores(rasch(LSAT))\nplot(fsc, include.items = TRUE, main = \"KDE for Person Parameters\")\nlegend(\"left\", \"item parameters\", pch = 16, cex = 1.5, bty = \"n\")\n\n\n\n"} {"package":"ltm","topic":"rasch","snippet":"### Name: rasch\n### Title: Rasch Model\n### Aliases: rasch\n### Keywords: multivariate regression\n\n### ** Examples\n\n## The common form of the Rasch model for the \n## LSAT data, assuming that the discrimination\n## parameter equals 1\nrasch(LSAT, constraint = cbind(ncol(LSAT) + 1, 1))\n\n\n## The Rasch model for the LSAT data under the \n## normal ogive; to do that fix the discrimination\n## parameter to 1.702\nrasch(LSAT, constraint = cbind(ncol(LSAT) + 1, 1.702))\n\n## The Rasch model for the LSAT data with\n## unconstrained discrimination parameter\nrasch(LSAT)\n\n## The Rasch model with (artificially created) \n## missing data\ndata <- LSAT\ndata[] <- lapply(data, function(x){\n x[sample(1:length(x), sample(15, 1))] <- NA\n x\n})\nrasch(data)\n\n\n"} {"package":"ltm","topic":"rcor.test","snippet":"### Name: 
rcor.test\n### Title: Pairwise Associations between Items using a Correlation\n### Coefficient\n### Aliases: rcor.test\n### Keywords: multivariate\n\n### ** Examples\n\n\n## pairwise associations for Environment data:\nrcor.test(data.matrix(Environment), method = \"kendall\")\n\n## pairwise associations for independent normal random variates:\nmat <- matrix(rnorm(1000), 100, 10, dimnames = list(NULL, LETTERS[1:10]))\nrcor.test(mat)\nrcor.test(mat, method = \"kendall\")\nrcor.test(mat, method = \"spearman\")\n\n\n\n"} {"package":"ltm","topic":"residuals.gpcm","snippet":"### Name: residuals\n### Title: Residuals for IRT models\n### Aliases: residuals.gpcm residuals.grm residuals.ltm residuals.rasch\n### residuals.tpm\n### Keywords: methods\n\n### ** Examples\n\n\nfit <- ltm(LSAT ~ z1)\nresiduals(fit)\nresiduals(fit, order = FALSE)\n\n\n\n"} {"package":"ltm","topic":"rmvlogis","snippet":"### Name: rmvlogis\n### Title: Generate Random Responses Patterns under Dichotomous and\n### Polytomous IRT models\n### Aliases: rmvlogis rmvordlogis\n### Keywords: multivariate\n\n### ** Examples\n\n\n# 10 response patterns under a Rasch model\n# with 5 items\nrmvlogis(10, cbind(seq(-2, 2, 1), 1))\n\n# 10 response patterns under a GPCM model\n# with 5 items, with 3 categories each\nthetas <- lapply(1:5, function(u) c(seq(-1, 1, len = 2), 1.2))\nrmvordlogis(10, thetas)\n\n\n\n"} {"package":"ltm","topic":"summary.gpcm","snippet":"### Name: summary\n### Title: Summary method for fitted IRT models\n### Aliases: summary.gpcm summary.grm summary.ltm summary.rasch summary.tpm\n### Keywords: methods\n\n### ** Examples\n\n\n# use Hessian = TRUE if you want standard errors\nfit <- grm(Science[c(1,3,4,7)], Hessian = TRUE)\nsummary(fit)\n\n## One factor model using the WIRS data;\n## results are reported under the IRT\n## parameterization\nfit <- ltm(WIRS ~ z1)\nsummary(fit)\n\n\n\n"} {"package":"ltm","topic":"testEquatingData","snippet":"### Name: testEquatingData\n### Title: Prepares Data for Test Equating\n### Aliases: testEquatingData\n### Keywords: multivariate\n\n### ** Examples\n\n\n# Let two data-sets with common and unique items\ndat1 <- as.data.frame(rmvlogis(20, cbind(c(-2, 1, 2, 1), 1)))\nnames(dat1) <- c(\"CIt2\", \"CIt3\", \"CIt4\", \"W\")\n\ndat2 <- as.data.frame(rmvlogis(10, cbind(c(-2, -1, 1, 2, 0.95), 1)))\nnames(dat2) <- c(\"CIt1\", \"CIt2\", \"CIt3\", \"CIt4\", \"K\")\n\n# combine in one data-set by\nlisForms <- list(dat1, dat2)\ntestEquatingData(lisForms)\n\n\n\n"} {"package":"ltm","topic":"tpm","snippet":"### Name: tpm\n### Title: Birnbaum's Three Parameter Model\n### Aliases: tpm\n### Keywords: multivariate regression\n\n### ** Examples\n\n\n# the three parameter model\ntpm(LSAT)\n\n# use 'nlminb' as optimizer\ntpm(LSAT, control = list(optimizer = \"nlminb\"))\n\n\n# the three parameter model with equal \n# discrimination parameter across items\n# fix the guessing parameter for the third item to zero\ntpm(LSAT, type = \"rasch\", constraint = cbind(3, 1, 0))\n\n\n# the three parameter model for the Abortion data\nfit <- tpm(Abortion)\nfit\n\n# the guessing parameter estimates for items 1, 3, and 4 seem to be on\n# the boundary; update the fit by fixing them to zero\nupdate(fit, constraint = cbind(c(1, 3, 4), 1, 0))\n\n\n\n"} {"package":"ltm","topic":"unidimTest","snippet":"### Name: unidimTest\n### Title: Unidimensionality Check using Modified Parallel Analysis\n### Aliases: unidimTest\n### Keywords: multivariate\n\n### ** Examples\n\n## Not run: \n##D # Unidimensionality Check for the LSAT 
data-set\n##D # under a Rasch model:\n##D out <- unidimTest(rasch(LSAT))\n##D out\n##D plot(out, type = \"b\", pch = 1:2)\n##D legend(\"topright\", c(\"Real Data\", \"Average Simulated Data\"), lty = 1, \n##D pch = 1:2, col = 1:2, bty = \"n\")\n## End(Not run)\n\n\n"} {"package":"ltm","topic":"vcov.gpcm","snippet":"### Name: vcov\n### Title: vcov method for fitted IRT models\n### Aliases: vcov.gpcm vcov.grm vcov.ltm vcov.rasch vcov.tpm\n### Keywords: methods\n\n### ** Examples\n\nfit <- rasch(WIRS)\nvcov(fit)\nsqrt(diag(vcov(fit))) # standard errors under additive parameterization\n\n\n"} {"package":"modelDown","topic":"modelDown","snippet":"### Name: modelDown\n### Title: Generates a website with HTML summaries for predictive models\n### Aliases: modelDown\n\n### ** Examples\n\n## No test: \nrequire(\"ranger\")\nrequire(\"breakDown\")\nrequire(\"DALEX\")\n\n\n# Generate simple modelDown page\nHR_data_selected <- HR_data[1000:3000,]\nHR_glm_model <- glm(left~., HR_data_selected, family = \"binomial\")\nexplainer_glm <- explain(HR_glm_model, data=HR_data_selected, y = HR_data_selected$left)\n\nmodelDown::modelDown(explainer_glm,\n modules = c(\"model_performance\", \"variable_importance\",\n \"variable_response\"),\n output_folder = tempdir(),\n repository_name = \"HR\",\n device = \"png\",\n vr.vars= c(\"average_montly_hours\"),\n vr.type = \"ale\")\n\n# More complex example with all modules\nHR_ranger_model <- ranger(as.factor(left) ~ .,\n data = HR_data, num.trees = 500, classification = TRUE, probability = TRUE)\nexplainer_ranger <- explain(HR_ranger_model,\n data = HR_data, y = HR_data$left, function(model, data) {\n return(predict(model, data)$prediction[,2])\n}, na.rm=TRUE)\n\n# Two glm models used for drift detection\nHR_data1 <- HR_data[1:4000,]\nHR_data2 <- HR_data[4000:nrow(HR_data),]\nHR_glm_model1 <- glm(left~., HR_data1, family = \"binomial\")\nHR_glm_model2 <- glm(left~., HR_data2, family = \"binomial\")\nexplainer_glm1 <- explain(HR_glm_model1, data=HR_data1, y = HR_data1$left)\nexplainer_glm2 <- explain(HR_glm_model2, data=HR_data2, y = HR_data2$left)\n\nmodelDown::modelDown(list(explainer_glm1, explainer_glm2),\n modules = c(\"auditor\", \"drifter\", \"model_performance\", \"variable_importance\",\n \"variable_response\"),\n output_folder = tempdir(),\n repository_name = \"HR\",\n remote_repository_path = \"some_user/remote_repo_name\",\n device = \"png\",\n vr.vars= c(\"average_montly_hours\", \"time_spend_company\"),\n vr.type = \"ale\")\n## End(No test)\n\n\n"} {"package":"msd","topic":"expdata","snippet":"### Name: expdata\n### Title: Expected Ratings Matrix\n### Aliases: expdata\n\n### ** Examples\n\n# Using randomly generated values with minimum rating set to zero\nim <- runif(20, -2, 2)\npm <- runif(50, -2, 2)\nth <- sort(runif(5, -2, 2))\nm <- expdata(items = im, persons = pm, thresholds = th, minRating = 0)\n\n\n"} {"package":"msd","topic":"ims","snippet":"### Name: ims\n### Title: Item Measures\n### Aliases: ims\n\n### ** Examples\n\n# Simple example with randomly generated values and lowest rating category = 0.\nd <- as.numeric(sample(0:4, 500, replace = TRUE))\ndm <- matrix(d, nrow = 50, ncol = 10)\npm <- runif(50, -2, 2)\nth <- sort(runif(4, -2, 2))\nim <- ims(data = dm, persons = pm, thresholds = th, misfit = TRUE, minRating = 0)\n\n\n"} {"package":"msd","topic":"misfit","snippet":"### Name: misfit\n### Title: Infit and Outfit Statistics\n### Aliases: misfit\n\n### ** Examples\n\n# Using randomly generated values\nd <- as.numeric(sample(0:5, 500, replace = 
TRUE))\ndm <- matrix(d, nrow = 50, ncol = 10)\nim <- runif(10, -2, 2)\npm <- runif(50, -2, 2)\nth <- sort(runif(5, -2, 2))\nm <- misfit(data = dm, items = im, persons = pm, thresholds = th)\n\n# If the lowest or highest rating category is not in 'data', specify 'minRating'\ndm[dm == 0] <- NA\nm2 <- misfit(data = dm, items = im, persons = pm, thresholds = th, minRating = 0)\n\n\n"} {"package":"msd","topic":"msd","snippet":"### Name: msd\n### Title: Method of Successive Dichotomizations\n### Aliases: msd\n### Keywords: models\n\n### ** Examples\n\n# Simple example using a randomly generated ratings matrix\nd <- as.numeric(sample(0:5, 200, replace = TRUE))\ndm <- matrix(d, nrow = 20, ncol = 10)\nm1 <- msd(dm, misfit = TRUE)\n\n# Anchor first 5 item measures and first 10 person measures\nim <- m1$item_measures\nim[6:length(im)] <- NA\npm <- m1$person_measures\npm[11:length(pm)] <- NA\nm2 <- msd(dm, items = im, persons = pm)\n\n# To test the accuracy of msd using simdata, set the mean item measure to zero\n# (axis origin in msd is the mean item measure) and the mean threshold to\n# zero (any non-zero mean threshold is reflected in the person measures).\nim <- runif(100, -2, 2)\nim <- im - mean(im)\npm <- runif(100, -2, 2)\nth <- sort(runif(5, -2, 2))\nth <- th - mean(th)\nd <- simdata(im, pm, th, missingProb = 0.15, minRating = 0)\nm <- msd(d)\n\n# Compare msd parameters to true values. Linear regression should\n# yield a slope very close to 1 and an intercept very close to 0.\nlm(m$item_measures ~ im)\nlm(m$person_measures ~ pm)\nlm(m$thresholds ~ th)\n\n\n"} {"package":"msd","topic":"pms","snippet":"### Name: pms\n### Title: Person Measures\n### Aliases: pms\n\n### ** Examples\n\n# Simple example with randomly generated values and lowest rating category = 0\nd <- as.numeric(sample(0:4, 500, replace = TRUE))\ndm <- matrix(d, nrow = 25, ncol = 20)\nim <- runif(20, -2, 2)\nth <- sort(runif(4, -2, 2))\npm <- pms(data = dm, items = im, thresholds = th, misfit = TRUE, minRating = 0)\n\n\n"} {"package":"msd","topic":"rasch","snippet":"### Name: rasch\n### Title: Dichotomous Rasch Model\n### Aliases: rasch\n\n### ** Examples\n\n# Simple example using a randomly generated ratings matrix\nd <- as.numeric(sample(0:1, 200, replace = TRUE))\ndm <- matrix(d, nrow = 20, ncol = 10)\nm1 <- rasch(dm, misfit = TRUE)\n\n# Anchor first 5 item measures and first 10 person measures\nim <- m1$item_measures\nim[6:length(im)] <- NA\npm <- m1$person_measures\npm[11:length(pm)] <- NA\nm2 <- rasch(dm, items = im, persons = pm)\n\n# To test the accuracy of rasch using simdata, set the true mean item measure to\n# zero (axis origin in rasch is the mean item measure). Note that the threshold for\n# dichotomous data is at 0.\nim <- runif(100, -2, 2)\nim <- im - mean(im)\npm <- runif(100, -2, 2)\nth <- 0\nd <- simdata(im, pm, th, missingProb = 0.15, minRating = 0)\nm <- rasch(d)\n\n# Compare rasch parameters to true values. Linear regression should\n# yield a slope very close to 1 and an intercept very close to 0.\nlm(m$item_measures ~ im)\nlm(m$person_measures ~ pm)\n\n\n"} {"package":"msd","topic":"simdata","snippet":"### Name: simdata\n### Title: Simulated Rating Scale Data\n### Aliases: simdata\n### Keywords: datagen\n\n### ** Examples\n\n# Use simdata to test the accuracy of msd. First, randomly generate item \n# measures, person measures and thresholds with 15 percent missing data and \n# ordinal rating categories from 0 to 5. 
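(The centering below is needed because the origin of the latent scale is arbitrary.) 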
Then, set mean item measure to zero \n# (axis origin in msd is the mean item measure) and mean threshold to zero \n# (any non-zero mean threshold is reflected in the person measures).\nim <- runif(100, -2, 2)\npm <- runif(100, -2, 2)\nth <- sort(runif(5, -2, 2))\nim <- im - mean(im)\nth <- th - mean(th)\nd <- simdata(im, pm, th, missingProb = 0.15, minRating = 0)\nm <- msd(d)\n\n# Compare msd parameters to true values. Linear regression should\n# yield a slope very close to 1 and an intercept very close to 0.\nlm(m$item_measures ~ im)\nlm(m$person_measures ~ pm)\nlm(m$thresholds ~ th)\n\n\n"} {"package":"msd","topic":"thresh","snippet":"### Name: thresh\n### Title: Rating Category Thresholds\n### Aliases: thresh\n\n### ** Examples\n\n# Using randomly generated values\nd <- as.numeric(sample(0:5, 1000, replace = TRUE))\nm <- matrix(d, nrow = 50, ncol = 20)\nim <- runif(20, -2, 2)\npm <- runif(50, -2, 2)\nth1 <- thresh(m, items = im, persons = pm)\n\n# Anchor first 10 item measures and first 10 person measures\nim[11:length(im)] <- NA\npm[11:length(pm)] <- NA\nth2 <- thresh(m, items = im, persons = pm)\n\n\n"} {"package":"redditadsR","topic":"fetch_redditads","snippet":"### Name: fetch_redditads\n### Title: fetch_redditads A function to fetch Reddit Ads data from the\n### windsor.ai API\n### Aliases: fetch_redditads\n\n### ** Examples\n\n## Not run: \n##D my_redditads_data <- fetch_redditads(api_key = \"your api key\",\n##D date_from = \"2022-10-01\",\n##D date_to = \"2022-10-02\",\n##D fields = c(\"campaign\", \"clicks\",\n##D \"spend\", \"impressions\", \"date\"))\n## End(Not run)\n\n\n"} {"package":"inTrees","topic":"GBM2List","snippet":"### Name: GBM2List\n### Title: Transform gbm object to a list of trees\n### Aliases: GBM2List\n### Keywords: gbm\n\n### ** Examples\n\n library(gbm)\n data(iris)\n X <- iris[,1:(ncol(iris)-1)]\n target <- iris[,\"Species\"] \n gbmFit <- gbm(Species~ ., data=iris, n.tree = 400,\n interaction.depth = 10,distribution=\"multinomial\")\n treeList <- GBM2List(gbmFit,X)\n ruleExec = extractRules(treeList,X)\n ruleExec <- unique(ruleExec)\n #ruleExec <- ruleExec[1:min(2000,length(ruleExec)),,drop=FALSE]\n ruleMetric <- getRuleMetric(ruleExec,X,target)\n ruleMetric <- pruneRule(ruleMetric,X,target)\n ruleMetric <- unique(ruleMetric)\n learner <- buildLearner(ruleMetric,X,target)\n pred <- applyLearner(learner,X)\n readableLearner <- presentRules(learner,colnames(X)) # more readable format\n err <- 1-sum(pred==target)/length(pred);\n\n\n"} {"package":"inTrees","topic":"Num2Level","snippet":"### Name: Num2Level\n### Title: internal function\n### Aliases: Num2Level\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! 
----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (rfList, splitV) \n{\n for (i in 1:rfList$ntree) {\n rfList$list[[i]] <- data.frame(rfList$list[[i]])\n rfList$list[[i]][, \"prediction\"] <- data.frame(dicretizeVector(rfList$list[[i]][, \n \"prediction\"], splitV))\n colnames(rfList$list[[i]]) <- c(\"left daughter\", \"right daughter\", \n \"split var\", \"split point\", \"status\", \"prediction\")\n }\n return(rfList)\n }\n\n\n"} {"package":"inTrees","topic":"RF2List","snippet":"### Name: RF2List\n### Title: Transform a random forest object to a list of trees\n### Aliases: RF2List\n### Keywords: randomforest\n\n### ** Examples\n\nlibrary(RRF)\ndata(iris)\nX <- iris[,1:(ncol(iris)-1)]\ntarget <- iris[,\"Species\"] \nrf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF \ntreeList <- RF2List(rf)\nruleExec <- extractRules(treeList,X) # transform to R-executable rules\n\n\n"} {"package":"inTrees","topic":"XGB2List","snippet":"### Name: XGB2List\n### Title: Transform an xgboost object to a list of trees\n### Aliases: XGB2List\n### Keywords: xgboost\n\n### ** Examples\n\n\tlibrary(data.table)\n\tlibrary(xgboost)\n\t# test data set 1: iris\n\tX <- within(iris,rm(\"Species\")); Y <- iris[,\"Species\"]\n\tmodel_mat <- model.matrix(~. -1, data=X)\n\txgb <- xgboost(model_mat, label = as.numeric(Y) - 1, nrounds = 20, \n\t\tobjective = \"multi:softprob\", num_class = 3 )\n\ttree_list <- XGB2List(xgb,model_mat)\n\n\n"} {"package":"inTrees","topic":"buildLearner","snippet":"### Name: buildLearner\n### Title: build a simplified tree ensemble learner (STEL)\n### Aliases: buildLearner\n### Keywords: STEL learner\n\n### ** Examples\n\ndata(iris)\nlibrary(RRF)\nX <- iris[,1:(ncol(iris)-1)]\ntarget <- iris[,\"Species\"] \nrf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF \ntreeList <- RF2List(rf)\nruleExec <- extractRules(treeList,X)\nruleExec <- unique(ruleExec)\nruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules\nruleMetric <- pruneRule(ruleMetric,X,target) # prune each rule\n#ruleMetric <- selectRuleRRF(ruleMetric,X,target) # rule selection\nlearner <- buildLearner(ruleMetric,X,target)\npred <- applyLearner(learner,X)\nread <- presentRules(learner,colnames(X)) # more readable format\n\n# format the rule and metrics as a table in latex code\nlibrary(xtable)\nprint(xtable(read), include.rownames=FALSE)\nprint(xtable(ruleMetric[1:2,]), include.rownames=FALSE)\n\n\n\n"} {"package":"inTrees","topic":"computeRuleInfor","snippet":"### Name: computeRuleInfor\n### Title: compute rule information\n### Aliases: computeRuleInfor\n### Keywords: internal\n\n### ** Examples\n\n\t# this is an internal function.\n\n\n"} {"package":"inTrees","topic":"dataSimulate","snippet":"### Name: dataSimulate\n### Title: Simulate data\n### Aliases: dataSimulate\n### Keywords: simulate\n\n### ** Examples\n\nres <- dataSimulate(flag=1)\nX <- res$X; \ntarget <- res$target\n\n\n"} {"package":"inTrees","topic":"dicretizeVector","snippet":"### Name: dicretizeVector\n### Title: discretize a variable\n### Aliases: dicretizeVector\n### Keywords: discretize\n\n### ** Examples\n\n data(iris)\n dicretizeVector(iris[,1],3)\n\n\n"} {"package":"inTrees","topic":"extractRules","snippet":"### Name: extractRules\n### Title: Extract rules from a list of trees\n### Aliases: extractRules\n### Keywords: extract\n\n### ** Examples\n\n library(RRF)\n data(iris)\n 
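# separate the predictors from the class label before fitting the forest\n 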
X <- iris[,1:(ncol(iris)-1)]\n target <- iris[,\"Species\"] \n rf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF \n treeList <- RF2List(rf)\n ruleExec <- extractRules(treeList,X,digits=4) # transform to R-executable rules\n ruleExec <- unique(ruleExec)\n\n\n"} {"package":"inTrees","topic":"formatGBM","snippet":"### Name: formatGBM\n### Title: internal\n### Aliases: formatGBM\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (gbmList, splitBin,X) \n{\n for (j in 1:length(gbmList$list)) {\n a <- gbmList$list[[j]]\n rownames(a) <- 1:nrow(a)\n a$status <- a$SplitVar\n a <- a[, c(\"LeftNode\", \"RightNode\", \"MissingNode\", \"SplitVar\", \n \"SplitCodePred\", \"status\")]\n a[which(a[, \"SplitVar\"] >= 0), c(\"SplitVar\", \"LeftNode\", \n \"RightNode\", \"MissingNode\")] <- a[which(a[, \"SplitVar\"] >= \n 0), c(\"SplitVar\", \"LeftNode\", \"RightNode\", \"MissingNode\")] + \n 1\n ix <- a$MissingNode[which(a$MissingNode > 0)]\n if (length(ix) > 0) \n a$status[ix] <- 10\n a <- a[, c(\"LeftNode\", \"RightNode\", \"SplitVar\", \"SplitCodePred\", \n \"status\")]\n cat <- which(sapply(X, is.factor) & !sapply(X, is.ordered))\n ix <- which(a[, \"SplitVar\"] %in% cat)\n for (i in ix) a[i, \"SplitCodePred\"] <- splitBin[a[i, \n \"SplitCodePred\"] + 1]\n colnames(a) <- c(\"left daughter\", \"right daughter\", \"split var\", \n \"split point\", \"status\")\n gbmList$list[[j]] <- a\n }\n return(gbmList)\n }\n\n\n"} {"package":"inTrees","topic":"getFreqPattern","snippet":"### Name: getFreqPattern\n### Title: calculate frequent variable interactions\n### Aliases: getFreqPattern\n### Keywords: variable interaction\n\n### ** Examples\n\nlibrary(RRF)\nlibrary(arules)\ndata(iris)\nX <- iris[,1:(ncol(iris)-1)]\ntarget <- iris[,\"Species\"] \nrf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF \ntreeList <- RF2List(rf)\nruleExec <- extractRules(treeList,X) # transform to R-executable rules\nruleMetric <- getRuleMetric(ruleExec,X,target) \nfreqPattern <- getFreqPattern(ruleMetric)\nfreqPatternMetric <- getRuleMetric(freqPattern,X,target)\n\n\n"} {"package":"inTrees","topic":"getRuleMetric","snippet":"### Name: getRuleMetric\n### Title: Assign outcomes to conditions, and measure the rules\n### Aliases: getRuleMetric\n### Keywords: measure rank\n\n### ** Examples\n\nlibrary(RRF)\ndata(iris)\nX <- iris[,1:(ncol(iris)-1)]\ntarget <- iris[,\"Species\"] \nrf <- RRF(X,as.factor(target),ntree=100) # build an ordinary RF \ntreeList <- RF2List(rf)\nruleExec <- extractRules(treeList,X) # transform to R-executable rules\nruleExec <- unique(ruleExec)\nruleMetric <- getRuleMetric(ruleExec,X,target) # measure rules\n\n\n"} {"package":"inTrees","topic":"lookupRule","snippet":"### Name: lookupRule\n### Title: internal\n### Aliases: lookupRule\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! 
----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (rules, strList) \n{\n ix <- grep(strList[1], rules[, \"condition\"])\n if (length(strList) >= 2) {\n for (i in 2:length(strList)) {\n ix2 <- grep(strList[i], rules[, \"condition\"])\n ix <- intersect(ix, ix2)\n }\n }\n if (length(ix) >= 1) \n return(rules[ix, , drop = FALSE])\n if (length(ix) == 0) \n return(NULL)\n }\n\n\n"} {"package":"inTrees","topic":"measureRule","snippet":"### Name: measureRule\n### Title: internal\n### Aliases: measureRule\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (ruleExec, X, target, pred = NULL) \n{\n len <- length(unlist(strsplit(ruleExec, split = \" & \")))\n origRule <- ruleExec\n ruleExec <- paste(\"which(\", ruleExec, \")\")\n ixMatch <- eval(parse(text = ruleExec))\n if (length(ixMatch) == 0) {\n v <- c(\"-1\", \"-1\", \"-1\", \"\", \"\")\n names(v) <- c(\"len\", \"freq\", \"err\", \"condition\", \"pred\")\n return(v)\n }\n ys <- target[ixMatch]\n freq <- round(length(ys)/nrow(X), digits = 3)\n if (is.numeric(target)) {\n ysMost <- mean(ys)\n err <- sum((ysMost - ys)^2)/length(ys)\n }\n else {\n if (length(pred) > 0) {\n ysMost = pred\n }\n else {\n ysMost <- names(which.max(table(ys)))\n }\n conf <- round(table(ys)[ysMost]/sum(table(ys)), digits = 3)\n err <- 1 - conf\n }\n rule <- origRule\n v <- c(len, freq, err, rule, ysMost)\n names(v) <- c(\"len\", \"freq\", \"err\", \"condition\", \"pred\")\n return(v)\n }\n\n\n"} {"package":"inTrees","topic":"presentRules","snippet":"### Name: presentRules\n### Title: Present a learner using column names instead of X[i,]\n### Aliases: presentRules\n### Keywords: present\n\n### ** Examples\n\n # See function \"buildLearner\"\n\n\n"} {"package":"inTrees","topic":"pruneRule","snippet":"### Name: pruneRule\n### Title: Prune irrelevant variable-value pairs from a rule condition\n### Aliases: pruneRule\n### Keywords: prune\n\n### ** Examples\n\n# see function \"buildLearner\"\n\n\n"} {"package":"inTrees","topic":"pruneSingleRule","snippet":"### Name: pruneSingleRule\n### Title: internal\n### Aliases: pruneSingleRule\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! 
----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (rule, X, target, maxDecay, typeDecay) \n{\n newRuleMetric <- measureRule(rule[\"condition\"], X, target)\n errOrig <- as.numeric(newRuleMetric[\"err\"])\n ruleV <- unlist(strsplit(rule[\"condition\"], split = \" & \"))\n pred <- rule[\"pred\"]\n if (length(ruleV) == 1) \n return(newRuleMetric)\n for (i in length(ruleV):1) {\n restRule <- ruleV[-i]\n restRule <- paste(restRule, collapse = \" & \")\n metricTmp <- measureRule(restRule, X, target, pred)\n errNew <- as.numeric(metricTmp[\"err\"])\n if (typeDecay == 1) {\n decay <- (errNew - errOrig)/max(errOrig, 1e-06)\n }\n else {\n decay <- (errNew - errOrig)\n }\n if (decay <= maxDecay) {\n ruleV <- ruleV[-i]\n newRuleMetric <- metricTmp\n if (length(ruleV) <= 1) \n break\n }\n }\n return(newRuleMetric)\n }\n\n\n"} {"package":"inTrees","topic":"rule2Table","snippet":"### Name: rule2Table\n### Title: internal function\n### Aliases: rule2Table\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (ruleExec, X, target) \n{\n I <- rep(0, nrow(X))\n ruleExec <- paste(\"which(\", ruleExec, \")\")\n ixMatch <- eval(parse(text = ruleExec))\n if (length(ixMatch) > 0) \n I[ixMatch] <- 1\n names(I) = NULL\n return(I)\n }\n\n\n"} {"package":"inTrees","topic":"ruleList2Exec","snippet":"### Name: ruleList2Exec\n### Title: internal\n### Aliases: ruleList2Exec\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (X, allRulesList) \n{\n typeX = getTypeX(X)\n ruleExec <- unique(t(sapply(allRulesList, singleRuleList2Exec, \n typeX = typeX)))\n ruleExec <- t(ruleExec)\n colnames(ruleExec) <- \"condition\"\n return(ruleExec)\n }\n\n\n"} {"package":"inTrees","topic":"selectRuleRRF","snippet":"### Name: selectRuleRRF\n### Title: select a set of relevant and non-redundant rules\n### Aliases: selectRuleRRF\n### Keywords: select\n\n### ** Examples\n\n # See function \"buildLearner\"\n\n\n"} {"package":"inTrees","topic":"singleRuleList2Exec","snippet":"### Name: singleRuleList2Exec\n### Title: internal\n### Aliases: singleRuleList2Exec\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (ruleList, typeX) \n{\n ruleExec <- \"\"\n vars <- ls(ruleList)\n vars <- vars[order(as.numeric(vars))]\n for (i in 1:length(vars)) {\n if (typeX[as.numeric(vars[i])] == 2) {\n values <- paste(\"c(\", paste(paste(\"'\", ruleList[[vars[i]]], \n \"'\", sep = \"\"), collapse = \",\"), \")\", sep = \"\")\n tmp = paste(\"X[,\", vars[i], \"] %in% \", values, sep = \"\")\n }\n else {\n tmp = ruleList[[vars[i]]]\n }\n if (i == 1) \n ruleExec <- paste(ruleExec, tmp, sep = \"\")\n if (i > 1) \n ruleExec <- paste(ruleExec, \" & \", tmp, sep = \"\")\n }\n return(c(ruleExec))\n }\n\n\n"} {"package":"inTrees","topic":"sortRule","snippet":"### Name: sortRule\n### Title: internal\n### Aliases: sortRule\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! 
----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (M, decreasing = TRUE) \n{\n qIx = order((1 - as.numeric(M[, \"err\"])), as.numeric(M[, \n \"freq\"]), -as.numeric(M[, \"len\"]), decreasing = decreasing)\n return(M[qIx, ])\n }\n\n\n"} {"package":"inTrees","topic":"voteAllRules","snippet":"### Name: voteAllRules\n### Title: internal\n### Aliases: voteAllRules\n### Keywords: internal\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (ruleMetric, X, type = \"r\", method = \"median\") \n{\n xVoteList = vector(\"list\", nrow(X))\n predY <- rep(\"\", nrow(X))\n for (i in 1:nrow(ruleMetric)) {\n ixMatch <- eval(parse(text = paste(\"which(\", ruleMetric[i, \n \"condition\"], \")\")))\n if (length(ixMatch) == 0) \n next\n for (ii in ixMatch) {\n xVoteList[[ii]] = c(xVoteList[[ii]], ruleMetric[i, \n \"pred\"])\n }\n }\n for (i in 1:length(xVoteList)) {\n thisV <- xVoteList[[i]]\n if (length(thisV) == 0) \n next\n if (type == \"c\") \n predY[i] <- names(table(thisV)[which.max(table(thisV))])\n if (type == \"r\") {\n thisV = as.numeric(thisV)\n if (method == \"median\") {\n predY[i] <- median(thisV)\n }\n else {\n predY[i] <- mean(thisV)\n }\n }\n }\n if (type == \"r\") \n predY <- as.numeric(predY)\n return(predY)\n }\n\n\n"} {"package":"dCUR","topic":"AASP","snippet":"### Name: AASP\n### Title: Academic Achievement Score Projection -AASP-\n### Aliases: AASP\n\n### ** Examples\n\ndata(AASP)\ndim(AASP)\n\n\n"} {"package":"dCUR","topic":"CUR","snippet":"### Name: CUR\n### Title: CUR\n### Aliases: CUR\n\n### ** Examples\n\n## No test: \n #Classic CUR with top scores selection criteria.\n result <- CUR(data=AASP, variables=hoessem:notabachillerato,\n k=20, rows = 1, columns = .2, standardize = TRUE,\n cur_method = \"sample_cur\")\n result\n#Extension of classic CUR: Recalibrating leverage scores\n#and fitting a Gaussian mixture model to the leverages.\n result <- CUR(data=AASP, variables=hoessem:notabachillerato,\n k=20, rows = 1, columns = .2, standardize = TRUE,\n cur_method = \"mixture\",\n correlation = R1, correlation_type = \"partial\")\n result\n## End(No test)\n\n\n\n"} {"package":"dCUR","topic":"dCUR","snippet":"### Name: dCUR\n### Title: dCUR\n### Aliases: dCUR dCUR-package\n\n### ** Examples\n\n\n## No test: \n results <- dCUR::dCUR(data=AASP, variables=hoessem:notabachillerato,\n k=15, rows=0.25, columns=0.25,skip = 0.1, standardize=TRUE,\n cur_method=\"sample_cur\",\n parallelize =TRUE, dynamic_columns = TRUE,\n dynamic_rows = TRUE)\n results\n## End(No test)\n\n\n\n"} {"package":"dCUR","topic":"mixture_plots","snippet":"### Name: mixture_plots\n### Title: mixture_plots\n### Aliases: mixture_plots\n\n### ** Examples\n\n## No test: \nresults <- CUR(data=AASP, variables=hoessem:notabachillerato,\nk=20, rows = .9999999, columns = .10, standardize = TRUE,\ncur_method = \"mixture\")\nmixture_plots(results)\n## End(No test)\n\n\n\n"} {"package":"dCUR","topic":"optimal_stage","snippet":"### Name: optimal_stage\n### Title: optimal_stage\n### Aliases: optimal_stage\n\n### ** Examples\n\n## No test: \nresults <- dCUR(data=AASP, variables=hoessem:notabachillerato,\nk=15, rows=0.25, columns=0.25,skip = 0.1, standardize=TRUE,\ncur_method=\"sample_cur\",\nparallelize =TRUE, dynamic_columns = TRUE,\ndynamic_rows = 
TRUE)\nresult <- optimal_stage(results, limit = 80)\nresult\nresult$k_plot\nresult$columns_plot\nresult$data\nresult$optimal\n## End(No test)\n\n\n\n"} {"package":"dCUR","topic":"relevant_variables_plot","snippet":"### Name: relevant_variables_plot\n### Title: relevant_variables_plot\n### Aliases: relevant_variables_plot\n\n### ** Examples\n\n## No test: \nresult <- CUR(data=AASP, variables=hoessem:notabachillerato,\nk=20, rows = 1, columns = .2, standardize = TRUE,\ncur_method = \"sample_cur\")\nrelevant_variables_plot(result)\n## End(No test)\n\n\n\n"} {"package":"dCUR","topic":"var_exp","snippet":"### Name: var_exp\n### Title: var_exp\n### Aliases: var_exp\n\n### ** Examples\n\n## No test: \nvar_exp(AASP, standardize = TRUE, hoessem:notabachillerato)\n## End(No test)\n\n\n"} {"package":"msda","topic":"GDS1615","snippet":"### Name: GDS1615\n### Title: GDS1615 data introduced in Burczynski et al. (2012).\n### Aliases: GDS1615 x y\n### Keywords: datasets\n\n### ** Examples\n\ndata(GDS1615)\n\n\n"} {"package":"msda","topic":"cv.msda","snippet":"### Name: cv.msda\n### Title: Cross-validation for msda\n### Aliases: cv.msda\n### Keywords: models classification\n\n### ** Examples\n\ndata(GDS1615)\nx<-GDS1615$x\ny<-GDS1615$y\nobj.cv<-cv.msda(x=x,y=y,nfolds=5,lambda.opt=\"max\")\nlambda.min<-obj.cv$lambda.min\nid.min<-which(obj.cv$lambda==lambda.min)\npred<-predict(obj.cv$msda.fit,x)[,id.min]\n\n\n"} {"package":"msda","topic":"msda","snippet":"### Name: msda\n### Title: Fits a regularization path for Multi-Class Sparse Discriminant\n### Analysis\n### Aliases: msda\n### Keywords: models classification\n\n### ** Examples\n\ndata(GDS1615)\nx<-GDS1615$x\ny<-GDS1615$y\nobj <- msda(x = x, y = y)\n\n\n"} {"package":"msda","topic":"plot.msda","snippet":"### Name: plot.msda\n### Title: Plot coefficients from a \"msda\" object\n### Aliases: plot.msda\n### Keywords: models classification\n\n### ** Examples\n\ndata(GDS1615)\nx<-GDS1615$x\ny<-GDS1615$y\nobj <- msda(x = x, y = y)\nplot(obj)\n\n\n"} {"package":"msda","topic":"predict.msda","snippet":"### Name: predict.msda\n### Title: make predictions from a \"msda\" object.\n### Aliases: predict.msda\n### Keywords: models classification\n\n### ** Examples\n\ndata(GDS1615)\nx<-GDS1615$x\ny<-GDS1615$y\nobj <- msda(x = x, y = y)\npred<-predict(obj,x)\n\n\n"} {"package":"varian","topic":"Variability_Measures","snippet":"### Name: Variability_Measures\n### Title: Variability Measures\n### Aliases: Variability_Measures by_id rmssd rmssd_id rolling_diff\n### rolling_diff_id sd_id\n### Keywords: utilities\n\n### ** Examples\n\nsd_id(mtcars$mpg, mtcars$cyl, long=TRUE)\nsd_id(mtcars$mpg, mtcars$cyl, long=FALSE)\nrmssd(1:4)\nrmssd(c(1, 3, 2, 4))\nrmssd_id(mtcars$mpg, mtcars$cyl)\nrmssd_id(mtcars$mpg, mtcars$cyl, long=FALSE)\nrolling_diff(1:7, window = 4)\nrolling_diff(c(1, 4, 3, 4, 5))\nrolling_diff_id(mtcars$mpg, mtcars$cyl, window = 3)\n\n\n"} {"package":"varian","topic":"empirical_pvalue","snippet":"### Name: empirical_pvalue\n### Title: Calculates an empirical p-value based on the data\n### Aliases: empirical_pvalue\n### Keywords: utilities\n\n### ** Examples\n\nempirical_pvalue(rnorm(100))\n\n\n"} {"package":"varian","topic":"parallel_stan","snippet":"### Name: parallel_stan\n### Title: Wrapper for the stan function to parallelize chains\n### Aliases: parallel_stan\n### Keywords: utilities\n\n### ** Examples\n\n# Make me!\n\n\n"} {"package":"varian","topic":"param_summary","snippet":"### Name: param_summary\n### Title: Calculates summaries for a parameter\n### 
Aliases: param_summary\n### Keywords: utilities\n\n### ** Examples\n\nparam_summary(rnorm(100))\nparam_summary(rnorm(100), pretty = TRUE)\n\n\n"} {"package":"varian","topic":"pval_smartformat","snippet":"### Name: pval_smartformat\n### Title: nice formatting for p-values\n### Aliases: pval_smartformat\n### Keywords: utilities\n\n### ** Examples\n\nvarian:::pval_smartformat(c(1, .15346, .085463, .05673, .04837, .015353462,\n .0089, .00164, .0006589, .0000000053326), 3, 5)\n\n\n"} {"package":"varian","topic":"res_gamma","snippet":"### Name: res_gamma\n### Title: Estimates the parameters of a Gamma distribution from SDs\n### Aliases: res_gamma\n### Keywords: utilities\n\n### ** Examples\n\nset.seed(1234)\ny <- rgamma(100, 3, 2)\nx <- rnorm(100 * 10, mean = 0, sd = rep(y, each = 10))\nID <- rep(1:100, each = 10)\nres_gamma(x, ID)\n\n\n"} {"package":"varian","topic":"simulate_gvm","snippet":"### Name: simulate_gvm\n### Title: Simulate a Gamma Variability Model\n### Aliases: simulate_gvm\n### Keywords: utilities\n\n### ** Examples\n\nraw.sim <- simulate_gvm(12, 140, 0, 1, 4, .1, 94367)\nsim.data <- with(raw.sim, {\n set.seed(265393)\n x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2))\n y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3)\n data.frame(\n y = Data$y,\n y2 = y2[Data$ID2],\n x1 = x2[Data$ID2, 1],\n x2 = x2[Data$ID2, 2],\n ID = Data$ID2)\n})\n\n\n"} {"package":"varian","topic":"stan_inits","snippet":"### Name: stan_inits\n### Title: Calculate Initial Values for Stan VM Model\n### Aliases: stan_inits\n### Keywords: models\n\n### ** Examples\n\n# make me!\n\n\n"} {"package":"varian","topic":"varian","snippet":"### Name: varian\n### Title: Variability Analysis using a Bayesian Variability Model (VM)\n### Aliases: varian varian-package\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D sim.data <- with(simulate_gvm(4, 60, 0, 1, 3, 2, 94367), {\n##D set.seed(265393)\n##D x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2))\n##D y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3)\n##D data.frame(\n##D y = Data$y,\n##D y2 = y2[Data$ID2],\n##D x1 = x2[Data$ID2, 1],\n##D x2 = x2[Data$ID2, 2],\n##D ID = Data$ID2)\n##D })\n##D m <- varian(y2 ~ x1 + x2, y ~ 1 | ID, data = sim.data, design = \"V -> Y\",\n##D totaliter = 10000, warmup = 1500, thin = 10, chains = 4, verbose=TRUE)\n##D \n##D # check diagnostics\n##D vm_diagnostics(m)\n##D \n##D sim.data2 <- with(simulate_gvm(21, 250, 0, 1, 3, 2, 94367), {\n##D set.seed(265393)\n##D x2 <- MASS::mvrnorm(k, c(0, 0), matrix(c(1, .3, .3, 1), 2))\n##D y2 <- rnorm(k, cbind(Int = 1, x2) %*% matrix(c(3, .5, .7)) + sigma, sd = 3)\n##D data.frame(\n##D y = Data$y,\n##D y2 = y2[Data$ID2],\n##D x1 = x2[Data$ID2, 1],\n##D x2 = x2[Data$ID2, 2],\n##D ID = Data$ID2)\n##D })\n##D # warning: may take several minutes\n##D m2 <- varian(y2 ~ x1 + x2, y ~ 1 | ID, data = sim.data2, design = \"V -> Y\",\n##D totaliter = 10000, warmup = 1500, thin = 10, chains = 4, verbose=TRUE)\n##D # check diagnostics\n##D vm_diagnostics(m2)\n## End(Not run)\n\n\n"} {"package":"varian","topic":"vm_diagnostics","snippet":"### Name: vm_diagnostics\n### Title: Plot diagnostics from a VM model\n### Aliases: vm_diagnostics\n### Keywords: hplot\n\n### ** Examples\n\n# Make Me!\n\n\n"} {"package":"varian","topic":"vm_stan","snippet":"### Name: vm_stan\n### Title: Create a Stan class VM object\n### Aliases: vm_stan\n### Keywords: models\n\n### ** Examples\n\n# Make Me!\n## Not run: \n##D test1 <- vm_stan(\"V -> Y\", 
useU=TRUE)\n##D test2 <- vm_stan(\"V -> Y\", useU=FALSE)\n##D test3 <- vm_stan(\"V -> M -> Y\", useU=TRUE)\n##D test4 <- vm_stan(\"V -> M -> Y\", useU=FALSE)\n##D test5 <- vm_stan(\"V\")\n## End(Not run)\n\n\n"} {"package":"varian","topic":"vmp_plot","snippet":"### Name: vmp_plot\n### Title: Plot the posterior distributions of the focal parameters from a\n### VM model\n### Aliases: vmp_plot\n### Keywords: hplot\n\n### ** Examples\n\n# Using made up data because the real models take a long time to run\nset.seed(1234) # make reproducible\nvmp_plot(matrix(rnorm(1000), ncol = 2))\n\n\n"} {"package":"sparsebnUtils","topic":"coerce_discrete","snippet":"### Name: coerce_discrete\n### Title: Recode discrete data\n### Aliases: coerce_discrete coerce_discrete.factor coerce_discrete.numeric\n### coerce_discrete.integer coerce_discrete.character\n### coerce_discrete.data.frame coerce_discrete.sparsebnData\n\n### ** Examples\n\nx <- 1:5\ncoerce_discrete(x) # output: 0 1 2 3 4\n\nx <- c(\"high\", \"normal\", \"high\", \"low\")\ncoerce_discrete(x) # output: 0 2 0 1\n\n\n\n"} {"package":"sparsebnUtils","topic":"fit_multinom_dag","snippet":"### Name: fit_multinom_dag\n### Title: Inference in discrete Bayesian networks\n### Aliases: fit_multinom_dag\n\n### ** Examples\n\n\n### construct a random data set\nx <- c(0,1,0,1,0)\ny <- c(1,0,1,0,1)\nz <- c(0,1,2,1,0)\na <- c(1,1,1,0,0)\nb <- c(0,0,1,1,1)\ndat <- data.frame(x, y, z, a, b)\n\n### randomly construct an edgelist of a graph\nnnode <- ncol(dat)\nli <- vector(\"list\", length = nnode)\nli[[1]] <- c(2L,4L)\nli[[2]] <- c(3L,4L,5L)\nli[[3]] <- integer(0)\nli[[4]] <- integer(0)\nli[[5]] <- integer(0)\nedgeL <- edgeList(li)\n\n### run fit_multinom_dag\nfit.multinom <- fit_multinom_dag(edgeL, dat)\n\n\n\n"} {"package":"sparsebnUtils","topic":"to_bn","snippet":"### Name: to_bn\n### Title: Conversion between graph types\n### Aliases: to_bn to_graphNEL to_igraph to_network sparsebn-compat\n\n### ** Examples\n\n## Not run: \n##D ### Learn the cytometry network\n##D library(sparsebn)\n##D data(cytometryContinuous)\n##D cyto.data <- sparsebnData(cytometryContinuous[[\"data\"]],\n##D type = \"continuous\",\n##D ivn = cytometryContinuous[[\"ivn\"]])\n##D cyto.learn <- estimate.dag(data = cyto.data)\n##D \n##D ### The output is a sparsebnPath object, which is a list of sparsebnFit objects\n##D class(cyto.learn)\n##D class(cyto.learn[[1]])\n##D \n##D ### Convert to igraph\n##D cyto.igraph <- to_igraph(cyto.learn)\n##D class(cyto.igraph) # not an igraph object!\n##D class(cyto.igraph[[1]]$edges) # the graph data in the 'edges' slot is converted to igraph\n##D gr <- cyto.igraph[[1]]$edges\n##D \n##D ### Different behaviour when input is already an edgeList\n##D edgeL <- cyto.learn[[1]]$edges\n##D gr <- to_igraph(edgeL) # input is edgeList, not sparsebnFit or sparsebnPath\n##D class(gr) # igraph object\n## End(Not run)\n\n\n\n"} {"package":"sparsebnUtils","topic":"sparsebnData","snippet":"### Name: sparsebnData\n### Title: sparsebnData class\n### Aliases: sparsebnData is.sparsebnData sparsebnData.data.frame\n### sparsebnData.matrix print.sparsebnData summary.sparsebnData\n### plot.sparsebnData\n\n### ** Examples\n\n\n### Generate a random continuous dataset\nmat <- matrix(rnorm(1000), nrow = 20)\ndat <- sparsebnData(mat, type = \"continuous\") # purely observational data with continuous variables\n\n### Discrete data\nmat <- rbind(c(0,2,0),\n c(1,1,0),\n c(1,0,3),\n c(0,1,0))\ndat.levels <- list(c(0,1), c(0,1,2), c(0,1,2,3))\ndat <- sparsebnData(mat,\n type = 
\"discrete\",\n levels = dat.levels) # purely observational data with discrete variables\n\ndat.ivn <- list(c(1), # first observation was intervened at node 1\n c(1), # second observation was intervened at node 1\n c(2,3), # third observation was intervened at nodes 2 and 3\n c(1,3)) # fourth observation was intervened at nodes 1 and 3\ndat <- sparsebnData(mat,\n type = \"discrete\",\n levels = dat.levels,\n ivn = dat.ivn) # specify intervention rows\n\n\n\n"} {"package":"sparsebnUtils","topic":"sparsebnFit","snippet":"### Name: sparsebnFit\n### Title: sparsebnFit class\n### Aliases: sparsebnFit is.sparsebnFit print.sparsebnFit\n### summary.sparsebnFit plot.sparsebnFit\n\n### ** Examples\n\n\n## Not run: \n##D ### Learn the cytometry network\n##D library(sparsebn)\n##D data(cytometryContinuous) # from the sparsebn package\n##D cyto.data <- sparsebnData(cytometryContinuous[[\"data\"]], type = \"continuous\")\n##D cyto.learn <- estimate.dag(cyto.data)\n##D \n##D ### Inspect the output\n##D class(cyto.learn[[1]])\n##D print(cyto.learn[[2]])\n##D show.parents(cyto.learn[[1]], c(\"raf\", \"mek\", \"plc\"))\n##D \n##D ### Manipulate a particular graph\n##D cyto.fit <- cyto.learn[[7]]\n##D num.nodes(cyto.fit)\n##D num.edges(cyto.fit)\n##D show.parents(cyto.fit, c(\"raf\", \"mek\", \"plc\"))\n##D plot(cyto.fit)\n##D \n##D ### Use graph package instead of edgeLists\n##D setGraphPackage(\"graph\", coerce = TRUE) # set sparsebn to use graph package\n##D cyto.edges <- cyto.fit$edges\n##D degree(cyto.edges) # only available with graph package\n##D isConnected(cyto.edges) # only available with graph package\n## End(Not run)\n\n\n\n"} {"package":"sparsebnUtils","topic":"sparsebnPath","snippet":"### Name: sparsebnPath\n### Title: sparsebnPath class\n### Aliases: sparsebnPath is.sparsebnPath print.sparsebnPath\n### summary.sparsebnPath plot.sparsebnPath\n\n### ** Examples\n\n\n## Not run: \n##D ### Learn the cytometry network\n##D library(sparsebn)\n##D data(cytometryContinuous) # from the sparsebn package\n##D cyto.data <- sparsebnData(cytometryContinuous[[\"data\"]], type = \"continuous\")\n##D cyto.learn <- estimate.dag(cyto.data)\n##D \n##D ### Inspect the output\n##D class(cyto.learn)\n##D print(cyto.learn)\n##D plot(cyto.learn)\n## End(Not run)\n\n\n\n"} {"package":"LSTS","topic":"Box.Ljung.Test","snippet":"### Name: Box.Ljung.Test\n### Title: Ljung-Box Test Plot\n### Aliases: Box.Ljung.Test\n\n### ** Examples\n\nBox.Ljung.Test(malleco, lag = 5)\n\n\n"} {"package":"LSTS","topic":"LS.kalman","snippet":"### Name: LS.kalman\n### Title: Kalman filter for locally stationary processes\n### Aliases: LS.kalman\n\n### ** Examples\n\nfit_kalman <- LS.kalman(malleco, start(malleco))\n\n\n"} {"package":"LSTS","topic":"LS.summary","snippet":"### Name: LS.summary\n### Title: Summary for Locally Stationary Time Series\n### Aliases: LS.summary\n\n### ** Examples\n\nfit_whittle <- LS.whittle(\n series = malleco, start = c(1, 1, 1, 1),\n order = c(p = 1, q = 0), ar.order = 1, sd.order = 1, N = 180, n.ahead = 10\n)\nLS.summary(fit_whittle)\n\n\n"} {"package":"LSTS","topic":"LS.whittle","snippet":"### Name: LS.whittle\n### Title: Whittle estimator to Locally Stationary Time Series\n### Aliases: LS.whittle\n\n### ** Examples\n\n# Analysis by blocks of phi and sigma parameters\nN <- 200\nS <- 100\nM <- trunc((length(malleco) - N) / S + 1)\ntable <- c()\nfor (j in 1:M) {\n x <- malleco[(1 + S * (j - 1)):(N + S * (j - 1))]\n table <- rbind(table, nlminb(\n start = c(0.65, 0.15), N = N,\n objective = LS.whittle.loglik,\n 
series = x, order = c(p = 1, q = 0)\n )$par)\n}\nu <- (N / 2 + S * (1:M - 1)) / length(malleco)\ntable <- as.data.frame(cbind(u, table))\ncolnames(table) <- c(\"u\", \"phi\", \"sigma\")\n# Start parameters\nphi <- smooth.spline(table$phi, spar = 1, tol = 0.01)$y\nfit.1 <- nls(phi ~ a0 + a1 * u, start = list(a0 = 0.65, a1 = 0.00))\nsigma <- smooth.spline(table$sigma, spar = 1)$y\nfit.2 <- nls(sigma ~ b0 + b1 * u, start = list(b0 = 0.65, b1 = 0.00))\nfit_whittle <- LS.whittle(\n series = malleco, start = c(coef(fit.1), coef(fit.2)), order = c(p = 1, q = 0),\n ar.order = 1, sd.order = 1, N = 180, n.ahead = 10\n)\n\n\n"} {"package":"LSTS","topic":"block.smooth.periodogram","snippet":"### Name: block.smooth.periodogram\n### Title: Smooth Periodogram by Blocks\n### Aliases: block.smooth.periodogram\n\n### ** Examples\n\nblock.smooth.periodogram(malleco)\n\n\n"} {"package":"LSTS","topic":"hessian","snippet":"### Name: hessian\n### Title: Hessian Matrix\n### Aliases: hessian\n\n### ** Examples\n\n# Variance of the maximum likelihood estimator for mu parameter in\n# gaussian data\nloglik <- function(series, x, sd = 1) {\n -sum(log(dnorm(series, mean = x, sd = sd)))\n}\nsqrt(c(var(malleco) / length(malleco), diag(solve(hessian(\n f = loglik, x = mean(malleco), series = malleco,\n sd = sd(malleco)\n)))))\n\n\n"} {"package":"LSTS","topic":"periodogram","snippet":"### Name: periodogram\n### Title: Periodogram function\n### Aliases: periodogram\n\n### ** Examples\n\n# AR(1) simulated\nset.seed(1776)\nts.sim <- arima.sim(n = 1000, model = list(order = c(1, 0, 0), ar = 0.7))\nper <- periodogram(ts.sim)\nper$plot\n\n\n"} {"package":"LSTS","topic":"smooth.periodogram","snippet":"### Name: smooth.periodogram\n### Title: Smoothing periodogram\n### Aliases: smooth.periodogram\n\n### ** Examples\n\n# AR(1) simulated\nrequire(ggplot2)\nset.seed(1776)\nts.sim <- arima.sim(n = 1000, model = list(order = c(1, 0, 0), ar = 0.7))\nper <- periodogram(ts.sim)\naux <- smooth.periodogram(ts.sim, plot = FALSE, spar = .7)\nsm_p <- data.frame(x = aux$lambda, y = aux$smooth.periodogram)\nsp_d <- data.frame(\n x = aux$lambda,\n y = spectral.density(ar = 0.7, lambda = aux$lambda)\n)\ng <- per$plot\ng +\n geom_line(data = sm_p, aes(x, y), color = \"#ff7f0e\") +\n geom_line(data = sp_d, aes(x, y), color = \"#d31244\")\n\n\n"} {"package":"LSTS","topic":"spectral.density","snippet":"### Name: spectral.density\n### Title: Spectral Density\n### Aliases: spectral.density\n\n### ** Examples\n\n# Spectral Density AR(1)\nrequire(ggplot2)\nf <- spectral.density(ar = 0.5, lambda = malleco)\nggplot(data.frame(x = malleco, y = f)) +\n geom_line(aes(x = as.numeric(x), y = as.numeric(y))) +\n labs(x = \"Frequency\", y = \"Spectral Density\") +\n theme_minimal()\n\n\n"} {"package":"LSTS","topic":"ts.diag","snippet":"### Name: ts.diag\n### Title: Diagnostic Plots for Time Series fits\n### Aliases: ts.diag\n\n### ** Examples\n\nts.diag(malleco)\n\n\n"} {"package":"photobiologySun","topic":"photobiologySun","snippet":"### Name: photobiologySun-package\n### Title: photobiologySun: Data for Sunlight Spectra\n### Aliases: photobiologySun photobiologySun-package\n\n### ** Examples\n\nlibrary(photobiology)\nlibrary(photobiologyWavebands)\n\nq_irrad(sun_may_morning.spct, PAR())\nq_ratio(sun_may_morning.spct, Red(\"Smith10\"), Far_red(\"Smith10\"))\n\n\n\n"} {"package":"photobiologySun","topic":"sun_reference.mspct","snippet":"### Name: sun_reference.mspct\n### Title: Reference solar spectra from ASTM G173\n### Aliases: sun_reference.mspct\n### Keywords: 
datasets\n\n### ** Examples\n\n\nnames(sun_reference.mspct)\n\n\n\n"} {"package":"tableExtra","topic":"draw_table_extra","snippet":"### Name: draw_table_extra\n### Title: Graphical display of a table with grobs of varying scales and\n### colours.\n### Aliases: draw_table_extra\n\n### ** Examples\n\n## No test: \nlibrary(dplyr)\nlibrary(tableExtra)\nlibrary(tibble)\n\n# load data\nload(system.file(\"testdata\", \"pcawg_counts.rda\", package=\"tableExtra\"))\nload(system.file(\"testdata\", \"sbs_aetiologies.rda\", package=\"tableExtra\"))\n\npcawg_plot_data <- function(){\n scale_breaks <- seq(from=0, to=1, by=0.1)\n color_palette <- c(\"#ffc651\", \"#ffa759\", \"#ff8962\", \"#ff6b6b\", \"#cc6999\", \"#9968c8\", \n \"#6767f8\", \"#4459ce\", \"#224ba5\",\"#013d7c\")\n color_breaks <- c(0, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 25, 1e6)\n color_bg <- c(\"#f8f9fa\", \"#e9ecef\")\n\n theme <- ttheme_awesome(base_size=12,\n rep_mode=\"col\",\n core_size=5, \n scale_breaks=scale_breaks,\n color_palette=color_palette, \n color_breaks=color_breaks, \n core=list(bg_params=list(fill=color_bg)))\n\n # define dscale and cols_more from PCAWG data\n dscale <- pcawg_counts %>%\n group_by(Cancer.Types) %>%\n mutate(n=n()) %>%\n summarize_at(vars(-Sample.Names, -Accuracy), ~sum(.x>0)) %>%\n mutate_at(vars(-Cancer.Types,-n), ~./n)\n\n cols_more <- list(\"n=\"=dscale$n)\n dscale$n <- NULL\n dscale <- column_to_rownames(.data=dscale, var=\"Cancer.Types\")\n dscale <- t(as.matrix(dscale))\n \n # define dcolor and rows_more from PCAWG data\n mask <- sbs_aetiologies$Signature %in% rownames(dscale)\n rows_more <- list(\"Aetiology\"=sbs_aetiologies[mask, \"Aetiology\"])\n\n dcolor <- pcawg_counts %>%\n group_by(Cancer.Types) %>%\n summarize_at(vars(-Sample.Names, -Accuracy), ~median(.[.!=0]*1e6/3.2e9)) %>%\n replace(is.na(.),0)\n\n dcolor <- column_to_rownames(.data=dcolor, var=\"Cancer.Types\")\n dcolor <- t(as.matrix(dcolor))\n\n list(dscale=dscale, dcolor=dcolor, cols_more=cols_more, rows_more=rows_more, theme=theme)\n}\n\n# tables needed for the plot and graphical parameters in `theme`\nplot_data <- pcawg_plot_data()\n\n# draw\noutput <- file.path(tempdir(),\"table_extra_pcawg.pdf\")\ndraw_table_extra(dscale=plot_data$dscale, theme=plot_data$theme, output=output,\n dcolor=plot_data$dcolor, cols_more=plot_data$cols_more,\n rows_more=plot_data$rows_more,\n dscale_title_legend=\"Prop of tumors with the signature\",\n dcolor_title_legend=\"Median mut/Mb due to signature\")\n## End(No test)\n\n\n"} {"package":"POUMM","topic":"OU","snippet":"### Name: OU\n### Title: Distribution of an Ornstein-Uhlenbeck Process at Time t, Given\n### Initial State at Time 0\n### Aliases: OU dOU rOU meanOU varOU sdOU\n\n### ** Examples\n\nz0 <- 8\nt <- 10\nn <- 100000\nsample <- rOU(n, z0, t, 2, 3, 1)\ndens <- dOU(sample, z0, t, 2, 3, 1)\nvar(sample) # around 1/4\nvarOU(t, 2, 1) \n\n\n\n"} {"package":"POUMM","topic":"POUMM","snippet":"### Name: POUMM\n### Title: The Phylogenetic (Ornstein-Uhlenbeck) Mixed Model\n### Aliases: POUMM\n\n### ** Examples\n\n## Not run: \n##D # Please, read the package vignette for more detailed examples.\n##D N <- 500\n##D tr <- ape::rtree(N)\n##D z <- rVNodesGivenTreePOUMM(tr, 0, 2, 3, 1, 1)[1:N]\n##D fit <- POUMM(z, tr, spec = specifyPOUMM(nSamplesMCMC = 5e4))\n##D plot(fit)\n##D summary(fit)\n##D AIC(fit)\n##D BIC(fit)\n##D coef(fit)\n##D logLik(fit)\n##D fitted(fit)\n##D plot(resid(fit))\n##D abline(h=0)\n##D \n##D # fit PMM to the same data and do a likelihood ratio test\n##D fitPMM <- POUMM(z, tr, spec = 
specifyPMM(nSamplesMCMC = 5e4))\n##D lmtest::lrtest(fitPMM, fit)\n## End(Not run)\n\n\n\n"} {"package":"POUMM","topic":"PhylogeneticH2","snippet":"### Name: PhylogeneticH2\n### Title: Phylogenetic Heritability\n### Aliases: PhylogeneticH2 alpha sigmaOU sigmae H2e\n\n### ** Examples\n\n# At POUMM stationary state (equilibrium, t=Inf)\nH2 <- H2(alpha = 0.75, sigma = 1, sigmae = 1, t = Inf) # 0.4\nalpha <- alpha(H2 = H2, sigma = 1, sigmae = 1, t = Inf) # 0.75\nsigma <- sigmaOU(H2 = H2, alpha = 0.75, sigmae = 1, t = Inf) # 1\nsigmae <- sigmae(H2 = H2, alpha = 0.75, sigma = 1, t = Inf) # 1\n\n# At finite time t = 0.2\nH2 <- H2(alpha = 0.75, sigma = 1, sigmae = 1, t = 0.2) # 0.1473309\nalpha <- alpha(H2 = H2, sigma = 1, sigmae = 1, t = 0.2) # 0.75\nsigma <- sigmaOU(H2 = H2, alpha = 0.75, sigmae = 1, t = 0.2) # 1\nsigmae <- sigmae(H2 = H2, alpha = 0.75, sigma = 1, t = 0.2) # 1\n\n \n\n\n"} {"package":"POUMM","topic":"likPOUMMGivenTreeVTipsC","snippet":"### Name: likPOUMMGivenTreeVTipsC\n### Title: Fast (parallel) POUMM likelihood calculation using the SPLITT\n### library\n### Aliases: likPOUMMGivenTreeVTipsC\n\n### ** Examples\n\n## Not run: \n##D N <- 100\n##D tr <- ape::rtree(N)\n##D z <- rVNodesGivenTreePOUMM(tr, 0, 2, 3, 1, 1)[1:N]\n##D pruneInfo <- pruneTree(tr, z)\n##D microbenchmark::microbenchmark(\n##D likCpp <- likPOUMMGivenTreeVTipsC(pruneInfo$integrator, 2, 3, 1, 1),\n##D likR <- likPOUMMGivenTreeVTips(z, tr, 2, 3, 1, 1, pruneInfo = pruneInfo))\n##D \n##D # should be the same values\n##D likCpp\n##D likR\n## End(Not run)\n\n\n\n"} {"package":"POUMM","topic":"plot.summary.POUMM","snippet":"### Name: plot.summary.POUMM\n### Title: Plot a summary of a POUMM fit\n### Aliases: plot.summary.POUMM\n\n### ** Examples\n\n## Not run: \n##D library(POUMM)\n##D \n##D set.seed(1)\n##D \n##D N <- 1000\n##D \n##D # create a random non-ultrametric tree of N tips\n##D tree <- ape::rtree(N) \n##D \n##D # Simulate the evolution of a trait along the tree\n##D z <- rVNodesGivenTreePOUMM(\n##D tree, g0 = 8, alpha = 1, theta = 4, sigma = 1.2, sigmae = .8)\n##D \n##D fit <- POUMM(z[1:N], tree, spec = list(nSamplesMCMC = 4e5))\n##D \n##D # Summarize the results from the fit in a table:\n##D summary(fit)\n##D \n##D # Create plots for some of the inferred parameters/statistics:\n##D pl <- plot(fit, stat = c(\"alpha\", \"theta\", \"sigma\", \"sigmae\", \"H2tMean\"), \n##D doZoomIn = TRUE, \n##D zoomInFilter = paste(\"!(stat %in% c('alpha', 'sigma', 'sigmae')) |\",\n##D \"(value >= 0 & value <= 8)\"),\n##D doPlot = FALSE)\n##D \n##D pl$traceplot\n##D pl$densplot\n## End(Not run)\n\n\n\n"} {"package":"POUMM","topic":"rTrajectoryOU","snippet":"### Name: rTrajectoryOU\n### Title: Generation of a random trajectory of an OU process starting from\n### a given initial state\n### Aliases: rTrajectoryOU\n\n### ** Examples\n\nz0 <- 0\nnSteps <- 100\nt <- 0.01\ntrajectory <- rTrajectoryOU(z0, t, 2, 2, 1, steps = nSteps)\nplot(trajectory, type = 'l')\n\n\n\n"} {"package":"ImportanceIndice","topic":"Distribution_LossSource","snippet":"### Name: Distribution_LossSource\n### Title: Loss source distribution information\n### Aliases: Distribution_LossSource\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n\n\n"} {"package":"ImportanceIndice","topic":"Distribution_SolutionSource","snippet":"### Name: Distribution_SolutionSource\n### Title: Solution source distribution 
information\n### Aliases: Distribution_SolutionSource\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n\n"} {"package":"ImportanceIndice","topic":"EffectivenessOfSolution","snippet":"### Name: EffectivenessOfSolution\n### Title: Function to estimate the effectiveness of solution sources\n### (S.S.) by loss source (Percentage_I.I. > 0.00) in the production\n### system.\n### Aliases: EffectivenessOfSolution\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\nLS<-LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP<-LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\nES<-EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production=DataProduction)\nES\n\n\n\n"} {"package":"ImportanceIndice","topic":"LossProduction","snippet":"### Name: LossProduction\n### Title: Obtaining indices associated with loss of production.\n### Aliases: LossProduction\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\nLS<-LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP<-LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\nES<-EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production=DataProduction)\nES\n\n\n\n"} {"package":"ImportanceIndice","topic":"LossSource","snippet":"### Name: LossSource\n### Title: Obtaining indices associated with sources of loss\n### Aliases: LossSource\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\nLS=LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP=LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\nES=EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production=DataProduction)\nES\n\n\n\n"} {"package":"ImportanceIndice","topic":"NonAttentionLevel","snippet":"### Name: NonAttentionLevel\n### Title: Estimates levels of non-attention.\n### Aliases: NonAttentionLevel\n\n### ** 
Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\n\nLS<-LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP<-LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\n\nES<-EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production =DataProduction)\nES\n\n\n\n\nid<-SelectEffectivenessOfSolution(ES)\nid<-c(TRUE , TRUE, TRUE , FALSE, TRUE)\n\n\nSS<-SolutionSource(SolutionData = DataSolutionSource,\n EffectivenessOfSolution = ES,Production = DataProduction,Id = id)\nSS\n\n\nNAL<-NonAttentionLevel(EffectivenessOfSolution = ES,LossProduction = LP,Id = id,Verbose=TRUE)\nNAL\n\n\n"} {"package":"ImportanceIndice","topic":"SelectEffectivenessOfSolution","snippet":"### Name: SelectEffectivenessOfSolution\n### Title: Determine the pair by pair effects that are important for the\n### analysis.\n### Aliases: SelectEffectivenessOfSolution\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\n\nLS<-LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP<-LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\n\nES<-EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production =DataProduction)\nES\n\n\n\n\nid<-SelectEffectivenessOfSolution(ES)\nid<-c(TRUE , TRUE, TRUE , FALSE, TRUE)\n\n\nSS<-SolutionSource(SolutionData = DataSolutionSource,\n EffectivenessOfSolution = ES,Production = DataProduction,Id = id)\nSS\n\n\nNAL<-NonAttentionLevel(EffectivenessOfSolution = ES,LossProduction = LP,Id = id,Verbose=TRUE)\nNAL\n\n\n"} {"package":"ImportanceIndice","topic":"SolutionSource","snippet":"### Name: SolutionSource\n### Title: Obtaining indexes associated with the solution sources.\n### Aliases: SolutionSource\n\n### ** Examples\n\nlibrary(ImportanceIndice)\ndata(\"DataLossSource\")\ndata(\"DataSolutionSource\")\ndata(\"DataProduction\")\ndata(\"DataNumberSamples\")\n\nDistribution_LossSource(DataLossSource)\nDistribution_SolutionSource(DataSolutionSource)\n\n#################################################\n###################################################\n\n\n\nLS<-LossSource(DataLoss = DataLossSource,DataProd = DataProduction)\nLS\n\nLP<-LossProduction(Data=DataLossSource,Prod = DataProduction,\n Evaluation=DataNumberSamples,\n SegurityMargen=0.75,MaximumToleranceOfLossFruits=1)\nLP\n\n\nES<-EffectivenessOfSolution(DataLossSource=DataLossSource,\n DataSolutionSource=DataSolutionSource,Production =DataProduction)\nES\n\n\n\n\nid<-SelectEffectivenessOfSolution(ES)\nid<-c(TRUE , TRUE, TRUE , FALSE, TRUE)\n\n\nSS<-SolutionSource(SolutionData = DataSolutionSource,\n EffectivenessOfSolution = ES,Production = DataProduction,Id = id)\nSS\n\n\nNAL<-NonAttentionLevel(EffectivenessOfSolution = 
ES,LossProduction = LP,Id = id,Verbose=TRUE)\nNAL\n\n\n"} {"package":"splitTools","topic":"create_folds","snippet":"### Name: create_folds\n### Title: Create Folds\n### Aliases: create_folds\n\n### ** Examples\n\ny <- rep(c(letters[1:4]), each = 5)\ncreate_folds(y)\ncreate_folds(y, k = 2)\ncreate_folds(y, k = 2, m_rep = 2)\ncreate_folds(y, k = 3, type = \"blocked\")\n\n\n"} {"package":"splitTools","topic":"create_timefolds","snippet":"### Name: create_timefolds\n### Title: Creates Folds for Time Series Data\n### Aliases: create_timefolds\n\n### ** Examples\n\ny <- runif(100)\ncreate_timefolds(y)\ncreate_timefolds(y, use_names = FALSE)\ncreate_timefolds(y, use_names = FALSE, type = \"moving\")\n\n\n"} {"package":"splitTools","topic":"multi_strata","snippet":"### Name: multi_strata\n### Title: Create Strata from Multiple Features\n### Aliases: multi_strata\n\n### ** Examples\n\ny_multi <- data.frame(\n A = rep(c(letters[1:4]), each = 20),\n B = factor(sample(c(0, 1), 80, replace = TRUE)),\n c = rnorm(80)\n)\ny <- multi_strata(y_multi, k = 3)\nfolds <- create_folds(y, k = 5)\n\n\n"} {"package":"splitTools","topic":"partition","snippet":"### Name: partition\n### Title: Split Data into Partitions\n### Aliases: partition\n\n### ** Examples\n\ny <- rep(c(letters[1:4]), each = 5)\npartition(y, p = c(0.7, 0.3), seed = 1)\npartition(y, p = c(0.7, 0.3), split_into_list = FALSE, seed = 1)\np <- c(train = 0.8, valid = 0.1, test = 0.1)\npartition(y, p, seed = 1)\npartition(y, p, split_into_list = FALSE, seed = 1)\npartition(y, p, split_into_list = FALSE, use_names = FALSE, seed = 1)\npartition(y, p = c(0.7, 0.3), type = \"grouped\")\npartition(y, p = c(0.7, 0.3), type = \"blocked\")\n\n\n"} {"package":"vegan","topic":"BCI","snippet":"### Name: BCI\n### Title: Barro Colorado Island Tree Counts\n### Aliases: BCI BCI.env\n### Keywords: datasets\n\n### ** Examples\n\ndata(BCI, BCI.env)\nhead(BCI.env)\n## see changed species names\noldnames <- attr(BCI, \"original.names\")\ntaxa <- cbind(\"Old Names\" = oldnames, \"Current Names\" = names(BCI))\nnoquote(taxa[taxa[,1] != taxa[,2], ])\n\n\n"} {"package":"vegan","topic":"CCorA","snippet":"### Name: CCorA\n### Title: Canonical Correlation Analysis\n### Aliases: CCorA biplot.CCorA\n### Keywords: multivariate\n\n### ** Examples\n\n# Example using two mite groups. The mite data are available in vegan\ndata(mite)\n# Two mite species associations (Legendre 2005, Fig. 4)\ngroup.1 <- c(1,2,4:8,10:15,17,19:22,24,26:30)\ngroup.2 <- c(3,9,16,18,23,25,31:35)\n# Separate Hellinger transformations of the two groups of species \nmite.hel.1 <- decostand(mite[,group.1], \"hel\")\nmite.hel.2 <- decostand(mite[,group.2], \"hel\")\nrownames(mite.hel.1) = paste(\"S\",1:nrow(mite),sep=\"\")\nrownames(mite.hel.2) = paste(\"S\",1:nrow(mite),sep=\"\")\nout <- CCorA(mite.hel.1, mite.hel.2)\nout\nbiplot(out, \"ob\") # Two plots of objects\nbiplot(out, \"v\", cex=c(0.7,0.6)) # Two plots of variables\nbiplot(out, \"ov\", cex=c(0.7,0.6)) # Four plots (2 for objects, 2 for variables)\nbiplot(out, \"b\", cex=c(0.7,0.6)) # Two biplots\nbiplot(out, xlabs = NA, plot.axes = c(3,5)) # Plot axes 3, 5. No object names\nbiplot(out, plot.type=\"biplots\", xlabs = NULL) # Replace object names by numbers\n\n# Example using random numbers. 
No significant relationship is expected\nmat1 <- matrix(rnorm(60),20,3)\nmat2 <- matrix(rnorm(100),20,5)\nout2 = CCorA(mat1, mat2, permutations=99)\nout2\nbiplot(out2, \"b\")\n\n\n"} {"package":"vegan","topic":"MDSrotate","snippet":"### Name: MDSrotate\n### Title: Rotate First MDS Dimension Parallel to an External Variable\n### Aliases: MDSrotate\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\nmod <- monoMDS(vegdist(varespec))\nmod <- with(varechem, MDSrotate(mod, pH))\nplot(mod)\nef <- envfit(mod ~ pH, varechem, permutations = 0)\nplot(ef)\nordisurf(mod ~ pH, varechem, knots = 1, add = TRUE)\n\n\n"} {"package":"vegan","topic":"MOStest","snippet":"### Name: MOStest\n### Title: Mitchell-Olds and Shaw Test for the Location of Quadratic\n### Extreme\n### Aliases: MOStest plot.MOStest fieller.MOStest profile.MOStest\n### confint.MOStest\n### Keywords: models regression\n\n### ** Examples\n\n## The Al-Mufti data analysed in humpfit():\nmass <- c(140,230,310,310,400,510,610,670,860,900,1050,1160,1900,2480)\nspno <- c(1, 4, 3, 9, 18, 30, 20, 14, 3, 2, 3, 2, 5, 2)\nmod <- MOStest(mass, spno)\n## Insignificant\nmod\n## ... but inadequate shape of the curve\nop <- par(mfrow=c(2,2), mar=c(4,4,1,1)+.1)\nplot(mod)\n## Looks rather like log-link with Poisson error and logarithmic biomass\nmod <- MOStest(log(mass), spno, family=quasipoisson)\nmod\nplot(mod)\npar(op)\n## Confidence Limits\nfieller.MOStest(mod)\nconfint(mod)\nplot(profile(mod))\n\n\n"} {"package":"vegan","topic":"RsquareAdj","snippet":"### Name: RsquareAdj\n### Title: Adjusted R-square\n### Aliases: RsquareAdj RsquareAdj.default RsquareAdj.rda RsquareAdj.cca\n### RsquareAdj.lm RsquareAdj.glm\n### Keywords: univar multivariate\n\n### ** Examples\n\ndata(mite)\ndata(mite.env)\n## rda\nm <- rda(decostand(mite, \"hell\") ~ ., mite.env)\nRsquareAdj(m)\n## cca\nm <- cca(decostand(mite, \"hell\") ~ ., mite.env)\nRsquareAdj(m)\n## default method\nRsquareAdj(0.8, 20, 5)\n\n\n"} {"package":"vegan","topic":"SSarrhenius","snippet":"### Name: SSarrhenius\n### Title: Self-Starting nls Species-Area Models\n### Aliases: SSarrhenius SSlomolino SSgitay SSgleason\n### Keywords: models\n\n### ** Examples\n\n## Get species area data: sipoo.map gives the areas of islands\ndata(sipoo, sipoo.map)\nS <- specnumber(sipoo)\nplot(S ~ area, sipoo.map, xlab = \"Island Area (ha)\",\n ylab = \"Number of Species\", ylim = c(1, max(S)))\n## The Arrhenius model\nmarr <- nls(S ~ SSarrhenius(area, k, z), data=sipoo.map)\nmarr\n## confidence limits from profile likelihood\nconfint(marr)\n## draw a line\nxtmp <- with(sipoo.map, seq(min(area), max(area), len=51))\nlines(xtmp, predict(marr, newdata=data.frame(area = xtmp)), lwd=2)\n## The normal way is to use linear regression on log-log data,\n## but this will be different from the previous:\nmloglog <- lm(log(S) ~ log(area), data=sipoo.map)\nmloglog\nlines(xtmp, exp(predict(mloglog, newdata=data.frame(area=xtmp))),\n lty=2)\n## Gleason: log-linear\nmgle <- nls(S ~ SSgleason(area, k, slope), sipoo.map)\nlines(xtmp, predict(mgle, newdata=data.frame(area=xtmp)),\n lwd=2, col=2)\n## Gitay: quadratic of log-linear\nmgit <- nls(S ~ SSgitay(area, k, slope), sipoo.map)\nlines(xtmp, predict(mgit, newdata=data.frame(area=xtmp)),\n lwd=2, col = 3)\n## Lomolino: using original names of the parameters (Lomolino 2000):\nmlom <- nls(S ~ SSlomolino(area, Smax, A50, Hill), sipoo.map)\nmlom\nlines(xtmp, predict(mlom, newdata=data.frame(area=xtmp)),\n lwd=2, col = 4)\n## One canned model of standard R:\nmmic <- 
nls(S ~ SSmicmen(area, slope, Asym), sipoo.map)\nlines(xtmp, predict(mmic, newdata = data.frame(area=xtmp)),\n lwd =2, col = 5)\nlegend(\"bottomright\", c(\"Arrhenius\", \"log-log linear\", \"Gleason\", \"Gitay\", \n \"Lomolino\", \"Michaelis-Menten\"), col=c(1,1,2,3,4,5), lwd=c(2,1,2,2,2,2), \n lty=c(1,2,1,1,1,1))\n## compare models (AIC)\nallmods <- list(Arrhenius = marr, Gleason = mgle, Gitay = mgit, \n Lomolino = mlom, MicMen= mmic)\nsapply(allmods, AIC)\n\n\n"} {"package":"vegan","topic":"add1.cca","snippet":"### Name: add1.cca\n### Title: Add or Drop Single Terms to a Constrained Ordination Model\n### Aliases: add1.cca drop1.cca\n### Keywords: multivariate models\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\n## Automatic model building based on AIC but with permutation tests\nstep(cca(dune ~ 1, dune.env), reformulate(names(dune.env)), test=\"perm\")\n## see ?ordistep to do the same, but based on permutation P-values\n## Not run: \n##D ordistep(cca(dune ~ 1, dune.env), reformulate(names(dune.env)))\n## End(Not run)\n## Manual model building\n## -- define the maximal model for scope\nmbig <- rda(dune ~ ., dune.env)\n## -- define an empty model to start with\nm0 <- rda(dune ~ 1, dune.env)\n## -- manual selection and updating\nadd1(m0, scope=formula(mbig), test=\"perm\")\nm0 <- update(m0, . ~ . + Management)\nadd1(m0, scope=formula(mbig), test=\"perm\")\nm0 <- update(m0, . ~ . + Moisture)\n## -- included variables still significant?\ndrop1(m0, test=\"perm\")\nadd1(m0, scope=formula(mbig), test=\"perm\")\n\n\n"} {"package":"vegan","topic":"adipart","snippet":"### Name: adipart\n### Title: Additive Diversity Partitioning and Hierarchical Null Model\n### Testing\n### Aliases: adipart adipart.default adipart.formula hiersimu\n### hiersimu.default hiersimu.formula\n### Keywords: multivariate\n\n### ** Examples\n\n## NOTE: 'nsimul' argument usually needs to be >= 99\n## here much lower value is used for demonstration\n\ndata(mite)\ndata(mite.xy)\ndata(mite.env)\n## Function to get equal area partitions of the mite data\ncutter <- function (x, cut = seq(0, 10, by = 2.5)) {\n out <- rep(1, length(x))\n for (i in 2:(length(cut) - 1))\n out[which(x > cut[i] & x <= cut[(i + 1)])] <- i\n return(out)}\n## The hierarchy of sample aggregation\nlevsm <- with(mite.xy, data.frame(\n l1=1:nrow(mite),\n l2=cutter(y, cut = seq(0, 10, by = 2.5)),\n l3=cutter(y, cut = seq(0, 10, by = 5)),\n l4=rep(1, nrow(mite))))\n## Let's see in a map\npar(mfrow=c(1,3))\nplot(mite.xy, main=\"l1\", col=as.numeric(levsm$l1)+1, asp = 1)\nplot(mite.xy, main=\"l2\", col=as.numeric(levsm$l2)+1, asp = 1)\nplot(mite.xy, main=\"l3\", col=as.numeric(levsm$l3)+1, asp = 1)\npar(mfrow=c(1,1))\n## Additive diversity partitioning\nadipart(mite, index=\"richness\", nsimul=19)\n## the next two define identical models\nadipart(mite, levsm, index=\"richness\", nsimul=19)\nadipart(mite ~ l2 + l3, levsm, index=\"richness\", nsimul=19)\n## Hierarchical null model testing\n## diversity analysis (similar to adipart)\nhiersimu(mite, FUN=diversity, relative=TRUE, nsimul=19)\nhiersimu(mite ~ l2 + l3, levsm, FUN=diversity, relative=TRUE, nsimul=19)\n## Hierarchical testing with the Morisita index\nmorfun <- function(x) dispindmorisita(x)$imst\nhiersimu(mite ~., levsm, morfun, drop.highest=TRUE, nsimul=19)\n\n\n"} {"package":"vegan","topic":"adonis2","snippet":"### Name: adonis\n### Title: Permutational Multivariate Analysis of Variance Using Distance\n### Matrices\n### Aliases: adonis2\n### Keywords: multivariate nonparametric\n\n### ** 
Examples\n\ndata(dune)\ndata(dune.env)\n## default test by terms\nadonis2(dune ~ Management*A1, data = dune.env)\n## overall tests\nadonis2(dune ~ Management*A1, data = dune.env, by = NULL)\n\n### Example of use with strata, for nested (e.g., block) designs.\ndat <- expand.grid(rep=gl(2,1), NO3=factor(c(0,10)),field=gl(3,1) )\ndat\nAgropyron <- with(dat, as.numeric(field) + as.numeric(NO3)+2) +rnorm(12)/2\nSchizachyrium <- with(dat, as.numeric(field) - as.numeric(NO3)+2) +rnorm(12)/2\ntotal <- Agropyron + Schizachyrium\ndotplot(total ~ NO3, dat, jitter.x=TRUE, groups=field,\n type=c('p','a'), xlab=\"NO3\", auto.key=list(columns=3, lines=TRUE) )\n\nY <- data.frame(Agropyron, Schizachyrium)\nmod <- metaMDS(Y, trace = FALSE)\nplot(mod)\n### Ellipsoid hulls show treatment\nwith(dat, ordiellipse(mod, field, kind = \"ehull\", label = TRUE))\n### Spider shows fields\nwith(dat, ordispider(mod, field, lty=3, col=\"red\"))\n\n### Incorrect (no strata)\nadonis2(Y ~ NO3, data = dat, permutations = 199)\n## Correct with strata\nwith(dat, adonis2(Y ~ NO3, data = dat, permutations = 199, strata = field))\n\n\n"} {"package":"vegan","topic":"anosim","snippet":"### Name: anosim\n### Title: Analysis of Similarities\n### Aliases: anosim summary.anosim plot.anosim\n### Keywords: multivariate nonparametric htest\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\ndune.dist <- vegdist(dune)\ndune.ano <- with(dune.env, anosim(dune.dist, Management))\nsummary(dune.ano)\nplot(dune.ano)\n\n\n"} {"package":"vegan","topic":"anova.cca","snippet":"### Name: anova.cca\n### Title: Permutation Test for Constrained Correspondence Analysis,\n### Redundancy Analysis and Constrained Analysis of Principal Coordinates\n### Aliases: anova.cca permutest permutest.cca\n### Keywords: multivariate htest\n\n### ** Examples\n\ndata(dune, dune.env)\nmod <- cca(dune ~ Moisture + Management, dune.env)\n## overall test\nanova(mod)\n## tests for individual terms\nanova(mod, by=\"term\")\nanova(mod, by=\"margin\")\n## sequential test for contrasts\nanova(mod, by = \"onedf\")\n## test for adding all environmental variables\nanova(mod, cca(dune ~ ., dune.env))\n\n\n"} {"package":"vegan","topic":"avgdist","snippet":"### Name: avgdist\n### Title: Averaged Subsampled Dissimilarity Matrices\n### Aliases: avgdist\n### Keywords: multivariate\n\n### ** Examples\n\n# Import an example count dataset\ndata(BCI)\n# Test the base functionality\nmean.avg.dist <- avgdist(BCI, sample = 50, iterations = 10)\n# Test the transformation function\nmean.avg.dist.t <- avgdist(BCI, sample = 50, iterations = 10, transf = sqrt)\n# Test the median functionality\nmedian.avg.dist <- avgdist(BCI, sample = 50, iterations = 10, meanfun = median)\n# Print the resulting tables\nhead(as.matrix(mean.avg.dist))\nhead(as.matrix(mean.avg.dist.t))\nhead(as.matrix(median.avg.dist))\n# Run example to illustrate low variance of mean, median, and stdev results\n# Mean and median std dev are around 0.05\nsdd <- avgdist(BCI, sample = 50, iterations = 100, meanfun = sd)\nsummary(mean.avg.dist)\nsummary(median.avg.dist)\nsummary(sdd)\n# Test for when subsampling depth excludes some samples\n# Return samples that are removed for not meeting depth filter\ndepth.avg.dist <- avgdist(BCI, sample = 450, iterations = 10)\n# Print the result\ndepth.avg.dist\n\n\n"} {"package":"vegan","topic":"beals","snippet":"### Name: beals\n### Title: Beals Smoothing and Degree of Absence\n### Aliases: beals swan\n### Keywords: manip smooth\n\n### ** Examples\n\ndata(dune)\n## Default\nx <- beals(dune)\n## Remove 
target species\nx <- beals(dune, include = FALSE)\n## Smoothed values against presence or absence of species\npa <- decostand(dune, \"pa\")\nboxplot(as.vector(x) ~ unlist(pa), xlab=\"Presence\", ylab=\"Beals\")\n## Remove the bias of target species: Yields lower values.\nbeals(dune, type = 3, include = FALSE)\n## Uses abundance information.\n## Vector with beals smoothing values corresponding to the first species\n## in dune.\nbeals(dune, species=1, include=TRUE) \n\n\n"} {"package":"vegan","topic":"betadisper","snippet":"### Name: betadisper\n### Title: Multivariate homogeneity of groups dispersions (variances)\n### Aliases: betadisper scores.betadisper anova.betadisper plot.betadisper\n### boxplot.betadisper TukeyHSD.betadisper eigenvals.betadisper\n### print.betadisper ordimedian\n### Keywords: methods multivariate hplot\n\n### ** Examples\n\ndata(varespec)\n\n## Bray-Curtis distances between samples\ndis <- vegdist(varespec)\n\n## First 16 sites grazed, remaining 8 sites ungrazed\ngroups <- factor(c(rep(1,16), rep(2,8)), labels = c(\"grazed\",\"ungrazed\"))\n\n## Calculate multivariate dispersions\nmod <- betadisper(dis, groups)\nmod\n\n## Perform test\nanova(mod)\n\n## Permutation test for F\npermutest(mod, pairwise = TRUE, permutations = 99)\n\n## Tukey's Honest Significant Differences\n(mod.HSD <- TukeyHSD(mod))\nplot(mod.HSD)\n\n## Plot the groups and distances to centroids on the\n## first two PCoA axes\nplot(mod)\n\n## with data ellipses instead of hulls\nplot(mod, ellipse = TRUE, hull = FALSE) # 1 sd data ellipse\nplot(mod, ellipse = TRUE, hull = FALSE, conf = 0.90) # 90% data ellipse\n\n# plot with manual colour specification\nmy_cols <- c(\"#1b9e77\", \"#7570b3\")\nplot(mod, col = my_cols, pch = c(16,17), cex = 1.1)\n\n## can also specify which axes to plot, ordering respected\nplot(mod, axes = c(3,1), seg.col = \"forestgreen\", seg.lty = \"dashed\")\n\n## Draw a boxplot of the distances to centroid for each group\nboxplot(mod)\n\n## `scores` and `eigenvals` also work\nscrs <- scores(mod)\nstr(scrs)\nhead(scores(mod, 1:4, display = \"sites\"))\n# group centroids/medians \nscores(mod, 1:4, display = \"centroids\")\n# eigenvalues from the underlying principal coordinates analysis\neigenvals(mod) \n\n## try out bias correction; compare with mod3\n(mod3B <- betadisper(dis, groups, type = \"median\", bias.adjust=TRUE))\nanova(mod3B)\npermutest(mod3B, permutations = 99)\n\n## should always work for a single group\ngroup <- factor(rep(\"grazed\", NROW(varespec)))\n(tmp <- betadisper(dis, group, type = \"median\"))\n(tmp <- betadisper(dis, group, type = \"centroid\"))\n\n## simulate missing values in 'd' and 'group'\n## using spatial medians\ngroups[c(2,20)] <- NA\ndis[c(2, 20)] <- NA\nmod2 <- betadisper(dis, groups) ## messages\nmod2\npermutest(mod2, permutations = 99)\nanova(mod2)\nplot(mod2)\nboxplot(mod2)\nplot(TukeyHSD(mod2))\n\n## Using group centroids\nmod3 <- betadisper(dis, groups, type = \"centroid\")\nmod3\npermutest(mod3, permutations = 99)\nanova(mod3)\nplot(mod3)\nboxplot(mod3)\nplot(TukeyHSD(mod3))\n\n\n\n"} {"package":"vegan","topic":"betadiver","snippet":"### Name: betadiver\n### Title: Indices of beta Diversity\n### Aliases: betadiver scores.betadiver plot.betadiver\n### Keywords: multivariate\n\n### ** Examples\n\n## Raw data and plotting\ndata(sipoo)\nm <- betadiver(sipoo)\nplot(m)\n## The indices\nbetadiver(help=TRUE)\n## The basic Whittaker index\nd <- betadiver(sipoo, \"w\")\n## This should be equal to Sorensen index (binary Bray-Curtis in\n## vegan)\nrange(d - 
vegdist(sipoo, binary=TRUE))\n\n\n"} {"package":"vegan","topic":"bgdispersal","snippet":"### Name: bgdispersal\n### Title: Coefficients of Biogeographical Dispersal Direction\n### Aliases: bgdispersal\n### Keywords: multivariate nonparametric\n\n### ** Examples\n\nmat <- matrix(c(32,15,14,10,70,30,100,4,10,30,25,0,18,0,40,\n 0,0,20,0,0,0,0,4,0,30,20,0,0,0,0,25,74,42,1,45,89,5,16,16,20),\n 4, 10, byrow=TRUE)\nbgdispersal(mat)\n\n\n"} {"package":"vegan","topic":"bioenv","snippet":"### Name: bioenv\n### Title: Best Subset of Environmental Variables with Maximum (Rank)\n### Correlation with Community Dissimilarities\n### Aliases: bioenv bioenv.default bioenv.formula summary.bioenv bioenvdist\n### Keywords: multivariate\n\n### ** Examples\n\n# The method is very slow for a large number of possible subsets.\n# Therefore only 6 variables in this example.\ndata(varespec)\ndata(varechem)\nsol <- bioenv(wisconsin(varespec) ~ log(N) + P + K + Ca + pH + Al, varechem)\nsol\n## IGNORE_RDIFF_BEGIN\nsummary(sol)\n## IGNORE_RDIFF_END\n\n\n"} {"package":"vegan","topic":"biplot.rda","snippet":"### Name: biplot.rda\n### Title: PCA biplot\n### Aliases: biplot.rda biplot.cca\n### Keywords: hplot\n\n### ** Examples\n\ndata(dune)\nmod <- rda(dune, scale = TRUE)\nbiplot(mod, scaling = \"symmetric\")\n\n## different type for species and site scores\nbiplot(mod, scaling = \"symmetric\", type = c(\"text\", \"points\"))\n\n## We can use ordiplot pipes in R 4.1 to build similar plots with\n## flexible control\n## Not run: \n##D if (getRversion() >= \"4.1\") {\n##D plot(mod, scaling = \"symmetric\", type=\"n\") |>\n##D text(\"sites\", cex=0.8) |>\n##D text(\"species\", arrows=TRUE, length=0.02, col=\"red\", cex=0.6)\n##D }\n## End(Not run)\n\n\n"} {"package":"vegan","topic":"capscale","snippet":"### Name: capscale\n### Title: [Partial] Distance-based Redundancy Analysis\n### Aliases: capscale dbrda\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\n## Basic Analysis\nvare.cap <- capscale(varespec ~ N + P + K + Condition(Al), varechem,\n dist=\"bray\")\nvare.cap\nplot(vare.cap)\nanova(vare.cap)\n## Avoid negative eigenvalues with additive constant\ncapscale(varespec ~ N + P + K + Condition(Al), varechem,\n dist=\"bray\", add =TRUE)\n## Avoid negative eigenvalues by taking square roots of dissimilarities\ncapscale(varespec ~ N + P + K + Condition(Al), varechem,\n dist = \"bray\", sqrt.dist= TRUE)\n## Principal coordinates analysis with extended dissimilarities\ncapscale(varespec ~ 1, dist=\"bray\", metaMDS = TRUE)\n## dbrda\ndbrda(varespec ~ N + P + K + Condition(Al), varechem,\n dist=\"bray\")\n## avoid negative eigenvalues also with Jaccard distances\ndbrda(varespec ~ N + P + K + Condition(Al), varechem,\n dist=\"jaccard\")\n\n\n"} {"package":"vegan","topic":"cascadeKM","snippet":"### Name: cascadeKM\n### Title: K-means partitioning using a range of values of K\n### Aliases: cascadeKM cIndexKM plot.cascadeKM orderingKM pregraphKM\n### Keywords: cluster\n\n### ** Examples\n\n # Partitioning a (10 x 10) data matrix of random numbers\n mat <- matrix(runif(100),10,10)\n res <- cascadeKM(mat, 2, 5, iter = 25, criterion = 'calinski') \n toto <- plot(res)\n \n # Partitioning an autocorrelated time series\n vec <- sort(matrix(runif(30),30,1))\n res <- cascadeKM(vec, 2, 5, iter = 25, criterion = 'calinski')\n toto <- plot(res)\n \n # Partitioning a large autocorrelated time series\n # Note that we remove the grid lines\n vec <- sort(matrix(runif(1000),1000,1))\n res <- cascadeKM(vec, 2, 7, iter = 10, 
criterion = 'calinski')\n toto <- plot(res, gridcol=NA)\n \n\n\n"} {"package":"vegan","topic":"cca","snippet":"### Name: cca\n### Title: [Partial] [Constrained] Correspondence Analysis and Redundancy\n### Analysis\n### Aliases: cca cca.default cca.formula rda rda.default rda.formula\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\n## Common but bad way: use all variables you happen to have in your\n## environmental data matrix\nvare.cca <- cca(varespec, varechem)\nvare.cca\nplot(vare.cca)\n## Formula interface and a better model\nvare.cca <- cca(varespec ~ Al + P*(K + Baresoil), data=varechem)\nvare.cca\nplot(vare.cca)\n## Partialling out and negative components of variance\ncca(varespec ~ Ca, varechem)\ncca(varespec ~ Ca + Condition(pH), varechem)\n## RDA\ndata(dune)\ndata(dune.env)\ndune.Manure <- rda(dune ~ Manure, dune.env)\nplot(dune.Manure) \n\n\n"} {"package":"vegan","topic":"clamtest","snippet":"### Name: clamtest\n### Title: Multinomial Species Classification Method (CLAM)\n### Aliases: clamtest summary.clamtest plot.clamtest\n### Keywords: htest\n\n### ** Examples\n\ndata(mite)\ndata(mite.env)\nsol <- with(mite.env, clamtest(mite, Shrub==\"None\", alpha=0.005))\nsummary(sol)\nhead(sol)\nplot(sol)\n\n\n"} {"package":"vegan","topic":"commsim","snippet":"### Name: commsim\n### Title: Create an Object for Null Model Algorithms\n### Aliases: commsim make.commsim print.commsim\n### Keywords: multivariate datagen\n\n### ** Examples\n\n## write the r00 algorithm\nf <- function(x, n, ...)\n array(replicate(n, sample(x)), c(dim(x), n))\n(cs <- commsim(\"r00\", fun=f, binary=TRUE,\n isSeq=FALSE, mode=\"integer\"))\n\n## retrieving the sequential swap algorithm\n(cs <- make.commsim(\"swap\"))\n\n## feeding a commsim object as argument\nmake.commsim(cs)\n\n## making the missing c1 model using r1 as a template\n## non-sequential algorithm for binary matrices\n## that preserves the species (column) frequencies,\n## but uses row marginal frequencies\n## as probabilities of selecting sites\nf <- function (x, n, nr, nc, rs, cs, ...) {\n out <- array(0L, c(nr, nc, n))\n J <- seq_len(nc)\n storage.mode(rs) <- \"double\"\n for (k in seq_len(n))\n for (j in J)\n out[sample.int(nr, cs[j], prob = rs), j, k] <- 1L\n out\n}\ncs <- make.commsim(\"r1\")\ncs$method <- \"c1\"\ncs$fun <- f\n\n## structural constraints\ndiagfun <- function(x, y) {\n c(sum = sum(y) == sum(x),\n fill = sum(y > 0) == sum(x > 0),\n rowSums = all(rowSums(y) == rowSums(x)),\n colSums = all(colSums(y) == colSums(x)),\n rowFreq = all(rowSums(y > 0) == rowSums(x > 0)),\n colFreq = all(colSums(y > 0) == colSums(x > 0)))\n}\nevalfun <- function(meth, x, n) {\n m <- nullmodel(x, meth)\n y <- simulate(m, nsim=n)\n out <- rowMeans(sapply(1:dim(y)[3],\n function(i) diagfun(attr(y, \"data\"), y[,,i])))\n z <- as.numeric(c(attr(y, \"binary\"), attr(y, \"isSeq\"),\n attr(y, \"mode\") == \"double\"))\n names(z) <- c(\"binary\", \"isSeq\", \"double\")\n c(z, out)\n}\nx <- matrix(rbinom(10*12, 1, 0.5)*rpois(10*12, 3), 12, 10)\nalgos <- make.commsim()\na <- t(sapply(algos, evalfun, x=x, n=10))\nprint(as.table(ifelse(a==1,1,0)), zero.print = \".\")\n\n\n"} {"package":"vegan","topic":"contribdiv","snippet":"### Name: contribdiv\n### Title: Contribution Diversity Approach\n### Aliases: contribdiv plot.contribdiv\n### Keywords: multivariate\n\n### ** Examples\n\n## Artificial example given in\n## Table 2 in Lu et al. 
2007\nx <- matrix(c(\n1/3,1/3,1/3,0,0,0,\n0,0,1/3,1/3,1/3,0,\n0,0,0,1/3,1/3,1/3),\n3, 6, byrow = TRUE,\ndimnames = list(LETTERS[1:3],letters[1:6]))\nx\n## Compare results with Table 2\ncontribdiv(x, \"richness\")\ncontribdiv(x, \"simpson\")\n## Relative contribution (C values), compare with Table 2\n(cd1 <- contribdiv(x, \"richness\", relative = TRUE, scaled = FALSE))\n(cd2 <- contribdiv(x, \"simpson\", relative = TRUE, scaled = FALSE))\n## Differentiation coefficients\nattr(cd1, \"diff.coef\") # D_ST\nattr(cd2, \"diff.coef\") # D_DT\n## BCI data set\ndata(BCI)\nopar <- par(mfrow=c(2,2))\nplot(contribdiv(BCI, \"richness\"), main = \"Absolute\")\nplot(contribdiv(BCI, \"richness\", relative = TRUE), main = \"Relative\")\nplot(contribdiv(BCI, \"simpson\"))\nplot(contribdiv(BCI, \"simpson\", relative = TRUE))\npar(opar)\n\n\n"} {"package":"vegan","topic":"decorana","snippet":"### Name: decorana\n### Title: Detrended Correspondence Analysis and Basic Reciprocal Averaging\n### Aliases: decorana summary.decorana print.summary.decorana plot.decorana\n### downweight scores.decorana points.decorana text.decorana\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\nvare.dca <- decorana(varespec)\nvare.dca\nsummary(vare.dca)\nplot(vare.dca)\n\n### the detrending rationale:\ngaussresp <- function(x,u) exp(-(x-u)^2/2)\nx <- seq(0,6,length=15) ## The gradient\nu <- seq(-2,8,len=23) ## The optima\npack <- outer(x,u,gaussresp)\nmatplot(x, pack, type=\"l\", main=\"Species packing\")\nopar <- par(mfrow=c(2,2))\nplot(scores(prcomp(pack)), asp=1, type=\"b\", main=\"PCA\")\nplot(scores(decorana(pack, ira=1)), asp=1, type=\"b\", main=\"CA\")\nplot(scores(decorana(pack)), asp=1, type=\"b\", main=\"DCA\")\nplot(scores(cca(pack ~ x), dis=\"sites\"), asp=1, type=\"b\", main=\"CCA\")\n\n### Let's add some noise:\nnoisy <- (0.5 + runif(length(pack)))*pack\npar(mfrow=c(2,1))\nmatplot(x, pack, type=\"l\", main=\"Ideal model\")\nmatplot(x, noisy, type=\"l\", main=\"Noisy model\")\npar(mfrow=c(2,2))\nplot(scores(prcomp(noisy)), type=\"b\", main=\"PCA\", asp=1)\nplot(scores(decorana(noisy, ira=1)), type=\"b\", main=\"CA\", asp=1)\nplot(scores(decorana(noisy)), type=\"b\", main=\"DCA\", asp=1)\nplot(scores(cca(noisy ~ x), dis=\"sites\"), asp=1, type=\"b\", main=\"CCA\")\npar(opar)\n\n\n"} {"package":"vegan","topic":"decostand","snippet":"### Name: decostand\n### Title: Standardization Methods for Community Ecology\n### Aliases: decostand wisconsin decobackstand\n### Keywords: multivariate manip\n\n### ** Examples\n\ndata(varespec)\nsptrans <- decostand(varespec, \"max\")\napply(sptrans, 2, max)\nsptrans <- wisconsin(varespec)\n\n# CLR transformation for rows, with pseudocount\nvarespec.clr <- decostand(varespec, \"clr\", pseudocount=1)\n\n# ALR transformation for rows, with pseudocount and reference sample\nvarespec.alr <- decostand(varespec, \"alr\", pseudocount=1, reference=1)\n\n## Chi-square: PCA similar but not identical to CA.\n## Use wcmdscale for weighted analysis and identical results.\nsptrans <- decostand(varespec, \"chi.square\")\nplot(procrustes(rda(sptrans), cca(varespec)))\n\n\n"} {"package":"vegan","topic":"deviance.cca","snippet":"### Name: deviance.cca\n### Title: Statistics Resembling Deviance and AIC for Constrained\n### Ordination\n### Aliases: deviance.cca deviance.rda extractAIC.cca\n### Keywords: multivariate models\n\n### ** Examples\n\n# The deviance of correspondence analysis equals Chi-square\ndata(dune)\ndata(dune.env)\nchisq.test(dune)\ndeviance(cca(dune))\n# Stepwise selection 
(forward from an empty model \"dune ~ 1\")\nord <- cca(dune ~ ., dune.env)\nstep(cca(dune ~ 1, dune.env), scope = formula(ord))\n\n\n"} {"package":"vegan","topic":"dispindmorisita","snippet":"### Name: dispindmorisita\n### Title: Morisita index of intraspecific aggregation\n### Aliases: dispindmorisita\n### Keywords: multivariate spatial\n\n### ** Examples\n\ndata(dune)\nx <- dispindmorisita(dune)\nx\ny <- dispindmorisita(dune, unique.rm = TRUE)\ny\ndim(x) ## with unique species\ndim(y) ## unique species removed\n\n\n"} {"package":"vegan","topic":"dispweight","snippet":"### Name: dispweight\n### Title: Dispersion-based weighting of species counts\n### Aliases: dispweight gdispweight summary.dispweight\n### Keywords: multivariate manip\n\n### ** Examples\n\ndata(mite, mite.env)\n## dispweight and its summary\nmite.dw <- with(mite.env, dispweight(mite, Shrub, nsimul = 99))\n## IGNORE_RDIFF_BEGIN\nsummary(mite.dw)\n## IGNORE_RDIFF_END\n## generalized dispersion weighting\nmite.dw <- gdispweight(mite ~ Shrub + WatrCont, data = mite.env)\nrda(mite.dw ~ Shrub + WatrCont, data = mite.env)\n\n\n"} {"package":"vegan","topic":"distconnected","snippet":"### Name: distconnected\n### Title: Connectedness of Dissimilarities\n### Aliases: distconnected no.shared\n### Keywords: multivariate\n\n### ** Examples\n\n## There are no disconnected data in vegan, and the following uses an\n## extremely low threshold limit for connectedness. This is for\n## illustration only, and not a recommended practice.\ndata(dune)\ndis <- vegdist(dune)\ngr <- distconnected(dis, toolong=0.4)\n# Make sites with no shared species as NA in Manhattan dissimilarities\ndis <- vegdist(dune, \"manhattan\")\nis.na(dis) <- no.shared(dune)\n\n\n"} {"package":"vegan","topic":"diversity","snippet":"### Name: diversity\n### Title: Ecological Diversity Indices\n### Aliases: diversity simpson.unb fisher.alpha specnumber\n### Keywords: univar\n\n### ** Examples\n\ndata(BCI, BCI.env)\nH <- diversity(BCI)\nsimp <- diversity(BCI, \"simpson\")\ninvsimp <- diversity(BCI, \"inv\")\n## Unbiased Simpson\nunbias.simp <- simpson.unb(BCI)\n## Fisher alpha\nalpha <- fisher.alpha(BCI)\n## Plot all\npairs(cbind(H, simp, invsimp, unbias.simp, alpha), pch=\"+\", col=\"blue\")\n## Species richness (S) and Pielou's evenness (J):\nS <- specnumber(BCI) ## rowSums(BCI > 0) does the same...\nJ <- H/log(S)\n## beta diversity defined as gamma/alpha - 1:\n## alpha is the average no. 
of species in a group, and gamma is the\n## total number of species in the group\n(alpha <- with(BCI.env, tapply(specnumber(BCI), Habitat, mean)))\n(gamma <- with(BCI.env, specnumber(BCI, Habitat)))\ngamma/alpha - 1\n## similar calculations with Shannon diversity\n(alpha <- with(BCI.env, tapply(diversity(BCI), Habitat, mean))) # average\n(gamma <- with(BCI.env, diversity(BCI, groups=Habitat))) # pooled\n## additive beta diversity based on Shannon index\ngamma-alpha\n\n\n"} {"package":"vegan","topic":"dune","snippet":"### Name: dune\n### Title: Vegetation and Environment in Dutch Dune Meadows.\n### Aliases: dune dune.env\n### Keywords: datasets\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\n\n\n"} {"package":"vegan","topic":"dune.taxon","snippet":"### Name: dune.taxon\n### Title: Taxonomic Classification and Phylogeny of Dune Meadow Species\n### Aliases: dune.taxon dune.phylodis\n### Keywords: datasets\n\n### ** Examples\n \n data(dune.taxon) \n data(dune.phylodis)\n\n\n"} {"package":"vegan","topic":"eigenvals","snippet":"### Name: eigenvals\n### Title: Extract Eigenvalues from an Ordination Object\n### Aliases: eigenvals eigenvals.default eigenvals.prcomp\n### eigenvals.princomp eigenvals.cca eigenvals.wcmdscale eigenvals.pcnm\n### eigenvals.dudi eigenvals.pca eigenvals.pco eigenvals.decorana\n### summary.eigenvals\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\nmod <- cca(varespec ~ Al + P + K, varechem)\nev <- eigenvals(mod)\nev\nsummary(ev)\n\n## choose which eigenvalues to return\neigenvals(mod, model = "unconstrained\")\n\n\n"} {"package":"vegan","topic":"envfit","snippet":"### Name: envfit\n### Title: Fits an Environmental Vector or Factor onto an Ordination\n### Aliases: envfit envfit.default envfit.formula vectorfit factorfit\n### plot.envfit scores.envfit labels.envfit\n### Keywords: multivariate aplot htest\n\n### ** Examples\n\ndata(varespec, varechem)\nlibrary(MASS)\nord <- metaMDS(varespec)\n(fit <- envfit(ord, varechem, perm = 999))\nscores(fit, \"vectors\")\nplot(ord)\nplot(fit)\nplot(fit, p.max = 0.05, col = \"red\")\n## Adding fitted arrows to CCA. 
We use \"lc\" scores, and hope\n## that arrows are scaled similarly in cca and envfit plots\nord <- cca(varespec ~ Al + P + K, varechem)\nplot(ord, type=\"p\")\nfit <- envfit(ord, varechem, perm = 999, display = \"lc\")\nplot(fit, p.max = 0.05, col = \"red\")\n## 'scaling' must be set similarly in envfit and in ordination plot\nplot(ord, type = \"p\", scaling = \"sites\")\nfit <- envfit(ord, varechem, perm = 0, display = \"lc\", scaling = \"sites\")\nplot(fit, col = \"red\")\n\n## Class variables, formula interface, and displaying the\n## inter-class variability with ordispider, and semitransparent\n## white background for labels (semitransparent colours are not\n## supported by all graphics devices)\ndata(dune)\ndata(dune.env)\nord <- cca(dune)\nfit <- envfit(ord ~ Moisture + A1, dune.env, perm = 0)\nplot(ord, type = \"n\")\nwith(dune.env, ordispider(ord, Moisture, col=\"skyblue\"))\nwith(dune.env, points(ord, display = \"sites\", col = as.numeric(Moisture),\n pch=16))\nplot(fit, cex=1.2, axis=TRUE, bg = rgb(1, 1, 1, 0.5))\n## Use shorter labels for factor centroids\nlabels(fit)\nplot(ord)\nplot(fit, labels=list(factors = paste(\"M\", c(1,2,4,5), sep = \"\")),\n bg = rgb(1,1,0,0.5))\n\n\n"} {"package":"vegan","topic":"eventstar","snippet":"### Name: eventstar\n### Title: Scale Parameter at the Minimum of the Tsallis Evenness Profile\n### Aliases: eventstar\n### Keywords: optimize multivariate utilities\n\n### ** Examples\n\ndata(BCI)\n(x <- eventstar(BCI[1:5,]))\n## profiling\ny <- as.numeric(BCI[10,])\n(z <- eventstar(y))\nq <- seq(0, 2, 0.05)\nEprof <- tsallis(y, scales=q, norm=TRUE)\nHprof <- tsallis(y, scales=q)\nDprof <- tsallis(y, scales=q, hill=TRUE)\nopar <- par(mfrow=c(3,1))\nplot(q, Eprof, type=\"l\", main=\"Evenness\")\nabline(v=z$qstar, h=tsallis(y, scales=z$qstar, norm=TRUE), col=2)\nplot(q, Hprof, type=\"l\", main=\"Diversity\")\nabline(v=z$qstar, h=tsallis(y, scales=z$qstar), col=2)\nplot(q, Dprof, type=\"l\", main=\"Effective number of species\")\nabline(v=z$qstar, h=tsallis(y, scales=z$qstar, hill=TRUE), col=2)\npar(opar)\n\n\n"} {"package":"vegan","topic":"fisherfit","snippet":"### Name: fisherfit\n### Title: Fit Fisher's Logseries and Preston's Lognormal Model to\n### Abundance Data\n### Aliases: fisherfit as.fisher plot.fisherfit prestonfit prestondistr\n### as.preston plot.prestonfit lines.prestonfit plot.preston\n### lines.preston plot.fisher veiledspec\n### Keywords: univar distribution\n\n### ** Examples\n\ndata(BCI)\nmod <- fisherfit(BCI[5,])\nmod\n# prestonfit seems to need large samples\nmod.oct <- prestonfit(colSums(BCI))\nmod.ll <- prestondistr(colSums(BCI))\nmod.oct\nmod.ll\nplot(mod.oct) \nlines(mod.ll, line.col=\"blue3\") # Different\n## Smoothed density\nden <- density(log2(colSums(BCI)))\nlines(den$x, ncol(BCI)*den$y, lwd=2) # Fairly similar to mod.oct\n## Extrapolated richness\nveiledspec(mod.oct)\nveiledspec(mod.ll)\n\n\n"} {"package":"vegan","topic":"goodness","snippet":"### Name: goodness.cca\n### Title: Diagnostic Tools for [Constrained] Ordination (CCA, RDA, DCA,\n### CA, PCA)\n### Aliases: goodness goodness.cca inertcomp spenvcor intersetcor vif.cca\n### alias.cca\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\nmod <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)\ngoodness(mod, addprevious = TRUE)\ngoodness(mod, addprevious = TRUE, summ = TRUE)\n# Inertia components\ninertcomp(mod, prop = TRUE)\ninertcomp(mod)\n# vif.cca\nvif.cca(mod)\n# Aliased constraints\nmod <- cca(dune ~ ., 
dune.env)\nmod\nvif.cca(mod)\nalias(mod)\nwith(dune.env, table(Management, Manure))\n# The standard correlations (not recommended)\n## IGNORE_RDIFF_BEGIN\nspenvcor(mod)\nintersetcor(mod)\n## IGNORE_RDIFF_END\n\n\n"} {"package":"vegan","topic":"goodness.metaMDS","snippet":"### Name: goodness.metaMDS\n### Title: Goodness of Fit and Shepard Plot for Nonmetric Multidimensional\n### Scaling\n### Aliases: goodness.metaMDS goodness.monoMDS stressplot\n### stressplot.default stressplot.monoMDS\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\nmod <- metaMDS(varespec)\nstressplot(mod)\ngof <- goodness(mod)\ngof\nplot(mod, display = \"sites\", type = \"n\")\npoints(mod, display = \"sites\", cex = 2*gof/mean(gof))\n\n\n"} {"package":"vegan","topic":"indpower","snippet":"### Name: indpower\n### Title: Indicator Power of Species\n### Aliases: indpower\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\n## IP values\nip <- indpower(dune)\n## and TIP values\ndiag(ip) <- NA\n(TIP <- rowMeans(ip, na.rm=TRUE))\n\n## p value calculation for a species\n## from Halme et al. 2009\n## i is ID for the species\ni <- 1\nfun <- function(x, i) indpower(x)[i,-i]\n## 'c0' randomizes species occurrences\nos <- oecosimu(dune, fun, \"c0\", i=i, nsimul=99)\n## get z values from oecosimu output\nz <- os$oecosimu$z\n## p-value\n(p <- sum(z) / sqrt(length(z)))\n## 'heterogeneity' measure\n(chi2 <- sum((z - mean(z))^2))\npchisq(chi2, df=length(z)-1)\n## Halme et al.'s suggested output\nout <- c(TIP=TIP[i], \n significance=p,\n heterogeneity=chi2,\n minIP=min(fun(dune, i=i)),\n varIP=sd(fun(dune, i=i)^2))\nout\n\n\n"} {"package":"vegan","topic":"hatvalues.cca","snippet":"### Name: influence.cca\n### Title: Linear Model Diagnostics for Constrained Ordination\n### Aliases: hatvalues.cca hatvalues.rda sigma.cca rstandard.cca\n### rstudent.cca cooks.distance.cca SSD.cca vcov.cca qr.cca\n### df.residual.cca\n### Keywords: models multivariate\n\n### ** Examples\n\n\ndata(varespec, varechem)\nmod <- cca(varespec ~ Al + P + K, varechem)\n## leverage\nhatvalues(mod)\nplot(hatvalues(mod), type = \"h\")\n## ordination plot with leverages: points with high leverage have\n## similar LC and WA scores\nplot(mod, type = \"n\")\nordispider(mod) # segment from LC to WA scores\npoints(mod, dis=\"si\", cex=5*hatvalues(mod), pch=21, bg=2) # WA scores\ntext(mod, dis=\"bp\", col=4)\n\n## deviation and influence\nhead(rstandard(mod))\nhead(cooks.distance(mod))\n\n## Influence measures from lm\ny <- decostand(varespec, \"chi.square\") # needed in cca\ny1 <- with(y, Cladstel) # take one species for lm\nlmod1 <- lm(y1 ~ Al + P + K, varechem, weights = rowSums(varespec))\n## numerically identical within 2e-15\nall(abs(cooks.distance(lmod1) - cooks.distance(mod)[, \"Cladstel\"]) < 1e-8)\n\n## t-values of regression coefficients based on type = \"canoco\"\n## residuals\ncoef(mod)\ncoef(mod)/sqrt(diag(vcov(mod, type = \"canoco\")))\n\n\n"} {"package":"vegan","topic":"isomap","snippet":"### Name: isomap\n### Title: Isometric Feature Mapping Ordination\n### Aliases: isomap isomapdist plot.isomap summary.isomap\n### Keywords: multivariate\n\n### ** Examples\n\n## The following examples also overlay minimum spanning tree to\n## the graphics in red.\nop <- par(mar=c(4,4,1,1)+0.2, mfrow=c(2,2))\ndata(BCI)\ndis <- vegdist(BCI)\ntr <- spantree(dis)\npl <- ordiplot(cmdscale(dis), main=\"cmdscale\")\nlines(tr, pl, col=\"red\")\nord <- isomap(dis, k=3)\nord\npl <- plot(ord, main=\"isomap k=3\")\nlines(tr, pl, col=\"red\")\npl <- 
plot(isomap(dis, k=5), main=\"isomap k=5\")\nlines(tr, pl, col=\"red\")\npl <- plot(isomap(dis, epsilon=0.45), main=\"isomap epsilon=0.45\")\nlines(tr, pl, col=\"red\")\npar(op)\n## colour points and web by the dominant species\ndom <- apply(BCI, 1, which.max)\n## need nine colours, but default palette has only eight\nop <- palette(c(palette(\"default\"), \"sienna\"))\nplot(ord, pch = 16, col = dom, n.col = dom) \npalette(op)\n\n\n"} {"package":"vegan","topic":"kendall.global","snippet":"### Name: kendall.global\n### Title: Kendall coefficient of concordance\n### Aliases: kendall.global kendall.post\n### Keywords: multivariate nonparametric\n\n### ** Examples\n\ndata(mite)\nmite.hel <- decostand(mite, \"hel\")\n\n# Reproduce the results shown in Table 2 of Legendre (2005), a single group\nmite.small <- mite.hel[c(4,9,14,22,31,34,45,53,61,69),c(13:15,23)]\nkendall.global(mite.small, nperm=49)\nkendall.post(mite.small, mult=\"holm\", nperm=49)\n\n# Reproduce the results shown in Tables 3 and 4 of Legendre (2005), 2 groups\ngroup <-c(1,1,2,1,1,1,1,1,2,1,1,1,1,1,1,2,1,2,1,1,1,1,2,1,2,1,1,1,1,1,2,2,2,2,2)\nkendall.global(mite.hel, group=group, nperm=49)\nkendall.post(mite.hel, group=group, mult=\"holm\", nperm=49)\n\n# NOTE: 'nperm' argument usually needs to be larger than 49.\n# It was set to this low value for demonstration purposes.\n\n\n"} {"package":"vegan","topic":"linestack","snippet":"### Name: linestack\n### Title: Plots One-dimensional Diagrams without Overwriting Labels\n### Aliases: linestack\n### Keywords: hplot aplot\n\n### ** Examples\n\n## First DCA axis\ndata(dune)\nord <- decorana(dune)\nlinestack(scores(ord, choices=1, display=\"sp\"))\nlinestack(scores(ord, choices=1, display=\"si\"), side=\"left\", add=TRUE)\ntitle(main=\"DCA axis 1\")\n\n## Expressions as labels\nN <- 10\t\t\t\t\t# Number of sites\ndf <- data.frame(Ca = rlnorm(N, 2), NO3 = rlnorm(N, 4),\n SO4 = rlnorm(N, 10), K = rlnorm(N, 3))\nord <- rda(df, scale = TRUE)\n### vector of expressions for labels\nlabs <- expression(Ca^{2+phantom()},\n NO[3]^{-phantom()},\n SO[4]^{2-phantom()},\n K^{+phantom()})\nscl <- \"sites\"\nlinestack(scores(ord, choices = 1, display = \"species\", scaling = scl),\n labels = labs, air = 2)\nlinestack(scores(ord, choices = 1, display = \"site\", scaling = scl),\n side = \"left\", add = TRUE)\ntitle(main = \"PCA axis 1\")\n\n\n"} {"package":"vegan","topic":"make.cepnames","snippet":"### Name: make.cepnames\n### Title: Abbreviates a Botanical or Zoological Latin Name into an\n### Eight-character Name\n### Aliases: make.cepnames\n### Keywords: character\n\n### ** Examples\n\nmake.cepnames(c(\"Aa maderoi\", \"Poa sp.\", \"Cladina rangiferina\",\n\"Cladonia cornuta\", \"Cladonia cornuta var. 
groenlandica\",\n\"Cladonia rangiformis\", \"Bryoerythrophyllum\"))\ndata(BCI)\ncolnames(BCI) <- make.cepnames(colnames(BCI))\n\n\n"} {"package":"vegan","topic":"mantel","snippet":"### Name: mantel\n### Title: Mantel and Partial Mantel Tests for Dissimilarity Matrices\n### Aliases: mantel mantel.partial\n### Keywords: multivariate htest\n\n### ** Examples\n\n## Is vegetation related to environment?\ndata(varespec)\ndata(varechem)\nveg.dist <- vegdist(varespec) # Bray-Curtis\nenv.dist <- vegdist(scale(varechem), \"euclid\")\nmantel(veg.dist, env.dist)\nmantel(veg.dist, env.dist, method=\"spear\")\n\n\n"} {"package":"vegan","topic":"mantel.correlog","snippet":"### Name: mantel.correlog\n### Title: Mantel Correlogram\n### Aliases: mantel.correlog plot.mantel.correlog\n### Keywords: multivariate\n\n### ** Examples\n \n# Mite data available in \"vegan\"\ndata(mite) \ndata(mite.xy) \nmite.hel <- decostand(mite, \"hellinger\")\n\n# Detrend the species data by regression on the site coordinates\nmite.hel.resid <- resid(lm(as.matrix(mite.hel) ~ ., data=mite.xy))\n\n# Compute the detrended species distance matrix\nmite.hel.D <- dist(mite.hel.resid)\n\n# Compute Mantel correlogram with cutoff, Pearson statistic\nmite.correlog <- mantel.correlog(mite.hel.D, XY=mite.xy, nperm=49)\nsummary(mite.correlog)\nmite.correlog \n# or: print(mite.correlog)\n# or: print.mantel.correlog(mite.correlog)\nplot(mite.correlog)\n\n# Compute Mantel correlogram without cutoff, Spearman statistic\nmite.correlog2 <- mantel.correlog(mite.hel.D, XY=mite.xy, cutoff=FALSE, \n r.type=\"spearman\", nperm=49)\nsummary(mite.correlog2)\nmite.correlog2\nplot(mite.correlog2)\n\n# NOTE: 'nperm' argument usually needs to be larger than 49.\n# It was set to this low value for demonstration purposes.\n\n\n\n"} {"package":"vegan","topic":"metaMDS","snippet":"### Name: metaMDS\n### Title: Nonmetric Multidimensional Scaling with Stable Solution from\n### Random Starts, Axis Scaling and Species Scores\n### Aliases: metaMDS metaMDSdist metaMDSiter metaMDSredist initMDS postMDS\n### plot.metaMDS points.metaMDS text.metaMDS scores.metaMDS\n### Keywords: multivariate\n\n### ** Examples\n\n## The recommended way of running NMDS (Minchin 1987)\n##\ndata(dune)\n## IGNORE_RDIFF_BEGIN\n## Global NMDS using monoMDS\nsol <- metaMDS(dune)\nsol\nplot(sol, type=\"t\")\n## Start from previous best solution\nsol <- metaMDS(dune, previous.best = sol)\n## Local NMDS and stress 2 of monoMDS\nsol2 <- metaMDS(dune, model = \"local\", stress=2)\nsol2\n## Use Arrhenius exponent 'z' as a binary dissimilarity measure\nsol <- metaMDS(dune, distfun = betadiver, distance = \"z\")\nsol\n## IGNORE_RDIFF_END\n\n\n"} {"package":"vegan","topic":"mite","snippet":"### Name: mite\n### Title: Oribatid Mite Data with Explanatory Variables\n### Aliases: mite mite.env mite.pcnm mite.xy\n### Keywords: datasets\n\n### ** Examples\n\ndata(mite)\n\n\n"} {"package":"vegan","topic":"monoMDS","snippet":"### Name: monoMDS\n### Title: Global and Local Non-metric Multidimensional Scaling and Linear\n### and Hybrid Scaling\n### Aliases: monoMDS scores.monoMDS plot.monoMDS points.monoMDS\n### text.monoMDS\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\ndis <- vegdist(dune)\nm <- monoMDS(dis, model = \"loc\")\nm\nplot(m)\n\n\n"} {"package":"vegan","topic":"mrpp","snippet":"### Name: mrpp\n### Title: Multi Response Permutation Procedure and Mean Dissimilarity\n### Matrix\n### Aliases: mrpp meandist summary.meandist plot.meandist\n### Keywords: multivariate nonparametric 
htest\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\ndune.mrpp <- with(dune.env, mrpp(dune, Management))\ndune.mrpp\n\n# Save and change plotting parameters\ndef.par <- par(no.readonly = TRUE)\nlayout(matrix(1:2,nr=1))\n\nplot(dune.ord <- metaMDS(dune, trace=0), type=\"text\", display=\"sites\" )\nwith(dune.env, ordihull(dune.ord, Management))\n\nwith(dune.mrpp, {\n fig.dist <- hist(boot.deltas, xlim=range(c(delta,boot.deltas)), \n main=\"Test of Differences Among Groups\")\n abline(v=delta); \n text(delta, 2*mean(fig.dist$counts), adj = -0.5,\n expression(bold(delta)), cex=1.5 ) }\n)\npar(def.par)\n## meandist\ndune.md <- with(dune.env, meandist(vegdist(dune), Management))\ndune.md\nsummary(dune.md)\nplot(dune.md)\nplot(dune.md, kind=\"histogram\")\n\n\n"} {"package":"vegan","topic":"mso","snippet":"### Name: mso\n### Title: Functions for performing and displaying a spatial partitioning\n### of cca or rda results\n### Aliases: mso msoplot\n### Keywords: spatial multivariate\n\n### ** Examples\n\n## Reconstruct worked example of Wagner (submitted):\nX <- matrix(c(1, 2, 3, 2, 1, 0), 3, 2)\nY <- c(3, -1, -2)\ntmat <- c(1:3)\n## Canonical correspondence analysis (cca):\nExample.cca <- cca(X, Y)\nExample.cca <- mso(Example.cca, tmat)\nmsoplot(Example.cca)\nExample.cca$vario\n\n## Correspondence analysis (ca):\nExample.ca <- mso(cca(X), tmat)\nmsoplot(Example.ca)\n\n## Unconstrained ordination with test for autocorrelation\n## using oribatid mite data set as in Wagner (2004)\ndata(mite)\ndata(mite.env)\ndata(mite.xy)\n\nmite.cca <- cca(log(mite + 1))\nmite.cca <- mso(mite.cca, mite.xy, grain = 1, permutations = 99)\nmsoplot(mite.cca)\nmite.cca\n\n## Constrained ordination with test for residual autocorrelation\n## and scale-invariance of species-environment relationships\nmite.cca <- cca(log(mite + 1) ~ SubsDens + WatrCont + Substrate + Shrub + Topo, mite.env)\nmite.cca <- mso(mite.cca, mite.xy, permutations = 99)\nmsoplot(mite.cca)\nmite.cca\n\n\n"} {"package":"vegan","topic":"multipart","snippet":"### Name: multipart\n### Title: Multiplicative Diversity Partitioning\n### Aliases: multipart multipart.default multipart.formula\n### Keywords: multivariate\n\n### ** Examples\n\n## NOTE: 'nsimul' argument usually needs to be >= 99\n## here much lower value is used for demonstration\n\ndata(mite)\ndata(mite.xy)\ndata(mite.env)\n## Function to get equal area partitions of the mite data\ncutter <- function (x, cut = seq(0, 10, by = 2.5)) {\n out <- rep(1, length(x))\n for (i in 2:(length(cut) - 1))\n out[which(x > cut[i] & x <= cut[(i + 1)])] <- i\n return(out)}\n## The hierarchy of sample aggregation\nlevsm <- with(mite.xy, data.frame(\n l2=cutter(y, cut = seq(0, 10, by = 2.5)),\n l3=cutter(y, cut = seq(0, 10, by = 5))))\n## Multiplicative diversity partitioning\nmultipart(mite, levsm, index=\"renyi\", scales=1, nsimul=19)\nmultipart(mite ~ l2 + l3, levsm, index=\"renyi\", scales=1, nsimul=19)\nmultipart(mite ~ ., levsm, index=\"renyi\", scales=1, nsimul=19, relative=TRUE)\nmultipart(mite ~ ., levsm, index=\"renyi\", scales=1, nsimul=19, global=TRUE)\n\n\n"} {"package":"vegan","topic":"nestedtemp","snippet":"### Name: nestedtemp\n### Title: Nestedness Indices for Communities of Islands or Patches\n### Aliases: nestedtemp nestedchecker nestedn0 nesteddisc nestednodf\n### nestedbetasor nestedbetajac plot.nestedtemp plot.nestednodf\n### Keywords: univar\n\n### ** Examples\n\ndata(sipoo)\n## Matrix temperature\nout <- nestedtemp(sipoo)\nout\nplot(out)\nplot(out, kind=\"incid\")\n## Use oecosimu to 
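test any nestedness statistic against a null model.\n## For instance, matrix temperature itself can be tested the same way (a\n## sketch; oecosimu() accepts any function whose result has a 'statistic'\n## component, as nestedtemp's does):\noecosimu(sipoo, nestedtemp, "r00")\n## Use oecosimu to 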
assess the non-randomness of checker board units\nnestedchecker(sipoo)\noecosimu(sipoo, nestedchecker, \"quasiswap\")\n## Another Null model and standardized checkerboard score\noecosimu(sipoo, nestedchecker, \"r00\", statistic = \"C.score\")\n\n\n"} {"package":"vegan","topic":"nullmodel","snippet":"### Name: nullmodel\n### Title: Null Model and Simulation\n### Aliases: nullmodel simmat print.nullmodel simulate.nullmodel\n### update.nullmodel str.nullmodel print.simmat smbind\n### Keywords: multivariate datagen\n\n### ** Examples\n\ndata(mite)\nx <- as.matrix(mite)[1:12, 21:30]\n\n## non-sequential nullmodel\n(nm <- nullmodel(x, \"r00\"))\n(sm <- simulate(nm, nsim=10))\n\n## sequential nullmodel\n(nm <- nullmodel(x, \"swap\"))\n(sm1 <- simulate(nm, nsim=10, thin=5))\n(sm2 <- simulate(nm, nsim=10, thin=5))\n\n## sequential nullmodel with burnin and extra updating\n(nm <- nullmodel(x, \"swap\"))\n(sm1 <- simulate(nm, burnin=10, nsim=10, thin=5))\n(sm2 <- simulate(nm, nsim=10, thin=5))\n\n## sequential nullmodel with separate initial burnin\n(nm <- nullmodel(x, \"swap\"))\nnm <- update(nm, nsim=10)\n(sm2 <- simulate(nm, nsim=10, thin=5))\n\n## combining multiple simmat objects\n\n## stratification\nnm1 <- nullmodel(x[1:6,], \"r00\")\nsm1 <- simulate(nm1, nsim=10)\nnm2 <- nullmodel(x[7:12,], \"r00\")\nsm2 <- simulate(nm2, nsim=10)\nsmbind(sm1, sm2, MARGIN=1)\n\n## binding subsequent samples from sequential algorithms\n## start, end, thin retained\nnm <- nullmodel(x, \"swap\")\nnm <- update(nm, nsim=10)\nsm1 <- simulate(nm, nsim=10, thin=5)\nsm2 <- simulate(nm, nsim=20, thin=5)\nsm3 <- simulate(nm, nsim=10, thin=5)\nsmbind(sm3, sm2, sm1, MARGIN=3)\n\n## 'replicate' based usage which is similar to the output\n## of 'parLapply' or 'mclapply' in the 'parallel' package\n## start, end, thin are set, also noting number of chains\nsmfun <- function(x, burnin, nsim, thin) {\n nm <- nullmodel(x, \"swap\")\n nm <- update(nm, nsim=burnin)\n simulate(nm, nsim=nsim, thin=thin)\n}\nsmlist <- replicate(3, smfun(x, burnin=50, nsim=10, thin=5), simplify=FALSE)\nsmbind(smlist, MARGIN=3) # Number of permuted matrices = 30\n\n## Not run: \n##D ## parallel null model calculations\n##D library(parallel)\n##D \n##D if (.Platform$OS.type == \"unix\") {\n##D ## forking on Unix systems\n##D smlist <- mclapply(1:3, function(i) smfun(x, burnin=50, nsim=10, thin=5))\n##D smbind(smlist, MARGIN=3)\n##D }\n##D \n##D ## socket type cluster, works on all platforms\n##D cl <- makeCluster(3)\n##D clusterEvalQ(cl, library(vegan))\n##D clusterExport(cl, c(\"smfun\", \"x\"))\n##D smlist <- parLapply(cl, 1:3, function(i) smfun(x, burnin=50, nsim=10, thin=5))\n##D stopCluster(cl)\n##D smbind(smlist, MARGIN=3)\n## End(Not run)\n\n\n"} {"package":"vegan","topic":"oecosimu","snippet":"### Name: oecosimu\n### Title: Evaluate Statistics with Null Models of Biological Communities\n### Aliases: oecosimu as.ts.oecosimu toCoda toCoda.oecosimu\n### Keywords: multivariate datagen nonparametric\n\n### ** Examples\n\n## Use the first eigenvalue of correspondence analysis as an index\n## of structure: a model for making your own functions.\ndata(sipoo)\n## Traditional nestedness statistics (number of checkerboard units)\noecosimu(sipoo, nestedchecker, \"r0\")\n## sequential model, one-sided test, a vector statistic\nout <- oecosimu(sipoo, decorana, \"swap\", burnin=100, thin=10, \n statistic=\"evals\", alt = \"greater\")\nout\n## Inspect the swap sequence as a time series object\nplot(as.ts(out))\nlag.plot(as.ts(out))\nacf(as.ts(out))\n## Density 
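plots visualise the simulated null distributions.\n## The standardized effect size printed by oecosimu is\n## SES = (obs - mean(sim)) / sd(sim); the permustats summary reports it for\n## each statistic:\nsummary(permustats(out))\n## Density 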
plot\ndensityplot(permustats(out), as.table = TRUE, layout = c(1,4))\n## Use quantitative null models to compare\n## mean Bray-Curtis dissimilarities\ndata(dune)\nmeandist <- function(x) mean(vegdist(x, \"bray\"))\nmbc1 <- oecosimu(dune, meandist, \"r2dtable\")\nmbc1\n\n## Define your own null model as a 'commsim' function: shuffle cells\n## in each row\nfoo <- function(x, n, nr, nc, ...) {\n out <- array(0, c(nr, nc, n))\n for (k in seq_len(n))\n out[,,k] <- apply(x, 2, function(z) sample(z, length(z)))\n out\n}\ncf <- commsim(\"myshuffle\", foo, isSeq = FALSE, binary = FALSE, \n mode = \"double\")\noecosimu(dune, meandist, cf)\n\n## Use pre-built null model\nnm <- simulate(nullmodel(sipoo, \"curveball\"), 99)\noecosimu(nm, nestedchecker)\n## Several chains of a sequential model -- this can be generalized\n## for parallel processing (see ?smbind)\nnm <- replicate(5, simulate(nullmodel(sipoo, \"swap\"), 99,\n thin=10, burnin=100), simplify = FALSE)\n## nm is now a list of nullmodels: use smbind to combine these into one\n## nullmodel with several chains\n## IGNORE_RDIFF_BEGIN\nnm <- smbind(nm, MARGIN = 3)\nnm\noecosimu(nm, nestedchecker)\n## IGNORE_RDIFF_END\n## After this you can use toCoda() and tools in the coda package to\n## analyse the chains (these will show that thin, burnin and nsimul are\n## all too low for real analysis).\n\n\n"} {"package":"vegan","topic":"ordiArrowMul","snippet":"### Name: ordiArrowTextXY\n### Title: Support Functions for Drawing Vectors\n### Aliases: ordiArrowMul ordiArrowTextXY\n### Keywords: utilities\n\n### ** Examples\n\n ## Scale arrows by hand to fill 80% of the plot\n ## Biplot arrows by hand\n data(varespec, varechem)\n ord <- cca(varespec ~ Al + P + K, varechem)\n plot(ord, display = c(\"species\",\"sites\"))\n\n ## biplot scores\n bip <- scores(ord, choices = 1:2, display = \"bp\")\n\n ## scaling factor for arrows to fill 80% of plot\n (mul <- ordiArrowMul(bip, fill = 0.8))\n bip.scl <- bip * mul # Scale the biplot scores\n labs <- rownames(bip) # Arrow labels\n\n ## calculate coordinate of labels for arrows\n (bip.lab <- ordiArrowTextXY(bip.scl, rescale = FALSE, labels = labs))\n\n ## draw arrows and text labels\n arrows(0, 0, bip.scl[,1], bip.scl[,2], length = 0.1)\n text(bip.lab, labels = labs)\n\n ## Handling of ordination objects directly\n mul2 <- ordiArrowMul(ord, display = \"bp\", fill = 0.8)\n stopifnot(all.equal(mul, mul2))\n\n\n"} {"package":"vegan","topic":"ordiarrows","snippet":"### Name: ordiarrows\n### Title: Add Arrows and Line Segments to Ordination Diagrams\n### Aliases: ordiarrows ordisegments ordigrid\n### Keywords: aplot\n\n### ** Examples\n\nexample(pyrifos)\nmod <- rda(pyrifos)\nplot(mod, type = \"n\")\n## Annual succession by ditches, colour by dose\nordiarrows(mod, ditch, label = TRUE, col = as.numeric(dose))\nlegend(\"topright\", levels(dose), lty=1, col=1:5, title=\"Dose\")\n## Show only control and highest Pyrifos treatment\nplot(mod, type = \"n\")\nordiarrows(mod, ditch, label = TRUE, \n show.groups = c(\"2\", \"3\", \"5\", \"11\"))\nordiarrows(mod, ditch, label = TRUE, show = c(\"6\", \"9\"),\n col = 2)\nlegend(\"topright\", c(\"Control\", \"Pyrifos 44\"), lty = 1, col = c(1,2))\n\n\n"} {"package":"vegan","topic":"ordihull","snippet":"### Name: ordihull\n### Title: Display Groups or Factor Levels in Ordination Diagrams\n### Aliases: ordihull ordispider ordiellipse ordibar ordicluster\n### summary.ordihull scores.ordihull summary.ordiellipse ordiareatest\n### Keywords: aplot\n\n### ** 
Examples\n\ndata(dune)\ndata(dune.env)\nmod <- cca(dune ~ Management, dune.env)\nplot(mod, type=\"n\", scaling = \"symmetric\")\n## Catch the invisible result of ordihull...\npl <- with(dune.env, ordihull(mod, Management,\n scaling = \"symmetric\", label = TRUE))\n## ... and find centres and areas of the hulls\nsummary(pl)\n## use more colours and add ellipsoid hulls\nplot(mod, type = \"n\")\npl <- with(dune.env, ordihull(mod, Management,\n scaling = \"symmetric\", col = 1:4,\n draw=\"polygon\", label =TRUE))\nwith(dune.env, ordiellipse(mod, Management, scaling = \"symmetric\",\n kind = \"ehull\", col = 1:4, lwd=3))\n## ordispider to connect WA and LC scores\nplot(mod, dis=c(\"wa\",\"lc\"), type=\"p\")\nordispider(mod)\n## Other types of plots\nplot(mod, type = \"p\", display=\"sites\")\ncl <- hclust(vegdist(dune))\nordicluster(mod, cl, prune=3, col = cutree(cl, 4))\n## confidence ellipse: location of the class centroids\nplot(mod, type=\"n\", display = \"sites\")\nwith(dune.env, text(mod, display=\"sites\", labels = as.character(Management),\n col=as.numeric(Management)))\npl <- with(dune.env, ordiellipse(mod, Management, kind=\"se\", conf=0.95, lwd=2,\n draw = \"polygon\", col=1:4, border=1:4,\n alpha=63))\nsummary(pl)\n## add confidence bars\nwith(dune.env, ordibar(mod, Management, kind=\"se\", conf=0.95, lwd=2, col=1:4,\n label=TRUE))\n\n\n"} {"package":"vegan","topic":"ordilabel","snippet":"### Name: ordilabel\n### Title: Add Text on Non-transparent Label to an Ordination Plot.\n### Aliases: ordilabel\n### Keywords: aplot\n\n### ** Examples\n\ndata(dune)\nord <- cca(dune)\nplot(ord, type = \"n\")\nordilabel(ord, dis=\"sites\", cex=1.2, font=3, fill=\"hotpink\", col=\"blue\")\n## You may prefer separate plots, but here species as well\nordilabel(ord, dis=\"sp\", font=2, priority=colSums(dune))\n\n\n"} {"package":"vegan","topic":"ordiplot","snippet":"### Name: ordiplot\n### Title: Alternative plot and identify Functions for Ordination\n### Aliases: ordiplot identify.ordiplot scores.ordiplot points.ordiplot\n### text.ordiplot\n### Keywords: hplot iplot aplot\n\n### ** Examples\n\n## Draw a plot for a non-vegan ordination (cmdscale).\ndata(dune)\ndune.dis <- vegdist(wisconsin(dune))\ndune.mds <- cmdscale(dune.dis, eig = TRUE)\ndune.mds$species <- wascores(dune.mds$points, dune, expand = TRUE)\npl <- ordiplot(dune.mds, type = \"none\")\npoints(pl, \"sites\", pch=21, col=\"red\", bg=\"yellow\")\ntext(pl, \"species\", col=\"blue\", cex=0.9)\n## Not run: \n##D ## same plot using pipes (pipes |> are available from R version 4.1.0)\n##D if (getRversion() >= \"4.1\") {\n##D ordiplot(dune.mds, type=\"n\") |>\n##D points(\"sites\", pch=21, col=\"red\", bg=\"yellow\") |>\n##D text(\"species\", col=\"blue\", cex=0.9)\n##D ## Some people think that species should be shown with arrows in PCA.\n##D ## Other ordination methods also return an invisible ordiplot object and\n##D ## we can use pipes to draw those arrows.\n##D mod <- rda(dune)\n##D plot(mod, type=\"n\") |>\n##D points(\"sites\", pch=16, col=\"red\") |>\n##D text(\"species\", arrows = TRUE, length=0.05, col=\"blue\")\n##D }\n## End(Not run)\n## Default plot of the previous using identify to label selected points\n## Not run: \n##D pl <- ordiplot(dune.mds)\n##D identify(pl, \"spec\")\n## End(Not run)\n\n\n"} {"package":"vegan","topic":"ordipointlabel","snippet":"### Name: ordipointlabel\n### Title: Ordination Plots with Points and Optimized Locations for Text\n### Aliases: ordipointlabel plot.ordipointlabel\n### Keywords: hplot aplot\n\n### ** 
Examples\n\ndata(dune)\nord <- cca(dune)\nplt <- ordipointlabel(ord)\n\n## set scaling - should be no warnings!\nordipointlabel(ord, scaling = \"sites\")\n\n## plot then add\nplot(ord, scaling = \"symmetric\", type = \"n\")\nordipointlabel(ord, display = \"species\", scaling = \"symm\", add = TRUE)\nordipointlabel(ord, display = \"sites\", scaling = \"symm\", add = TRUE)\n\n## redraw plot without rerunning SANN optimisation\nplot(plt)\n\n\n"} {"package":"vegan","topic":"ordiresids","snippet":"### Name: ordiresids\n### Title: Plots of Residuals and Fitted Values for Constrained Ordination\n### Aliases: ordiresids\n### Keywords: hplot\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\nmod <- cca(varespec ~ Al + P + K, varechem)\nordiresids(mod)\nordiresids(mod, formula = Residuals ~ Fitted | Species, residuals=\"standard\",\n cex = 0.5)\n\n\n"} {"package":"vegan","topic":"ordistep","snippet":"### Name: ordistep\n### Title: Choose a Model by Permutation Tests in Constrained Ordination\n### Aliases: ordistep ordiR2step\n### Keywords: multivariate models\n\n### ** Examples\n\n## See add1.cca for another example\n\n### Dune data\ndata(dune)\ndata(dune.env)\nmod0 <- rda(dune ~ 1, dune.env) # Model with intercept only\nmod1 <- rda(dune ~ ., dune.env) # Model with all explanatory variables\n\n## With scope present, the default direction is \"both\"\nmod <- ordistep(mod0, scope = formula(mod1))\nmod\n## summary table of steps\nmod$anova\n\n## Example of ordistep, forward\nordistep(mod0, scope = formula(mod1), direction=\"forward\")\n\n## Example of ordiR2step (always forward)\n## stops because R2 of 'mod1' exceeded\nordiR2step(mod0, mod1)\n\n\n"} {"package":"vegan","topic":"ordisurf","snippet":"### Name: ordisurf\n### Title: Fit and Plot Smooth Surfaces of Variables on Ordination.\n### Aliases: ordisurf ordisurf.default ordisurf.formula calibrate.ordisurf\n### plot.ordisurf\n### Keywords: multivariate aplot\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\nvare.dist <- vegdist(varespec)\nvare.mds <- monoMDS(vare.dist)\n## IGNORE_RDIFF_BEGIN\nordisurf(vare.mds ~ Baresoil, varechem, bubble = 5)\n\n## as above but without the extra penalties on smooth terms,\n## and using GCV smoothness selection (old behaviour of `ordisurf()`):\nordisurf(vare.mds ~ Baresoil, varechem, col = \"blue\", add = TRUE,\n select = FALSE, method = \"GCV.Cp\")\n\n## Cover of Cladina arbuscula\nfit <- ordisurf(vare.mds ~ Cladarbu, varespec, family=quasipoisson)\n## Get fitted values\ncalibrate(fit)\n## Variable selection via additional shrinkage penalties\n## This allows non-significant smooths to be selected out\n## of the model not just to a linear surface. 
There are 2\n## options available:\n## - option 1: `select = TRUE` --- the *default*\nordisurf(vare.mds ~ Baresoil, varechem, method = "REML", select = TRUE)\n## - option 2: use a basis with shrinkage\nordisurf(vare.mds ~ Baresoil, varechem, method = "REML", bs = "ts")\n## or bs = "cs" with `isotropic = FALSE`\n## IGNORE_RDIFF_END\n## Plot method\nplot(fit, what = "contour")\n\n## Plotting the "gam" object\nplot(fit, what = "gam") ## 'col' and 'cex' not passed on\n## or via plot.gam directly\nlibrary(mgcv)\nplot.gam(fit, cex = 2, pch = 1, col = "blue")\n## 'col' affects all objects drawn...\n\n### controlling the basis functions used\n## Use Duchon splines\nordisurf(vare.mds ~ Baresoil, varechem, bs = "ds")\n\n## A fixed degrees of freedom smooth, must use 'select = FALSE'\nordisurf(vare.mds ~ Baresoil, varechem, knots = 4,\n fx = TRUE, select = FALSE)\n\n## An anisotropic smoother with cubic regression spline bases\nordisurf(vare.mds ~ Baresoil, varechem, isotropic = FALSE,\n bs = "cr", knots = 4)\n\n## An anisotropic smoother with cubic regression spline with\n## shrinkage bases & different degrees of freedom in each dimension\nordisurf(vare.mds ~ Baresoil, varechem, isotropic = FALSE,\n bs = "cs", knots = c(3,4), fx = TRUE,\n select = FALSE)\n\n\n"}
{"package":"vegan","topic":"orditkplot","snippet":"### Name: orditkplot\n### Title: Ordination Plot with Movable Labels\n### Aliases: orditkplot plot.orditkplot scores.orditkplot points.orditkplot\n### text.orditkplot\n### Keywords: iplot dynamic\n\n### ** Examples\n\n## The example needs user interaction and is not executed directly.\n## It should work when pasted to the window.\n## Not run: \n##D data(varespec)\n##D ord <- cca(varespec)\n##D ## Do something with the graph and end by clicking "Dismiss"\n##D orditkplot(ord, mar = c(4,4,1,1)+.1, font=3)\n##D ## Use ordipointlabel to produce a plot that has both species and site\n##D ## scores in different colors and plotting symbols\n##D pl <- ordipointlabel(ord)\n##D orditkplot(pl)\n## End(Not run)\n\n\n"}
{"package":"vegan","topic":"orditorp","snippet":"### Name: orditorp\n### Title: Add Text or Points to Ordination Plots\n### Aliases: orditorp\n### Keywords: aplot hplot\n\n### ** Examples\n\n## A cluttered ordination plot:\ndata(BCI)\nmod <- cca(BCI)\nplot(mod, dis="sp", type="t")\n# Now with orditorp and abbreviated species names\ncnam <- make.cepnames(names(BCI))\nplot(mod, dis="sp", type="n")\nstems <- colSums(BCI)\norditorp(mod, "sp", label = cnam, priority=stems, pch="+", pcol="grey")\n\n## show select in action\nset.seed(1)\ntake <- sample(ncol(BCI), 50)\nplot(mod, dis="sp", type="n")\nstems <- colSums(BCI)\norditorp(mod, "sp", label = cnam, priority=stems, select = take,\n pch="+", pcol="grey")\n## Don't show: \n## example(orditorp) should not set random seed in the user session\nrm(.Random.seed)\n## End(Don't show)\n\n\n"}
{"package":"vegan","topic":"ordixyplot","snippet":"### Name: ordixyplot\n### Title: Trellis (Lattice) Plots for Ordination\n### Aliases: ordixyplot ordisplom ordicloud panel.ordi panel.ordiarrows\n### panel.ordi3d prepanel.ordi3d ordilattice.getEnvfit\n### Keywords: hplot\n\n### ** Examples\n\ndata(dune, dune.env)\nord <- cca(dune)\n## Pairs plots\nordisplom(ord)\nordisplom(ord, data=dune.env, choices=1:2)\nordisplom(ord, data=dune.env, form = ~ . 
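| Management)\n## The right-hand side of 'form' conditions the panels on a factor; any\n## factor of 'data' works (a sketch using Moisture instead):\nordisplom(ord, data = dune.env, form = ~ . | Moisture)\nordisplom(ord, data=dune.env, form = ~ . 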
| Management, groups=Manure)\n## Scatter plot with polygons\nordixyplot(ord, data=dune.env, form = CA1 ~ CA2 | Management,\n groups=Manure, type = c(\"p\",\"polygon\"))\n## Choose a different scaling\nordixyplot(ord, scaling = \"symmetric\")\n## ... Slices of third axis\nordixyplot(ord, form = CA1 ~ CA2 | equal.count(CA3, 4),\n type = c(\"g\",\"p\", \"polygon\"))\n## Display environmental variables\nordixyplot(ord, envfit = envfit(ord ~ Management + A1, dune.env, choices=1:3))\n## 3D Scatter plots\nordicloud(ord, form = CA2 ~ CA3*CA1, groups = Manure, data = dune.env)\nordicloud(ord, form = CA2 ~ CA3*CA1 | Management, groups = Manure,\n data = dune.env, auto.key = TRUE, type = c(\"p\",\"h\"))\n\n\n"} {"package":"vegan","topic":"pcnm","snippet":"### Name: pcnm\n### Title: Principal Coordinates of Neighbourhood Matrix\n### Aliases: pcnm scores.pcnm\n### Keywords: spatial multivariate\n\n### ** Examples\n\n## Example from Borcard & Legendre (2002)\ndata(mite.xy)\npcnm1 <- pcnm(dist(mite.xy))\nop <- par(mfrow=c(1,3))\n## Map of PCNMs in the sample plot\nordisurf(mite.xy, scores(pcnm1, choi=1), bubble = 4, main = \"PCNM 1\")\nordisurf(mite.xy, scores(pcnm1, choi=2), bubble = 4, main = \"PCNM 2\")\nordisurf(mite.xy, scores(pcnm1, choi=3), bubble = 4, main = \"PCNM 3\")\npar(op)\n## Plot first PCNMs against each other\nordisplom(pcnm1, choices=1:4)\n## Weighted PCNM for CCA\ndata(mite)\nrs <- rowSums(mite)/sum(mite)\npcnmw <- pcnm(dist(mite.xy), w = rs)\nord <- cca(mite ~ scores(pcnmw))\n## Multiscale ordination: residual variance should have no distance\n## trend\nmsoplot(mso(ord, mite.xy))\n\n\n"} {"package":"vegan","topic":"permatfull","snippet":"### Name: permat\n### Title: Matrix Permutation Algorithms for Presence-Absence and Count\n### Data\n### Aliases: permatfull permatswap summary.permat print.summary.permat\n### print.permat plot.permat lines.permat as.ts.permat toCoda.permat\n### Keywords: multivariate datagen\n\n### ** Examples\n\n## A simple artificial community data matrix.\nm <- matrix(c(\n 1,3,2,0,3,1,\n 0,2,1,0,2,1,\n 0,0,1,2,0,3,\n 0,0,0,1,4,3\n ), 4, 6, byrow=TRUE)\n## Using the quasiswap algorithm to create a \n## list of permuted matrices, where\n## row/columns sums and matrix fill are preserved:\nx1 <- permatswap(m, \"quasiswap\")\nsummary(x1)\n## Unrestricted permutation retaining\n## row/columns sums but not matrix fill:\nx2 <- permatfull(m)\nsummary(x2)\n## Unrestricted permutation of presence-absence type\n## not retaining row/columns sums:\nx3 <- permatfull(m, \"none\", mtype=\"prab\")\nx3$orig ## note: original matrix is binarized!\nsummary(x3)\n## Restricted permutation,\n## check sums within strata:\nx4 <- permatfull(m, strata=c(1,1,2,2))\nsummary(x4)\n\n## NOTE: 'times' argument usually needs to be >= 99\n## here much lower value is used for demonstration\n\n## Not sequential algorithm\ndata(BCI)\na <- permatswap(BCI, \"quasiswap\", times=19)\n## Sequential algorithm\nb <- permatswap(BCI, \"abuswap\", fixedmar=\"col\",\n burnin=0, thin=100, times=19)\nopar <- par(mfrow=c(2,2))\nplot(a, main=\"Not sequential\")\nplot(b, main=\"Sequential\")\nplot(a, \"chisq\")\nplot(b, \"chisq\")\npar(opar)\n## Extract Bray-Curtis dissimilarities\n## as time series\nbc <- as.ts(b)\n## Lag plot\nlag.plot(bc)\n## First order autoregressive model\nmar <- arima(bc, c(1,0,0))\nmar\n## Ljung-Box test of residuals\nBox.test(residuals(mar))\n## Graphical diagnostics\ntsdiag(mar)\n\n\n"} {"package":"vegan","topic":"permustats","snippet":"### Name: permustats\n### Title: Extract, Analyse and 
Display Permutation Results\n### Aliases: permustats permustats.anosim permustats.anova.cca\n### permustats.CCorA permustats.envfit permustats.factorfit\n### permustats.mantel permustats.mrpp permustats.mso permustats.oecosimu\n### permustats.ordiareatest permustats.permutest.betadisper\n### permustats.permutest.cca permustats.protest permustats.vectorfit\n### summary.permustats c.permustats densityplot.permustats\n### density.permustats qqnorm.permustats qqmath.permustats\n### boxplot.permustats pairs.permustats\n### Keywords: distribution smooth\n\n### ** Examples\n\ndata(dune, dune.env)\nmod <- adonis2(dune ~ Management + A1, data = dune.env)\n## use permustats\nperm <- permustats(mod)\nsummary(perm)\ndensityplot(perm)\nqqmath(perm)\nboxplot(perm, scale=TRUE, lty=1, pch=16, cex=0.6, col=\"hotpink\", ylab=\"SES\")\nabline(h=0, col=\"skyblue\")\n## example of multiple types of statistic\nmod <- with(dune.env, betadisper(vegdist(dune), Management))\npmod <- permutest(mod, nperm = 99, pairwise = TRUE)\nperm <- permustats(pmod)\nsummary(perm, interval = 0.90)\n\n\n"} {"package":"vegan","topic":"permutest.betadisper","snippet":"### Name: permutest.betadisper\n### Title: Permutation test of multivariate homogeneity of groups\n### dispersions (variances)\n### Aliases: permutest.betadisper\n### Keywords: methods multivariate\n\n### ** Examples\n\ndata(varespec)\n\n## Bray-Curtis distances between samples\ndis <- vegdist(varespec)\n\n## First 16 sites grazed, remaining 8 sites ungrazed\ngroups <- factor(c(rep(1,16), rep(2,8)), labels = c(\"grazed\",\"ungrazed\"))\n\n## Calculate multivariate dispersions\nmod <- betadisper(dis, groups)\nmod\n\n## Perform test\nanova(mod)\n\n## Permutation test for F\npmod <- permutest(mod, permutations = 99, pairwise = TRUE)\n\n## Tukey's Honest Significant Differences\n(mod.HSD <- TukeyHSD(mod))\nplot(mod.HSD)\n\n## Has permustats() method\npstat <- permustats(pmod)\ndensityplot(pstat, scales = list(x = list(relation = \"free\")))\nqqmath(pstat, scales = list(relation = \"free\"))\n\n\n"} {"package":"vegan","topic":"plot.cca","snippet":"### Name: plot.cca\n### Title: Plot or Extract Results of Constrained Correspondence Analysis\n### or Redundancy Analysis\n### Aliases: plot.cca text.cca points.cca scores.cca scores.rda summary.cca\n### print.summary.cca head.summary.cca tail.summary.cca labels.cca\n### Keywords: hplot aplot\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\nmod <- cca(dune ~ A1 + Moisture + Management, dune.env)\n## better control -- remember to set scaling etc identically\nplot(mod, type=\"n\", scaling=\"sites\")\ntext(mod, dis=\"cn\", scaling=\"sites\")\npoints(mod, pch=21, col=\"red\", bg=\"yellow\", cex=1.2, scaling=\"sites\")\ntext(mod, \"species\", col=\"blue\", cex=0.8, scaling=\"sites\")\n## catch the invisible result and use ordiplot support - the example\n## will make a biplot with arrows for species and correlation scaling\npca <- rda(dune)\npl <- plot(pca, type=\"n\", scaling=\"sites\", correlation=TRUE)\nwith(dune.env, points(pl, \"site\", pch=21, col=1, bg=Management))\ntext(pl, \"sp\", arrow=TRUE, length=0.05, col=4, cex=0.6, xpd=TRUE)\nwith(dune.env, legend(\"bottomleft\", levels(Management), pch=21, pt.bg=1:4, bty=\"n\"))\n## Limited output of 'summary' (NB. Signs of axes are arbitrary and can change\n## when the command is run repeatedly).\n## IGNORE_RDIFF_BEGIN\nhead(summary(mod), tail=2)\n## IGNORE_RDIFF_END\n## Scaling can be numeric or more user-friendly names\n## e.g. 
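numbers 1-3 or the equivalent names.\n## A quick equivalence check (sketch; in vegan, scaling 1 = "sites",\n## 2 = "species", 3 = "symmetric"):\nstopifnot(all.equal(scores(mod, scaling = 2), scores(mod, scaling = "species")))\n## e.g. 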
Hill's scaling for (C)CA\nscrs <- scores(mod, scaling = \"sites\", hill = TRUE)\n## or correlation-based scores in PCA/RDA\nscrs <- scores(rda(dune ~ A1 + Moisture + Management, dune.env),\n scaling = \"sites\", correlation = TRUE)\n\n\n"} {"package":"vegan","topic":"prc","snippet":"### Name: prc\n### Title: Principal Response Curves for Treatments with Repeated\n### Observations\n### Aliases: prc summary.prc plot.prc\n### Keywords: multivariate\n\n### ** Examples\n\n## Chlorpyrifos experiment and experimental design: Pesticide\n## treatment in ditches (replicated) and followed over from 4 weeks\n## before to 24 weeks after exposure \ndata(pyrifos)\nweek <- gl(11, 12, labels=c(-4, -1, 0.1, 1, 2, 4, 8, 12, 15, 19, 24))\ndose <- factor(rep(c(0.1, 0, 0, 0.9, 0, 44, 6, 0.1, 44, 0.9, 0, 6), 11))\nditch <- gl(12, 1, length=132)\n\n## IGNORE_RDIFF_BEGIN\n## PRC\nmod <- prc(pyrifos, dose, week)\nmod # RDA\nsummary(mod) # PRC\nlogabu <- colSums(pyrifos)\nplot(mod, select = logabu > 100)\n## IGNORE_RDIFF_END\n## Ditches are randomized, we have a time series, and are only\n## interested in the first axis\nctrl <- how(plots = Plots(strata = ditch,type = \"free\"),\n within = Within(type = \"series\"), nperm = 99)\nanova(mod, permutations = ctrl, first=TRUE)\n\n\n"} {"package":"vegan","topic":"fitted.cca","snippet":"### Name: predict.cca\n### Title: Prediction Tools for [Constrained] Ordination (CCA, RDA, DCA,\n### CA, PCA)\n### Aliases: fitted.cca fitted.rda fitted.capscale fitted.dbrda\n### residuals.cca predict.cca predict.rda predict.decorana coef.cca\n### coef.rda calibrate.cca calibrate\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\nmod <- cca(dune ~ A1 + Management + Condition(Moisture), data=dune.env)\n# Definition of the concepts 'fitted' and 'residuals'\nmod\ncca(fitted(mod))\ncca(residuals(mod))\n# Remove rare species (freq==1) from 'cca' and find their scores\n# 'passively'.\nfreq <- specnumber(dune, MARGIN=2)\nfreq\nmod <- cca(dune[, freq>1] ~ A1 + Management + Condition(Moisture), dune.env)\n## IGNORE_RDIFF_BEGIN\npredict(mod, type=\"sp\", newdata=dune[, freq==1], scaling=\"species\")\n# New sites\npredict(mod, type=\"lc\", new=data.frame(A1 = 3, Management=\"NM\", Moisture=\"2\"), scal=2)\n# Calibration and residual plot\nmod <- cca(dune ~ A1 + Moisture, dune.env)\npred <- calibrate(mod)\npred\n## IGNORE_RDIFF_END\nwith(dune.env, plot(A1, pred[,\"A1\"] - A1, ylab=\"Prediction Error\"))\nabline(h=0)\n\n\n"} {"package":"vegan","topic":"procrustes","snippet":"### Name: procrustes\n### Title: Procrustes Rotation of Two Configurations and PROTEST\n### Aliases: procrustes summary.procrustes plot.procrustes\n### points.procrustes text.procrustes lines.procrustes\n### residuals.procrustes fitted.procrustes predict.procrustes protest\n### Keywords: multivariate htest\n\n### ** Examples\n\n## IGNORE_RDIFF_BEGIN\ndata(varespec)\nvare.dist <- vegdist(wisconsin(varespec))\nmds.null <- monoMDS(vare.dist, y = cmdscale(vare.dist))\nmds.alt <- monoMDS(vare.dist)\nvare.proc <- procrustes(mds.alt, mds.null)\nvare.proc\nsummary(vare.proc)\nplot(vare.proc)\nplot(vare.proc, kind=2)\nresiduals(vare.proc)\n## IGNORE_RDIFF_END\n\n\n"} {"package":"vegan","topic":"pyrifos","snippet":"### Name: pyrifos\n### Title: Response of Aquatic Invertebrates to Insecticide Treatment\n### Aliases: pyrifos\n### Keywords: datasets\n\n### ** Examples\n\ndata(pyrifos)\nditch <- gl(12, 1, length=132)\nweek <- gl(11, 12, labels=c(-4, -1, 0.1, 1, 2, 4, 8, 12, 15, 19, 24))\ndose <- factor(rep(c(0.1, 0, 
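0, 0.9, 0, 44, 6, 0.1, 44, 0.9, 0, 6), 11))\n## Each ditch keeps its dose over all 11 sampling weeks; a quick\n## cross-tabulation shows the balanced repeated-measures layout:\ntable(dose, week)\ndose <- factor(rep(c(0.1, 0, 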
0, 0.9, 0, 44, 6, 0.1, 44, 0.9, 0, 6), 11))\n\n\n"}
{"package":"vegan","topic":"radfit","snippet":"### Name: radfit\n### Title: Rank - Abundance or Dominance / Diversity Models\n### Aliases: radfit radfit.default radfit.data.frame AIC.radfit\n### AIC.radfit.frame as.rad coef.radfit coef.radfit.frame deviance.radfit\n### deviance.radfit.frame logLik.radfit logLik.radfit.frame\n### fitted.radfit fitted.radfit.frame lines.radline lines.radfit\n### plot.radfit.frame plot.radfit plot.radline plot.rad radlattice\n### points.radline points.radfit summary.radfit.frame rad.preempt\n### rad.lognormal rad.zipf rad.zipfbrot rad.null predict.radline\n### predict.radfit predict.radfit.frame\n### Keywords: univar distribution\n\n### ** Examples\n\ndata(BCI)\nmod <- rad.lognormal(BCI[5,])\nmod\nplot(mod)\nmod <- radfit(BCI[1,])\n## Standard plot overlaid for all models\n## Preemption model is a line\nplot(mod)\n## log for both axes: Zipf model is a line\nplot(mod, log = "xy")\n## Lattice graphics separately for each model\nradlattice(mod)\n# Take a subset of BCI to save time and nerves\nmod <- radfit(BCI[3:5,])\nmod\nplot(mod, pch=".")\n\n\n"}
{"package":"vegan","topic":"rankindex","snippet":"### Name: rankindex\n### Title: Compares Dissimilarity Indices for Gradient Detection\n### Aliases: rankindex\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\n## The variables are automatically scaled\nrankindex(varechem, varespec)\nrankindex(varechem, wisconsin(varespec))\n## Using non-vegdist indices as functions\nfuns <- list(Manhattan=function(x) dist(x, "manhattan"),\n Gower=function(x) cluster::daisy(x, "gower"),\n Ochiai=function(x) designdist(x, "1-J/sqrt(A*B)"))\nrankindex(scale(varechem), varespec, funs)\n\n\n"}
{"package":"vegan","topic":"rarefy","snippet":"### Name: rarefy\n### Title: Rarefaction Species Richness\n### Aliases: rarefy rrarefy drarefy rarecurve rareslope\n### Keywords: univar\n\n### ** Examples\n\ndata(BCI)\nS <- specnumber(BCI) # observed number of species\n(raremax <- min(rowSums(BCI)))\nSrare <- rarefy(BCI, raremax)\nplot(S, Srare, xlab = "Observed No. of Species", ylab = "Rarefied No. 
of Species\")\nabline(0, 1)\nrarecurve(BCI, step = 20, sample = raremax, col = \"blue\", cex = 0.6)\n\n\n"} {"package":"vegan","topic":"raupcrick","snippet":"### Name: raupcrick\n### Title: Raup-Crick Dissimilarity with Unequal Sampling Densities of\n### Species\n### Aliases: raupcrick\n### Keywords: multivariate\n\n### ** Examples\n\n## data set with variable species richness\ndata(sipoo)\n## default raupcrick\ndr1 <- raupcrick(sipoo)\n## use null model \"r0\" of oecosimu\ndr0 <- raupcrick(sipoo, null = \"r0\")\n## vegdist(..., method = \"raup\") corresponds to 'null = \"r0\"'\nd <- vegdist(sipoo, \"raup\")\nop <- par(mfrow=c(2,1), mar=c(4,4,1,1)+.1)\nplot(dr1 ~ d, xlab = \"Raup-Crick with Null R1\", ylab=\"vegdist\")\nplot(dr0 ~ d, xlab = \"Raup-Crick with Null R0\", ylab=\"vegdist\")\npar(op)\n\n## The calculation is essentially as in the following oecosimu() call,\n## except that designdist() is replaced with faster code\n## Not run: ##D \n##D oecosimu(sipoo, function(x) designdist(x, \"J\", \"binary\"), method = \"r1\")\n## End(Not run)\n\n\n"} {"package":"vegan","topic":"read.cep","snippet":"### Name: read.cep\n### Title: Reads a CEP (Canoco) data file\n### Aliases: read.cep\n### Keywords: IO file\n\n### ** Examples\n\n## Provided that you have the file \"dune.spe\"\n## Not run: \n##D theclassic <- read.cep(\"dune.spe\")\n## End(Not run)\n\n\n"} {"package":"vegan","topic":"renyi","snippet":"### Name: renyi\n### Title: Renyi and Hill Diversities and Corresponding Accumulation Curves\n### Aliases: renyi plot.renyi renyiaccum plot.renyiaccum persp.renyiaccum\n### Keywords: univar\n\n### ** Examples\n\ndata(BCI)\ni <- sample(nrow(BCI), 12)\nmod <- renyi(BCI[i,])\nplot(mod)\nmod <- renyiaccum(BCI[i,])\nplot(mod, as.table=TRUE, col = c(1, 2, 2))\npersp(mod)\n\n\n"} {"package":"vegan","topic":"reorder.hclust","snippet":"### Name: reorder.hclust\n### Title: Reorder a Hierarchical Clustering Tree\n### Aliases: reorder.hclust rev.hclust cutreeord scores.hclust\n### Keywords: multivariate\n\n### ** Examples\n\n## reorder by water content of soil\ndata(mite, mite.env)\nhc <- hclust(vegdist(wisconsin(sqrt(mite))))\nohc <- with(mite.env, reorder(hc, WatrCont))\nplot(hc)\nplot(ohc)\n\n## label leaves by the observed value, and each branching point\n## (internal node) by the cluster mean\nwith(mite.env, plot(ohc, labels=round(WatrCont), cex=0.7))\nordilabel(scores(ohc), label=round(ohc$value), cex=0.7)\n\n## Slightly different from reordered 'dendrogram' which ignores group\n## sizes in assessing means.\nden <- as.dendrogram(hc)\nden <- with(mite.env, reorder(den, WatrCont, agglo.FUN = mean))\nplot(den)\n\n\n"} {"package":"vegan","topic":"scores","snippet":"### Name: scores\n### Title: Get Species or Site Scores from an Ordination\n### Aliases: scores scores.default scores.lda\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\nvare.pca <- prcomp(varespec)\nscores(vare.pca, choices=c(1,2))\n\n\n"} {"package":"vegan","topic":"screeplot.cca","snippet":"### Name: screeplot.cca\n### Title: Screeplots for Ordination Results and Broken Stick Distributions\n### Aliases: screeplot.cca screeplot.princomp screeplot.prcomp\n### screeplot.decorana bstick bstick.default bstick.cca bstick.prcomp\n### bstick.princomp bstick.decorana\n### Keywords: multivariate\n\n### ** Examples\n\ndata(varespec)\nvare.pca <- rda(varespec, scale = TRUE)\nbstick(vare.pca)\nscreeplot(vare.pca, bstick = TRUE, type = \"lines\")\n\n\n"} {"package":"vegan","topic":"simper","snippet":"### Name: simper\n### Title: Similarity 
Percentages\n### Aliases: simper summary.simper\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\n(sim <- with(dune.env, simper(dune, Management, permutations = 99)))\n## IGNORE_RDIFF_BEGIN\nsummary(sim)\n## IGNORE_RDIFF_END\n\n\n"}
{"package":"vegan","topic":"simulate.rda","snippet":"### Name: simulate.rda\n### Title: Simulate Responses with Gaussian Error or Permuted Residuals for\n### Constrained Ordination\n### Aliases: simulate.rda simulate.cca simulate.capscale\n### Keywords: models datagen multivariate\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\nmod <- rda(dune ~ Moisture + Management, dune.env)\n## One simulation\nupdate(mod, simulate(mod) ~ .)\n## An impression of confidence regions of site scores\nplot(mod, display="sites")\nfor (i in 1:5) lines(procrustes(mod, update(mod, simulate(mod) ~ .)), col="blue")\n## Simulate a set of null communities with permutation of residuals\nsimulate(mod, indx = shuffleSet(nrow(dune), 99))\n\n\n"}
{"package":"vegan","topic":"spantree","snippet":"### Name: spantree\n### Title: Minimum Spanning Tree\n### Aliases: spantree cophenetic.spantree as.hclust.spantree plot.spantree\n### lines.spantree spandepth\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune)\ndis <- vegdist(dune)\ntr <- spantree(dis)\n## Add tree to a metric scaling\nplot(tr, cmdscale(dis), type = "t")\n## Find a configuration to display the tree neatly\nplot(tr, type = "t")\n## Depths of nodes\ndepths <- spandepth(tr)\nplot(tr, type = "t", label = depths)\n## Plot as a dendrogram\ncl <- as.hclust(tr)\nplot(cl)\n## cut hclust tree to classes and show in colours in spantree\nplot(tr, col = cutree(cl, 5), pch=16)\n\n\n"}
{"package":"vegan","topic":"specaccum","snippet":"### Name: specaccum\n### Title: Species Accumulation Curves\n### Aliases: specaccum print.specaccum summary.specaccum plot.specaccum\n### lines.specaccum boxplot.specaccum fitspecaccum plot.fitspecaccum\n### lines.fitspecaccum predict.specaccum predict.fitspecaccum\n### AIC.fitspecaccum deviance.fitspecaccum logLik.fitspecaccum\n### nobs.fitspecaccum specslope\n### Keywords: univar models\n\n### ** Examples\n\ndata(BCI)\nsp1 <- specaccum(BCI)\nsp2 <- specaccum(BCI, "random")\nsp2\nsummary(sp2)\nplot(sp1, ci.type="poly", col="blue", lwd=2, ci.lty=0, ci.col="lightblue")\nboxplot(sp2, col="yellow", add=TRUE, pch="+")\n## Fit Lomolino model to the exact accumulation\nmod1 <- fitspecaccum(sp1, "lomolino")\ncoef(mod1)\nfitted(mod1)\nplot(sp1)\n## Add Lomolino model using argument 'add'\nplot(mod1, add = TRUE, col=2, lwd=2)\n## Fit Arrhenius models to all random accumulations\nmods <- fitspecaccum(sp2, "arrh")\nplot(mods, col="hotpink")\nboxplot(sp2, col = "yellow", border = "blue", lty=1, cex=0.3, add= TRUE)\n## Use nls() methods on the list of models\nsapply(mods$models, AIC)\n\n\n"}
{"package":"vegan","topic":"specpool","snippet":"### Name: specpool\n### Title: Extrapolated Species Richness in a Species Pool\n### Aliases: specpool specpool2vect poolaccum summary.poolaccum\n### plot.poolaccum estimateR estimateR.default estimateR.matrix\n### estimateR.data.frame estaccumR\n### Keywords: univar\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\npool <- with(dune.env, specpool(dune, Management))\npool\nop <- par(mfrow=c(1,2))\nboxplot(specnumber(dune) ~ Management, data = dune.env,\n col = "hotpink", border = "cyan3")\nboxplot(specnumber(dune)/specpool2vect(pool) ~ Management,\n data = dune.env, col = "hotpink", border = "cyan3")\npar(op)\ndata(BCI)\n## Accumulation model\npool <- poolaccum(BCI)\nsummary(pool, display = "chao")\nplot(pool)\n## Quantitative model\nestimateR(BCI[1:5,])\n\n\n"}
{"package":"vegan","topic":"sppscores","snippet":"### Name: sppscores\n### Title: Add or Replace Species Scores in Distance-Based Ordination\n### Aliases: sppscores sppscores<- sppscores<-.dbrda sppscores<-.capscale\n### sppscores<-.metaMDS\n### Keywords: multivariate\n\n### ** Examples\n\ndata(BCI, BCI.env)\nmod <- dbrda(vegdist(BCI) ~ Habitat, BCI.env)\n## add species scores\nsppscores(mod) <- BCI\n## Euclidean distances of BCI differ from the dissimilarity used\nplot(vegdist(BCI), dist(BCI))\n## a more linear relationship\nplot(vegdist(BCI), dist(sqrt(decostand(BCI, "total"))))\n## better species scores\nsppscores(mod) <- sqrt(decostand(BCI, "total"))\n\n\n"}
{"package":"vegan","topic":"stepacross","snippet":"### Name: stepacross\n### Title: Stepacross as Flexible Shortest Paths or Extended\n### Dissimilarities\n### Aliases: stepacross\n### Keywords: multivariate\n\n### ** Examples\n\n# There are no data sets with high beta diversity in vegan, but this\n# should give an idea.\ndata(dune)\ndis <- vegdist(dune)\nedis <- stepacross(dis)\nplot(edis, dis, xlab = "Shortest path", ylab = "Original")\n## Manhattan distances have no fixed upper limit.\ndis <- vegdist(dune, "manhattan")\nis.na(dis) <- no.shared(dune)\ndis <- stepacross(dis, toolong=0)\n\n\n"}
{"package":"vegan","topic":"stressplot.wcmdscale","snippet":"### Name: stressplot.wcmdscale\n### Title: Display Ordination Distances Against Observed Distances in\n### Eigenvector Ordinations\n### Aliases: stressplot.wcmdscale stressplot.cca stressplot.rda\n### stressplot.capscale stressplot.dbrda stressplot.prcomp\n### stressplot.princomp\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dune, dune.env)\nmod <- rda(dune)\nstressplot(mod)\nmod <- rda(dune ~ Management, dune.env)\nstressplot(mod, k=3)\n\n\n"}
{"package":"vegan","topic":"taxondive","snippet":"### Name: taxondive\n### Title: Indices of Taxonomic Diversity and Distinctness\n### Aliases: taxondive summary.taxondive plot.taxondive taxa2dist\n### Keywords: univar\n\n### ** Examples\n\n## Preliminary: needs better data and some support functions\ndata(dune)\ndata(dune.taxon)\n# Taxonomic distances from a classification table with variable step lengths.\ntaxdis <- taxa2dist(dune.taxon, varstep=TRUE)\nplot(hclust(taxdis), hang = -1)\n# Indices\nmod <- taxondive(dune, taxdis)\nmod\nsummary(mod)\nplot(mod)\n\n\n"}
{"package":"vegan","topic":"tolerance","snippet":"### Name: tolerance\n### Title: Species tolerances and sample heterogeneities\n### Aliases: tolerance tolerance.cca tolerance.decorana\n\n### ** Examples\n\ndata(dune)\ndata(dune.env)\nmod <- cca(dune ~ ., data = dune.env)\n\n## defaults to species tolerances\ntolerance(mod)\n\n## sample heterogeneities for CCA axes 1:6\ntolerance(mod, which = "sites", choices = 1:6)\n## average should be 1 with scaling = "sites", hill = TRUE\ntol <- tolerance(mod, which = "sites", scaling = "sites", hill = TRUE,\n choices = 1:4)\ncolMeans(tol)\napply(tol, 2, sd)\n## Rescaling tries to set all tolerances to 1\ntol <- tolerance(decorana(dune))\ncolMeans(tol)\napply(tol, 2, sd)\n\n\n"}
{"package":"vegan","topic":"treedive","snippet":"### Name: treedive\n### Title: Functional Diversity and Community Distances from Species Trees\n### Aliases: treedive treeheight treedist\n### Keywords: univar\n\n### ** Examples\n\n\n## There is no data set on species 
properties yet, and we demonstrate\n## the methods using phylogenetic trees\ndata(dune)\ndata(dune.phylodis)\ncl <- hclust(dune.phylodis)\ntreedive(dune, cl)\n## Significance test using Null model communities.\n## The current choice fixes numbers of species and picks species\n## proportionally to their overall frequency\noecosimu(dune, treedive, \"r1\", tree = cl, verbose = FALSE)\n## Phylogenetically ordered community table\ndtree <- treedist(dune, cl)\ntabasco(dune, hclust(dtree), cl)\n## Use tree distances in capscale\ncapscale(dtree ~ 1, comm=dune)\n\n\n"} {"package":"vegan","topic":"tsallis","snippet":"### Name: tsallis\n### Title: Tsallis Diversity and Corresponding Accumulation Curves\n### Aliases: tsallis tsallisaccum persp.tsallisaccum\n### Keywords: multivariate\n\n### ** Examples\n\ndata(BCI)\ni <- sample(nrow(BCI), 12)\nx1 <- tsallis(BCI[i,])\nx1\ndiversity(BCI[i,],\"simpson\") == x1[[\"2\"]]\nplot(x1)\nx2 <- tsallis(BCI[i,],norm=TRUE)\nx2\nplot(x2)\nmod1 <- tsallisaccum(BCI[i,])\nplot(mod1, as.table=TRUE, col = c(1, 2, 2))\npersp(mod1)\nmod2 <- tsallisaccum(BCI[i,], norm=TRUE)\npersp(mod2,theta=100,phi=30)\n\n\n"} {"package":"vegan","topic":"varechem","snippet":"### Name: varespec\n### Title: Vegetation and environment in lichen pastures\n### Aliases: varechem varespec\n### Keywords: datasets\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\n\n\n"} {"package":"vegan","topic":"varpart","snippet":"### Name: varpart\n### Title: Partition the Variation of Community Matrix by 2, 3, or 4\n### Explanatory Matrices\n### Aliases: varpart varpart2 varpart3 varpart4 showvarparts\n### summary.varpart plot.varpart plot.varpart234 simpleRDA2 simpleDBRDA\n### Keywords: multivariate\n\n### ** Examples\n\ndata(mite)\ndata(mite.env)\ndata(mite.pcnm)\n\n# Two explanatory data frames -- Hellinger-transform Y\nmod <- varpart(mite, mite.env, mite.pcnm, transfo=\"hel\")\nmod\nsummary(mod)\n\n## Use fill colours\nshowvarparts(2, bg = c(\"hotpink\",\"skyblue\"))\nplot(mod, bg = c(\"hotpink\",\"skyblue\"))\n## Test fraction [a] using partial RDA, '~ .' 
in the formula tells R to use\n## all variables of the data mite.env.\naFrac <- rda(decostand(mite, "hel"), mite.env, mite.pcnm)\nanova(aFrac)\n## RsquareAdj gives the same result as component [a] of varpart\nRsquareAdj(aFrac)\n\n## Partition Bray-Curtis dissimilarities\nvarpart(vegdist(mite), mite.env, mite.pcnm)\n## Three explanatory tables with formula interface\nmod <- varpart(mite, ~ SubsDens + WatrCont, ~ Substrate + Shrub + Topo,\n mite.pcnm, data=mite.env, transfo="hel")\nmod\nsummary(mod)\nshowvarparts(3, bg=2:4)\nplot(mod, bg=2:4)\n\n## Use RDA to test fraction [a]\n## Matrix can be an argument in formula\nrda.result <- rda(decostand(mite, "hell") ~ SubsDens + WatrCont +\n Condition(Substrate + Shrub + Topo) +\n Condition(as.matrix(mite.pcnm)), data = mite.env)\nanova(rda.result)\n\n## Four explanatory tables\nmod <- varpart(mite, ~ SubsDens + WatrCont, ~Substrate + Shrub + Topo,\n mite.pcnm[,1:11], mite.pcnm[,12:22], data=mite.env, transfo="hel")\nmod\nsummary(mod)\nplot(mod, bg=2:5)\n## Show values for all partitions by putting 'cutoff' low enough:\nplot(mod, cutoff = -Inf, cex = 0.7, bg=2:5)\n\n\n"}
{"package":"vegan","topic":"vegan-package","snippet":"### Name: vegan-package\n### Title: Community Ecology Package: Ordination, Diversity and\n### Dissimilarities\n### Aliases: vegan-package vegan\n### Keywords: package multivariate univar models spatial nonparametric\n### htest regression\n\n### ** Examples\n\n### Example 1: Unconstrained ordination\n## NMDS\ndata(varespec)\ndata(varechem)\nord <- metaMDS(varespec)\nplot(ord, type = "t")\n## Fit environmental variables\nef <- envfit(ord, varechem)\nef\nplot(ef, p.max = 0.05)\n### Example 2: Constrained ordination (RDA)\n## The example uses the formula interface to define the model\ndata(dune)\ndata(dune.env)\n## No constraints: PCA\nmod0 <- rda(dune ~ 1, dune.env)\nmod0\nplot(mod0)\n## All environmental variables: Full model\nmod1 <- rda(dune ~ ., dune.env)\nmod1\nplot(mod1)\n## Automatic selection of variables by permutation P-values\nmod <- ordistep(mod0, scope=formula(mod1))\nmod\nplot(mod)\n## Permutation test for all variables\nanova(mod)\n## Permutation test of "type III" effects, or significance when a term\n## is added to the model after all other terms\nanova(mod, by = "margin")\n## Plot only sample plots, use different symbols and draw SD ellipses \n## for Management classes\nplot(mod, display = "sites", type = "n")\nwith(dune.env, points(mod, disp = "si", pch = as.numeric(Management)))\nwith(dune.env, legend("topleft", levels(Management), pch = 1:4,\n title = "Management"))\nwith(dune.env, ordiellipse(mod, Management, label = TRUE))\n## add fitted surface of diversity to the model\nordisurf(mod, diversity(dune), add = TRUE)\n### Example 3: analysis of dissimilarities a.k.a. 
non-parametric\n### permutational anova\nadonis2(dune ~ ., dune.env)\nadonis2(dune ~ Management + Moisture, dune.env)\n\n\n"}
{"package":"vegan","topic":"vegemite","snippet":"### Name: vegemite\n### Title: Display Compact Ordered Community Tables\n### Aliases: vegemite tabasco coverscale\n### Keywords: print manip hplot\n\n### ** Examples\n\ndata(varespec)\n## Print only more common species\nfreq <- apply(varespec > 0, 2, sum)\nvegemite(varespec, scale="Hult", sp.ind = freq > 10)\n## Order by correspondence analysis, use Hill scaling and layout:\ndca <- decorana(varespec)\nvegemite(varespec, dca, "Hill", zero="-")\n## Show one class from cluster analysis, but retain the ordering above\nclus <- hclust(vegdist(varespec))\ncl <- cutree(clus, 3)\nsel <- vegemite(varespec, use=dca, select = cl == 3, scale="Br")\n## Re-create previous\nvegemite(varespec, sp=sel$sp, site=sel$site, scale="Hult")\n## Re-order clusters by ordination\nclus <- as.dendrogram(clus)\nclus <- reorder(clus, scores(dca, choices=1, display="sites"), agglo.FUN = mean)\nvegemite(varespec, clus, scale = "Hult")\n\n## Abundance values have such a wide range that they must be rescaled\ntabasco(varespec, dca, scale="Braun")\n\n## Classification trees for species\ndata(dune, dune.taxon)\ntaxontree <- hclust(taxa2dist(dune.taxon))\nplotree <- hclust(vegdist(dune), "average")\n## Automatic reordering of clusters\ntabasco(dune, plotree, sp.ind = taxontree)\n## No reordering of taxonomy\ntabasco(dune, plotree, sp.ind = taxontree, Colv = FALSE)\n## Species cluster: most dissimilarity indices do a bad job when\n## comparing rare and common species, but Raup-Crick makes sense\nsptree <- hclust(vegdist(t(dune), "raup"), "average")\ntabasco(dune, plotree, sptree)\n\n\n"}
{"package":"vegan","topic":"wascores","snippet":"### Name: wascores\n### Title: Weighted Averages Scores for Species\n### Aliases: wascores eigengrad\n### Keywords: multivariate univar\n\n### ** Examples\n\ndata(varespec)\ndata(varechem)\nvare.dist <- vegdist(wisconsin(varespec))\nvare.mds <- monoMDS(vare.dist)\nvare.points <- postMDS(vare.mds$points, vare.dist)\nvare.wa <- wascores(vare.points, varespec)\nplot(scores(vare.points), pch="+", asp=1)\ntext(vare.wa, rownames(vare.wa), cex=0.8, col="blue")\n## Omit rare species (frequency <= 4)\nfreq <- apply(varespec>0, 2, sum)\nplot(scores(vare.points), pch="+", asp=1)\ntext(vare.wa[freq > 4,], rownames(vare.wa)[freq > 4],cex=0.8,col="blue")\n## Works for environmental variables, too.\nwascores(varechem, varespec)\n## And the strengths of these variables are:\neigengrad(varechem, varespec)\n\n\n"}
{"package":"vegan","topic":"wcmdscale","snippet":"### Name: wcmdscale\n### Title: Weighted Classical (Metric) Multidimensional Scaling\n### Aliases: wcmdscale scores.wcmdscale plot.wcmdscale\n### Keywords: multivariate\n\n### ** Examples\n\n## Correspondence analysis as a weighted principal coordinates\n## analysis of Euclidean distances of Chi-square transformed data\ndata(dune)\nrs <- rowSums(dune)/sum(dune)\nd <- dist(decostand(dune, "chi"))\nord <- wcmdscale(d, w = rs, eig = TRUE)\n## Ordinary CA\nca <- cca(dune)\n\n## IGNORE_RDIFF_BEGIN\n## Eigenvalues are numerically similar\nca$CA$eig - ord$eig\n## Configurations are similar when site scores are scaled by\n## eigenvalues in CA\nprocrustes(ord, ca, choices=1:19, scaling = "sites")\n## IGNORE_RDIFF_END\n\nplot(procrustes(ord, ca, choices=1:2, scaling="sites"))\n## Reconstruction of non-Euclidean distances with negative eigenvalues\nd <- 
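vegdist(dune)\n## Bray-Curtis is not Euclidean-embeddable, so some eigenvalues will be\n## negative; count them first (all eigenvalues are kept in the 'eig'\n## component):\nsum(wcmdscale(d, eig = TRUE)$eig < 0)\nd <- 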
vegdist(dune)\nord <- wcmdscale(d, eig = TRUE)\n## Only positive eigenvalues:\ncor(d, dist(ord$points))\n## Correction with negative eigenvalues:\ncor(d, sqrt(dist(ord$points)^2 - dist(ord$negaxes)^2))\n\n\n"} {"package":"symmetry","topic":"symmetry_test","snippet":"### Name: symmetry_test\n### Title: Perform symmetry tests\n### Aliases: symmetry_test symmetry_test.default symmetry_test.lm\n### symmetry_test.fGARCH\n\n### ** Examples\n\nset.seed(1)\n\n# IID samples\nx <- rnorm(50)\nsymmetry_test(x, \"MOI\", bootstrap = FALSE, k = 3, mu = 0)\nsymmetry_test(x, \"MOI\", bootstrap = TRUE, k = 3, mu = 0)\nsymmetry_test(x, \"MOI\", bootstrap = TRUE, k = 3)\nx <- rsl(50, alpha = 1.5)\nsymmetry_test(x, \"MOI\", bootstrap = FALSE, k = 3, mu = 0)\nsymmetry_test(x, \"MOI\", bootstrap = TRUE, k = 3, mu = 0)\nsymmetry_test(x, \"MOI\", bootstrap = TRUE, k = 3)\n\n# Linear models\nlin_model <- lm(dist ~ speed, cars)\nsymmetry_test(lin_model, \"B1\")\n\n# Garch models\nlibrary(fGarch)\nspecskew19 = fGarch::garchSpec(model = list(omega = 0.1,\n alpha = 0.3,\n beta = 0.3,\n skew = 1.9),\n cond.dist = \"snorm\")\n\nx <- fGarch::garchSim(specskew19, n = 500)\ng <- fGarch::garchFit(~garch(1,1), x, cond.dist = \"QMLE\",\n include.mean = FALSE, trace = FALSE)\n## No test: \nsymmetry_test(g, \"FM\", B=400, burn = 100)\n## End(No test) # slower\n## No test: \nsymmetry_test(g, \"FM\", B=400, burn = 100, approximate = TRUE)\n## End(No test)\n\n\n\n"} {"package":"sms","topic":"addDataAssociation","snippet":"### Name: addDataAssociation\n### Title: addDataAssociation\n### Aliases: addDataAssociation\n\n### ** Examples\n\nlibrary(sms)\ndata(survey)\ndata(census)\nin.lexicon=createLexicon()\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\nprint(in.lexicon)\n\n\n"} {"package":"sms","topic":"calculate_error","snippet":"### Name: calculate_error\n### Title: Calculate error of a selection\n### Aliases: calculate_error\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the data\ndata(census)\nin.lexicon=createLexicon() # Create a data lexicon for holding the associated column names.\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\n#Select the first area from the census table\nthis_area=as.data.frame(census[1,]) \n\n#make a random selection of individuals for this area.\nselection=random_panel_selection( survey, this_area$population ) \n\n#evaluate the Total Absolute Error (TAE) for this selection\nerror=calculate_error( selection, this_area, in.lexicon ) \nprint( error ) # print the error of the selection\n\n\n"} {"package":"sms","topic":"check_lexicon","snippet":"### Name: check_lexicon\n### Title: check_lexicon\n### Aliases: check_lexicon\n\n### ** Examples\n\nlibrary(sms)\ndf=createLexicon()\ndf=addDataAssociation(df, c(\"ena\",\"duo\"))\ncheck_lexicon(df) \n\n\n"} {"package":"sms","topic":"createLexicon","snippet":"### Name: createLexicon\n### Title: createLexicon\n### Aliases: createLexicon\n\n### ** Examples\n\nlibrary(sms)\ndata(survey)\ndata(census)\nin.lexicon=createLexicon()\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\nprint(in.lexicon)\n\n\n"} {"package":"sms","topic":"find_best_selection","snippet":"### Name: find_best_selection\n### Title: find_best_selection\n### Aliases: find_best_selection\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the 
data\ndata(census)\nin.lexicon=createLexicon() # Create a data lexicon for holding the associated column names.\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\nthis_area=as.data.frame(census[1,]) #Select the first area from the census table\ninsms= new(\"microsimulation\",census=census,panel=survey, lexicon=in.lexicon, iterations=10)\nbest=find_best_selection(this_area, insms)\nprint(best)\n\n\n"} {"package":"sms","topic":"find_best_selection_SA","snippet":"### Name: find_best_selection_SA\n### Title: find_best_selection_SA\n### Aliases: find_best_selection_SA\n\n### ** Examples\n\nlibrary(sms)\ndata(survey)\ndata(census)\nin.lexicon=createLexicon()\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\nthis_area=as.data.frame(census[1,]) #Select the first area from the census table\ninsms= new(\"microsimulation\",census=census, panel=survey, lexicon=in.lexicon, iterations=5)\nmyselection= find_best_selection_SA( this_area, insms, inseed=1900)\nprint(myselection)\n\n\n"} {"package":"sms","topic":"mysetSeed","snippet":"### Name: mysetSeed\n### Title: mysetSeed\n### Aliases: mysetSeed\n\n### ** Examples\n\nlibrary(sms)\nsms::mysetSeed(1900)\n\n\n"} {"package":"sms","topic":"plotTries","snippet":"### Name: plotTries\n### Title: Plot selection results\n### Aliases: plotTries\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the data\ndata(census)\nin.lexicon=createLexicon() # Create a data lexicon for holding the associated column names.\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\nansms = new(\"microsimulation\", census=census, panel=survey, lexicon=in.lexicon, iterations=5)\nsa = run_parallel_SA(ansms, inseed=1900)\nplotTries( sa, 1 )\n\n\n"} {"package":"sms","topic":"random_panel_selection","snippet":"### Name: random_panel_selection\n### Title: random_panel_selection\n### Aliases: random_panel_selection\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the data\ndata(census)\n \nsome.individuals=random_panel_selection(survey,4)\nprint(some.individuals) # Print the selection of individuals\n\n\n"} {"package":"sms","topic":"run_parallel_HC","snippet":"### Name: run_parallel_HC\n### Title: run_parallel_HC\n### Aliases: run_parallel_HC\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the data\ndata(census)\nin.lexicon=createLexicon() # Create a data lexicon for holding the associated column names.\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\ninsms= new(\"microsimulation\",census=census,panel=survey, lexicon=in.lexicon, iterations=10)\nre=run_parallel_HC(insms, inseed=1900)\nprint(re)\n\n\n\n"} {"package":"sms","topic":"run_parallel_SA","snippet":"### Name: run_parallel_SA\n### Title: run_parallel_SA\n### Aliases: run_parallel_SA\n\n### ** Examples\n\nlibrary(sms)\ndata(survey)\ndata(census)\nin.lexicon=createLexicon()\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\ninsms= new(\"microsimulation\",census=census, panel=survey, lexicon=in.lexicon, iterations=5)\nresults= run_parallel_SA(insms, inseed=1900)\nprint(results)\n\n\n"} {"package":"sms","topic":"run_serial","snippet":"### Name: run_serial\n### Title: Run_serial\n### Aliases: run_serial\n\n### ** 
Examples\n\nlibrary(sms)\ndata(survey)\ndata(census)\nin.lexicon=createLexicon()\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\ninsms= new(\"microsimulation\",census=census, panel=survey, lexicon=in.lexicon, iterations=5)\nresults= run_serial( insms)\nprint(results)\n\n\n"} {"package":"sms","topic":"selection_for_area","snippet":"### Name: selection_for_area\n### Title: selection_for_area\n### Aliases: selection_for_area\n\n### ** Examples\n\nlibrary(sms)\ndata(survey) #load the data\ndata(census)\nin.lexicon=createLexicon() # Create a data lexicon for holding the associated column names.\nin.lexicon=addDataAssociation(in.lexicon, c(\"he\",\"he\"))\nin.lexicon=addDataAssociation(in.lexicon, c(\"females\",\"female\"))\n\n# Select the first area from the census table\nthis_area=as.data.frame(census[1,]) \n\n#make a representation for this area.\nsel=selection_for_area(survey, this_area, in.lexicon) \n\nprint(sel) #print the representation\n\n\n"} {"package":"tomba","topic":"account","snippet":"### Name: account\n### Title: Account\n### Aliases: account account,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- account(client)\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"autocomplete","snippet":"### Name: autocomplete\n### Title: Company Autocomplete\n### Aliases: autocomplete autocomplete,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- autocomplete(client,search=\"google\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"client","snippet":"### Name: client\n### Title: Tomba Client\n### Aliases: client client,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- client(client,\"https://api.tomba.io/v1/me\",NULL)\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"count","snippet":"### Name: count\n### Title: Email Count\n### Aliases: count count,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- count(client,domain=\"tomba.io\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"domain_search","snippet":"### Name: domain_search\n### Title: Domain search\n### Aliases: domain_search domain_search,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- domain_search(client,domain=\"stripe\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"email_finder","snippet":"### Name: email_finder\n### Title: Email Finder\n### Aliases: email_finder email_finder,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- email_finder(client,fname=\"FIRST_NAME\",lname=\"LAST_NAME\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"email_sources","snippet":"### Name: email_sources\n### Title: Email Sources\n### Aliases: email_sources email_sources,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- email_sources(client,email=\"info@tomba.io\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"email_verifier","snippet":"### Name: email_verifier\n### Title: Email Verifier\n### Aliases: email_verifier email_verifier,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- 
email_verifier(client,email=\"info@tomba.io\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"logs","snippet":"### Name: logs\n### Title: Logs\n### Aliases: logs logs,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- logs(client)\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"status","snippet":"### Name: status\n### Title: Domain status\n### Aliases: status status,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- status(client,domain=\"gmail.com\")\n## End(Not run)\n\n\n"} {"package":"tomba","topic":"usage","snippet":"### Name: usage\n### Title: Usage\n### Aliases: usage usage,Tomba-method\n\n### ** Examples\n\n## Not run: \n##D client <- Tomba(key=\"ta_xxxx\",secret=\"ts_xxxx\")\n##D result <- usage(client)\n## End(Not run)\n\n\n"} {"package":"eff2","topic":"estimateEffect","snippet":"### Name: estimateEffect\n### Title: Estimate the total causal effect\n### Aliases: estimateEffect\n\n### ** Examples\n\ndata(\"ex1\")\nresult <- estimateEffect(ex1$data, c(5,3), 7, ex1$amat.cpdag, bootstrap=TRUE)\nprint(result$effect)\nprint(result$effect - 1.96 * sqrt(diag(result$se.cov)))\nprint(result$effect + 1.96 * sqrt(diag(result$se.cov)))\n# compare with truth\nprint(ex1$true.effects)\n\n## Not run: \n##D # throws an error because the effect is not identified\n##D estimateEffect(ex1$data, 3, 7, ex1$amat.cpdag)\n## End(Not run)\n\n\n"} {"package":"eff2","topic":"isIdentified","snippet":"### Name: isIdentified\n### Title: Check if a total causal effect is identified\n### Aliases: isIdentified\n\n### ** Examples\n\ndata(\"ex1\")\n# identified\nisIdentified(ex1$amat.cpdag, c(3, 5), 7)\n# not identified\nisIdentified(ex1$amat.cpdag, 3, 7)\nisIdentified(ex1$amat.cpdag, c(3, 5), 10)\n\n\n"} {"package":"rchallenge","topic":"icon","snippet":"### Name: icon\n### Title: HTML code for icons.\n### Aliases: icon\n\n### ** Examples\n\nrmd <- '\n```{r}\nlibrary(rchallenge)\n```\n\n`r icon(\"fa-user\")`\n`r icon(\"fa-user fa-lg\")`\n`r icon(\"fa-user fa-2x\")`\n`r icon(\"fa-user fa-3x\")`\n`r icon(\"fa-user fa-3x fa-border\")`\n'\nfile <- tempfile()\ncat(rmd, file=file)\nwriteLines(readLines(file))\nif (rmarkdown::pandoc_available('1.12.3')) {\n rmarkdown::render(file)\n}\n\n\n"} {"package":"rchallenge","topic":"new_challenge","snippet":"### Name: new_challenge\n### Title: Install a new challenge.\n### Aliases: new_challenge\n\n### ** Examples\n\npath <- tempdir()\nwd <- setwd(path)\n# english version\nnew_challenge()\n# french version\nnew_challenge(template = \"fr\")\nsetwd(wd)\nunlink(path)\n\n\n"} {"package":"rchallenge","topic":"new_team","snippet":"### Name: new_team\n### Title: Create new teams submission folders in your challenge.\n### Aliases: new_team\n\n### ** Examples\n\npath <- tempdir()\nwd <- setwd(path)\nnew_challenge()\nnew_team(\"team_foo\", \"team_bar\")\nsetwd(wd)\nunlink(path)\n\n\n"} {"package":"rchallenge","topic":"publish","snippet":"### Name: publish\n### Title: Render your challenge R Markdown script to a HTML page.\n### Aliases: publish\n\n### ** Examples\n\npath <- tempdir()\nwd <- setwd(path)\nnew_challenge()\noutdir = tempdir()\nif (rmarkdown::pandoc_available('1.12.3')) {\n publish(output_dir = outdir, output_options = list(self_contained = FALSE))\n}\nunlink(outdir)\nsetwd(wd)\nunlink(path)\n\n\n"} {"package":"piton","topic":"peg_sum","snippet":"### Name: peg_sum\n### Title: Example PEG\n### Aliases: peg_sum\n\n### ** Examples\n\n# Simple 
example\npeg_sum(\"1,2, 5, 91, 34\")\n\n\n\n"} {"package":"RI2by2","topic":"AE.CI","snippet":"### Name: AE.CI\n### Title: Attributable effects based confidence interval for a treatment\n### effect on a binary outcome\n### Aliases: AE.CI\n### Keywords: attributable effects randomization inference\n\n### ** Examples\n\n ex = matrix(c(8,2,3,7),2,2,byrow=TRUE)\n AE.CI(ex,0.05)\n\n\n"} {"package":"RI2by2","topic":"Perm.CI.RLH","snippet":"### Name: Perm.CI.RLH\n### Title: Permutation test confidence interval for a treatment effect on a\n### binary outcome\n### Aliases: Perm.CI.RLH\n\n### ** Examples\n\nex = matrix(c(11,1,7,21),2,2,byrow=TRUE)\nPerm.CI.RLH(ex,0.05)\n\nex = matrix(c(7,5,1,27),2,2,byrow=TRUE)\nPerm.CI.RLH(ex,0.05)\nPerm.CI.RLH(ex,0.05, verbose=TRUE)\n\nex = matrix(c(33,15,11,37),2,2,byrow=TRUE)\nPerm.CI.RLH(ex,0.05, total_tests=1000)\n## No test: \nPerm.CI.RLH(ex,0.05)\n## End(No test)\n\n\n"} {"package":"RI2by2","topic":"Perm.CI","snippet":"### Name: Perm.CI\n### Title: Permutation test confidence interval for a treatment effect on a\n### binary outcome\n### Aliases: Perm.CI\n### Keywords: permutation test randomization inference\n\n### ** Examples\n\n ex = matrix(c(8,2,3,7),2,2,byrow=TRUE)\n Perm.CI(ex,0.05,100)\n\n\n"} {"package":"RI2by2","topic":"Robins.CI","snippet":"### Name: Robins.CI\n### Title: Asymptotic confidence interval for a treatment effect on a\n### binary outcome\n### Aliases: Robins.CI\n### Keywords: randomization inference\n\n### ** Examples\n\n#Example 1 from Robins (1988)\nex = matrix(c(40,60,15,85),2,2,byrow=TRUE)\nRobins.CI(ex,0.05)\n\n\n"} {"package":"IRTShiny","topic":"startIRT","snippet":"### Name: startIRT\n### Title: This function will start IRTShiny\n### Aliases: startIRT\n### Keywords: IRT\n\n### ** Examples\n\n## Not run: \n##D library(shiny)\n##D startIRT()\n## End(Not run)\n\n\n"} {"package":"dann","topic":"dann.data.frame","snippet":"### Name: dann.data.frame\n### Title: Discriminant Adaptive Nearest Neighbor Classification\n### Aliases: dann.data.frame\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\ny <- train$Y\nx <- train[, 1:2]\n\ndann(x, y)\n\n\n"} {"package":"dann","topic":"dann.formula","snippet":"### Name: dann.formula\n### Title: Discriminant Adaptive Nearest Neighbor Classification\n### Aliases: dann.formula\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\ndann(Y ~ X1 + X2, train)\n\n\n"} {"package":"dann","topic":"dann.matrix","snippet":"### Name: dann.matrix\n### Title: Discriminant Adaptive Nearest Neighbor Classification\n### Aliases: dann.matrix\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\ny <- as.numeric(train$Y)\nx <- cbind(train$X1, train$X2)\n\ndann(x, y)\n\n\n"} {"package":"dann","topic":"dann.recipe","snippet":"### Name: dann.recipe\n### Title: Discriminant Adaptive Nearest Neighbor Classification\n### Aliases: dann.recipe\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(recipes)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", 
\"Y\")\n\nrec_obj <- recipe(Y ~ X1 + X2, data = train)\n\ndann(rec_obj, train)\n\n\n"} {"package":"dann","topic":"graph_eigenvalues.data.frame","snippet":"### Name: graph_eigenvalues.data.frame\n### Title: A helper for sub_dann\n### Aliases: graph_eigenvalues.data.frame\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\n#' # Add 5 unrelated variables\ntrain <- train %>%\n mutate(\n U1 = runif(300, -1, 1),\n U2 = runif(300, -1, 1),\n U3 = runif(300, -1, 1),\n U4 = runif(300, -1, 1),\n U5 = runif(300, -1, 1)\n )\n\ny <- train$Y\nx <- cbind(train[, 1:2], train[, 4:8])\n\ngraph_eigenvalues(x, y)\n\n\n"} {"package":"dann","topic":"graph_eigenvalues.formula","snippet":"### Name: graph_eigenvalues.formula\n### Title: A helper for sub_dann\n### Aliases: graph_eigenvalues.formula\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\n# Add 5 unrelated variables\ntrain <- train %>%\n mutate(\n U1 = runif(300, -1, 1),\n U2 = runif(300, -1, 1),\n U3 = runif(300, -1, 1),\n U4 = runif(300, -1, 1),\n U5 = runif(300, -1, 1)\n )\n\ngraph_eigenvalues(Y ~ X1 + X2 + U1 + U2 + U3 + U4 + U5, train)\n\n\n"} {"package":"dann","topic":"graph_eigenvalues.matrix","snippet":"### Name: graph_eigenvalues.matrix\n### Title: A helper for sub_dann\n### Aliases: graph_eigenvalues.matrix\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\n# Add 5 unrelated variables\ntrain <- train %>%\n mutate(\n U1 = runif(300, -1, 1),\n U2 = runif(300, -1, 1),\n U3 = runif(300, -1, 1),\n U4 = runif(300, -1, 1),\n U5 = runif(300, -1, 1)\n )\n\ny <- as.numeric(train$Y)\nx <- cbind(train$X1, train$X2, train$U1, train$U2, train$U3, train$U4, train$U5)\n\ngraph_eigenvalues(x, y)\n\n\n"} {"package":"dann","topic":"graph_eigenvalues.recipe","snippet":"### Name: graph_eigenvalues.recipe\n### Title: A helper for sub_dann\n### Aliases: graph_eigenvalues.recipe\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(recipes)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\n# Add 5 unrelated variables\ntrain <- train %>%\n mutate(\n U1 = runif(300, -1, 1),\n U2 = runif(300, -1, 1),\n U3 = runif(300, -1, 1),\n U4 = runif(300, -1, 1),\n U5 = runif(300, -1, 1)\n )\n\nrec_obj <- recipe(Y ~ X1 + X2 + U1 + U2 + U3 + U4 + U5, data = train)\n\ngraph_eigenvalues(rec_obj, train)\n\n\n"} {"package":"dann","topic":"predict.dann","snippet":"### Name: predict.dann\n### Title: Discriminant Adaptive Nearest Neighbor Classification\n### Aliases: predict.dann\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\ntest <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(test) <- c(\"X1\", \"X2\", \"Y\")\n\nmodel <- dann(Y ~ X1 + X2, train)\npredict(model, test, \"class\")\n\npredict(model, test, \"prob\")\n\n\n"} {"package":"dann","topic":"predict.sub_dann","snippet":"### Name: predict.sub_dann\n### Title: Discriminant 
Adaptive Nearest Neighbor With Subspace Reduction\n### Aliases: predict.sub_dann\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\ntest <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(test) <- c(\"X1\", \"X2\", \"Y\")\n\nmodel <- sub_dann(Y ~ X1 + X2, train)\npredict(model, test, \"class\")\n\npredict(model, test, \"prob\")\n\n\n"} {"package":"dann","topic":"print.dann","snippet":"### Name: print.dann\n### Title: Print dann model\n### Aliases: print.dann\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\nmodel <- dann(Y ~ X1 + X2, train)\nprint(model)\n\n\n"} {"package":"dann","topic":"print.sub_dann","snippet":"### Name: print.sub_dann\n### Title: Print dann model.\n### Aliases: print.sub_dann\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\nmodel <- sub_dann(Y ~ X1 + X2, train)\nprint(model)\n\n\n"} {"package":"dann","topic":"sub_dann.data.frame","snippet":"### Name: sub_dann.data.frame\n### Title: Discriminant Adaptive Nearest Neighbor With Subspace Reduction\n### Aliases: sub_dann.data.frame\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\ny <- train$Y\nx <- train[, 1:2]\n\nsub_dann(x, y)\n\n\n"} {"package":"dann","topic":"sub_dann.formula","snippet":"### Name: sub_dann.formula\n### Title: Discriminant Adaptive Nearest Neighbor With Subspace Reduction\n### Aliases: sub_dann.formula\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\nsub_dann(Y ~ X1 + X2, train)\n\n\n"} {"package":"dann","topic":"sub_dann.matrix","snippet":"### Name: sub_dann.matrix\n### Title: Discriminant Adaptive Nearest Neighbor With Subspace Reduction\n### Aliases: sub_dann.matrix\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\ny <- as.numeric(train$Y)\nx <- cbind(train$X1, train$X2)\n\nsub_dann(x, y)\n\n\n"} {"package":"dann","topic":"sub_dann.recipe","snippet":"### Name: sub_dann.recipe\n### Title: Discriminant Adaptive Nearest Neighbor With Subspace Reduction\n### Aliases: sub_dann.recipe\n\n### ** Examples\n\nlibrary(dann)\nlibrary(mlbench)\nlibrary(magrittr)\nlibrary(dplyr)\nlibrary(recipes)\n\nset.seed(1)\ntrain <- mlbench.circle(300, 2) %>%\n tibble::as_tibble()\ncolnames(train) <- c(\"X1\", \"X2\", \"Y\")\n\nrec_obj <- recipe(Y ~ X1 + X2, data = train)\n\nsub_dann(rec_obj, train)\n\n\n"} {"package":"agricolaeplotr","topic":"DOE_obj","snippet":"### Name: DOE_obj\n### Title: Measures of a Field Design\n### Aliases: DOE_obj\n\n### ** Examples\n\nlibrary(agricolae)\nlibrary(agricolaeplotr)\ntrt = c(2,3,4,5,6)\noutdesign1 <-design.crd(trt,r=5,serie=2,2543,'Mersenne-Twister')\np <- plot_design_crd(outdesign1,\n ncols = 7,\n nrows = 4,\n width = 10,\n 
height = 10,\n reverse_y = TRUE)\nstats <- DOE_obj(p)\nstats\n\n\n"} {"package":"agricolaeplotr","topic":"citations","snippet":"### Name: citations\n### Title: Citation\n### Aliases: citations\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nlibrary(raster)\ncitations()\n\n\n"} {"package":"agricolaeplotr","topic":"full_control_positions","snippet":"### Name: full_control_positions\n### Title: full_control_positions\n### Aliases: full_control_positions\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nvarieties<-c('perricholi','yungay','maria bonita','tomasa')\noutdesign <-design.youden(varieties,r=2,serie=2,seed=23)\ndesign <- outdesign$book\ndesign\np <- full_control_positions(design,\"col\",\"row\",\"varieties\",\"plots\",\n width=3,height=4.5,\n space_width=0.5,space_height=0.5,\n shift_x=(-0.5*3) + (-0.5*3*0.5),shift_y=-0.5*4.5 + (-0.5*4.5*0.5))\np\np <- full_control_positions(design,\"col\",\"row\",\"varieties\",\"plots\",\n width=3,height=4.5,\n space_width=0.13,space_height=0.445,\n shift_x=(-0.5*3) + (-0.5*3*(1-0.13)),shift_y=-0.5*4.5 + (-0.5*4.5*(1-0.445)))\n p\n\np <- full_control_positions(design,\"col\",\"row\",\"varieties\",\"plots\",\n width=3,height=4.5,\n space_width=1,space_height=1,\n shift_x=-0.5*3,shift_y=-0.5*4.5)\np\n\np <- full_control_positions(design,\"col\",\"row\",\"varieties\",\"plots\",\n width=3,height=4.5,\n space_width=0.93,space_height=0.945,\n start_origin = TRUE)\n p\n\n\n"} {"package":"agricolaeplotr","topic":"make_polygons","snippet":"### Name: make_polygons\n### Title: make_polygons\n### Aliases: make_polygons\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt = c(2,3,4)\noutdesign1 <-design.crd(trt,r=5,serie=2,2543,'Mersenne-Twister')\nplt <- plot_design_crd(outdesign1,ncols = 13,nrows = 3)\nspat_df <- make_polygons(plt)\nspat_df\n\n\n"} {"package":"agricolaeplotr","topic":"plot_alpha","snippet":"### Name: plot_alpha\n### Title: Plot Alpha design Experiments\n### Aliases: plot_alpha\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-1:30\nt <- length(trt)\n# size block k\nk<-3\n# Blocks s\ns<-t/k\n# replications r\nr <- 2\noutdesign<- design.alpha(trt,k,r,serie=2)\nplot_alpha(outdesign)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_bib","snippet":"### Name: plot_bib\n### Title: Plot Randomized Balanced Incomplete Block Designs\n### Aliases: plot_bib\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c('A','B','C','D')\nk<-3\noutdesign<-design.bib(trt,k,serie=2,seed =41,kinds ='Super-Duper') # seed = 41\nplot_bib(outdesign)\n#now let us change position of the columns\nplot_bib(outdesign,reverse_x = TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_cyclic","snippet":"### Name: plot_cyclic\n### Title: Plot Cyclic Design\n### Aliases: plot_cyclic\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nk <- 2\nr <- 6\ntrt <-c('CIP-101','CIP-201','CIP-301','CIP-401','CIP-501',LETTERS[1:2])\noutdesign<- design.cyclic(trt,k=k, r=r, serie=3, rowcol=TRUE)\nplot_cyclic(outdesign, factor_name = 'trt')\n\n\n"} {"package":"agricolaeplotr","topic":"plot_dau","snippet":"### Name: plot_dau\n### Title: Plot Design of Augmented Blocks (dau)\n### Aliases: plot_dau\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('A','B','C','D','E','F')\nT2<-letters[19:26]\noutdesign <-design.dau(T1,T2, r=5,serie=2)\nplot_dau(outdesign)\nplot_dau(outdesign,reverse_y = TRUE)\n\n\n"} 
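The agricolaeplotr plot_* functions shown in the snippets above return ordinary ggplot2 objects (the theme_poster() and theme_pres() examples later in this section add themes to them with +), so standard ggplot2 tooling applies to the field layouts. A minimal sketch, reusing the design.dau()/plot_dau() example directly above; the output filename 'dau_layout.png' is hypothetical:
library(agricolaeplotr)
library(agricolae)
library(ggplot2)
T1 <- c('A','B','C','D','E','F')
T2 <- letters[19:26]
outdesign <- design.dau(T1, T2, r=5, serie=2)
p <- plot_dau(outdesign)   # a ggplot object
p + theme_pres()           # restyle with one of the package themes
ggsave('dau_layout.png', p, width = 8, height = 6, dpi = 300)  # hypothetical filename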
{"package":"agricolaeplotr","topic":"plot_design.factorial_crd","snippet":"### Name: plot_design.factorial_crd\n### Title: Plot Factorial Complete Randomized Designs (crd)\n### Aliases: plot_design.factorial_crd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c(3,2) # factorial 3x2\noutdesign <- design.ab(trt, r=3, serie=2,design = 'crd')\nplot_design.factorial_crd(outdesign,ncols = 8,nrows = 6)\nplot_design.factorial_crd(outdesign,reverse_y = TRUE,ncols = 8,nrows = 6)\nplot_design.factorial_crd(outdesign,reverse_y = TRUE,reverse_x = TRUE,ncols = 8,nrows = 6)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_design.factorial_lsd","snippet":"### Name: plot_design.factorial_lsd\n### Title: Plot Factorial Latin Square Designs (lsd)\n### Aliases: plot_design.factorial_lsd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c(3,2) # factorial 3x2\noutdesign <-design.ab(trt, r=3, serie=2,design = 'lsd')\nplot_design.factorial_lsd(outdesign,factor_name = 'B',reverse_x = TRUE)\n\n\n\n"} {"package":"agricolaeplotr","topic":"plot_design.factorial_rcbd","snippet":"### Name: plot_design.factorial_rcbd\n### Title: Plot Factorial Designs with rcbd Design\n### Aliases: plot_design.factorial_rcbd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c(2,4)\nk=6\noutdesign<-design.ab(trt, r=k, serie=3,design='rcbd')\nplot_design.factorial_rcbd(design=outdesign,factor_name = 'B')\nplot_design.factorial_rcbd(outdesign,reverse_y = TRUE,reverse_x = TRUE)\n\n\n\n"} {"package":"agricolaeplotr","topic":"plot_design_crd","snippet":"### Name: plot_design_crd\n### Title: Plot Complete Randomized Design\n### Aliases: plot_design_crd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt = c(2,3,4,5,6)\noutdesign1 <-design.crd(trt,r=5,serie=2,2543,'Mersenne-Twister')\nplot_design_crd(outdesign1,ncols = 13,nrows = 3)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_fieldhub","snippet":"### Name: plot_fieldhub\n### Title: Plot FielDHub Design\n### Aliases: plot_fieldhub\n\n### ** Examples\n\n## Not run: \n##D library(agricolaeplotr)\n##D library(FielDHub)\n##D H <- paste(\"H\", 1:4, sep = \"\")\n##D V <- paste(\"V\", 1:5, sep = \"\")\n##D \n##D strip1 <- FielDHub::strip_plot(Hplots = H,\n##D Vplots = V,\n##D b = 1,\n##D l = 1,\n##D plotNumber = 101,\n##D planter = \"serpentine\",\n##D locationNames = \"A\",\n##D seed = 333)\n##D \n##D \n##D strip1$fieldBook$ROW <- as.numeric(ordered(strip1$fieldBook$VSTRIP,\n##D levels = unique(strip1$fieldBook$VSTRIP)))\n##D strip1$fieldBook$COLUMN <- as.numeric(ordered(strip1$fieldBook$HSTRIP,\n##D levels = unique(strip1$fieldBook$HSTRIP)))\n##D \n##D plot_fieldhub(strip1,\n##D x = \"ROW\",\n##D y = \"COLUMN\",\n##D labels = \"HSTRIP\",\n##D factor_name = \"HSTRIP\",\n##D width = 12,\n##D height = 10,\n##D reverse_y = FALSE,\n##D reverse_x = FALSE)\n##D \n## End(Not run)\n\n\n\n"} {"package":"agricolaeplotr","topic":"plot_graeco","snippet":"### Name: plot_graeco\n### Title: Plot Graeco Latin Square Design\n### Aliases: plot_graeco\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d')\nT2<-c('v','w','x','y','z','zz')\noutdesign <- design.graeco(trt1=T1, trt2=T2, serie = 2,\n seed = 0, kinds = 'Super-Duper',randomization=TRUE)\nplot_graeco(outdesign, factor_name = 'T2',reverse_y = TRUE)\nplot_graeco(outdesign, factor_name = 'T2',reverse_x = TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_latin_square","snippet":"### Name: plot_latin_square\n### Title: Plot Latin 
Square Design\n### Aliases: plot_latin_square\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-LETTERS[1:9]\noutdesign<- design.lsd(trt,serie=2)\nplot_latin_square(outdesign, reverse_y = TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_lattice_simple","snippet":"### Name: plot_lattice_simple\n### Title: Plot Simple Lattice Design\n### Aliases: plot_lattice_simple\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-1:100\noutdesign<-design.lattice(trt,r=2,serie=3) # simple lattice design, 10x10\nplot_lattice_simple(outdesign,width = 2, height = 1)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_lattice_triple","snippet":"### Name: plot_lattice_triple\n### Title: Plot Triple Lattice Design\n### Aliases: plot_lattice_triple\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-LETTERS[1:9]\noutdesign<-design.lattice(trt,r=3,serie=2)\nplot_lattice_triple(design=outdesign,reverse_x=TRUE)\n\n\n\n"} {"package":"agricolaeplotr","topic":"plot_rcdb","snippet":"### Name: plot_rcdb\n### Title: Plot randomized complete block designs\n### Aliases: plot_rcdb\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\n# 5 treatments and 6 blocks\ntrt<-c('A','B','C','D','E')\noutdesign <-design.rcbd(trt,6,serie=2,986,'Wichmann-Hill') # seed = 986\nplot_rcdb(outdesign)\nplot_rcdb(outdesign,reverse_y = TRUE,reverse_x = TRUE)\n\n\n\n"} {"package":"agricolaeplotr","topic":"plot_split_crd","snippet":"### Name: plot_split_crd\n### Title: Plot Split Plot Designs (crd)\n### Aliases: plot_split_crd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d','e','f','g')\nT2<-c('v','w','x','y','zzz')\nr <- 4\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,\nserie = 2, seed = 0, kinds = 'Super-Duper',\nrandomization=TRUE,first=TRUE,design = 'crd')\nplot_split_crd(outdesign2,ncols = 6,nrows=5)\n\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,\nserie = 2, seed = 0, kinds = 'Super-Duper',\nrandomization=FALSE,first=TRUE,design = 'crd')\nplot_split_crd(outdesign2,ncols = 6,nrows=5)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_split_lsd","snippet":"### Name: plot_split_lsd\n### Title: Plot Split Plot Design lsd\n### Aliases: plot_split_lsd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d','e')\nT2<-c('v','w','x','y')\nr <- 2\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,serie = 2,\n seed = 0, kinds = 'Super-Duper',\n randomization=TRUE,first=TRUE,design = 'lsd')\nplot_split_lsd(outdesign2,width = 4,height = 4)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_split_rcbd","snippet":"### Name: plot_split_rcbd\n### Title: Plot Split Plot Designs with rcbd\n### Aliases: plot_split_rcbd\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d','e')\nT2<-c('v','w','x','y','z','zz')\nr = 3\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,serie = 2,\n seed = 0, kinds = 'Super-Duper',randomization=TRUE,\n first=TRUE,design = 'rcbd')\nplot_split_rcbd(outdesign2,width = 1,height = 1)\nplot_split_rcbd(outdesign2,width = 1,height = 1,reverse_y = TRUE)\nplot_split_rcbd(outdesign2,width = 1,height = 1,reverse_x = TRUE,reverse_y = TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_strip","snippet":"### Name: plot_strip\n### Title: Plot Strip Design\n### Aliases: plot_strip\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d')\nT2<-c('v','w','x','y','z')\nr = 3\noutdesign <- design.strip(trt1=T1, trt2=T2, r=r,serie = 
2,\n seed = 0, kinds = 'Super-Duper',randomization=TRUE)\nplot_strip(outdesign,factor_name_1 = \"T1\",factor_name_2=\"T2\")\nplot_strip(outdesign,factor_name_1 = \"T1\",factor_name_2=\"T2\",reverse_x = TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"plot_youden","snippet":"### Name: plot_youden\n### Title: Plot Youden Design\n### Aliases: plot_youden\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nvarieties<-c('perricholi','yungay','maria bonita','tomasa')\noutdesign <-design.youden(varieties,r=2,serie=2,seed=23)\nplot_youden(outdesign, labels = 'varieties')\n\n\n"} {"package":"agricolaeplotr","topic":"serpentine","snippet":"### Name: serpentine\n### Title: Serpentine\n### Aliases: serpentine\n\n### ** Examples\n\nserpentine(n=20,times = 15)\nserpentine(n=20,times = 15,m=4)\n\n\n"} {"package":"agricolaeplotr","topic":"summary","snippet":"### Name: summary\n### Title: summary of a field Layout\n### Aliases: summary\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nvarieties<-c('perricholi','yungay','maria bonita','tomasa')\noutdesign <-design.youden(varieties,r=2,serie=2,seed=23)\np <- plot_youden(outdesign, labels = 'varieties')\nstats <- DOE_obj(p)\n# print plot summary for net plot (plots without space)\nsummary(stats, part = \"net_plot\")\n# print plot summary for gross plot (plots with space)\nsummary(stats, part = \"gross_plot\")\n# print plot summary for entire field\nsummary(stats, part = \"field\")\n# print plot summary for design summary\nsummary(stats, part = \"experiment\")\n# print plot summary for all information shown above in one output\nsummary(stats, part = \"all\")\n\n\n"} {"package":"agricolaeplotr","topic":"test_input_extend","snippet":"### Name: test_input_extend\n### Title: Test if input for width and height is numeric\n### Aliases: test_input_extend\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_input_extend(3)\n\n\n"} {"package":"agricolaeplotr","topic":"test_input_ncols","snippet":"### Name: test_input_ncols\n### Title: checks matrix column input\n### Aliases: test_input_ncols\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_input_ncols(9)\n\n\n"} {"package":"agricolaeplotr","topic":"test_input_nrows","snippet":"### Name: test_input_nrows\n### Title: checks matrix rows input\n### Aliases: test_input_nrows\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_input_nrows(10)\n\n\n"} {"package":"agricolaeplotr","topic":"test_input_reverse","snippet":"### Name: test_input_reverse\n### Title: Test if input is a logical\n### Aliases: test_input_reverse\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_input_reverse(TRUE)\n\n\n"} {"package":"agricolaeplotr","topic":"test_input_shift","snippet":"### Name: test_input_shift\n### Title: Test if input for shift parameter is numeric\n### Aliases: test_input_shift\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_input_shift(0.5)\n\n\n"} {"package":"agricolaeplotr","topic":"test_name_in_column","snippet":"### Name: test_name_in_column\n### Title: Test if input column names\n### Aliases: test_name_in_column\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c(2,4)\nk=6\noutdesign<-design.ab(trt, r=k, serie=3,design='rcbd')\ntest_name_in_column('B',outdesign)\n\n\n"} {"package":"agricolaeplotr","topic":"test_names_design","snippet":"### Name: test_names_design\n### Title: Test of experimental design\n### Aliases: test_names_design\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\ntrt<-c(2,4)\nk=6\noutdesign<-design.ab(trt, r=k, 
serie=3,design='rcbd')\ntest_names_design(outdesign)\n\n\n"} {"package":"agricolaeplotr","topic":"test_string","snippet":"### Name: test_string\n### Title: Test if input is a string\n### Aliases: test_string\n\n### ** Examples\n\nlibrary(agricolaeplotr)\ntest_string('smallstring')\n\n\n"} {"package":"agricolaeplotr","topic":"theme_gi","snippet":"### Name: theme_gi\n### Title: theme_gi\n### Aliases: theme_gi\n\n### ** Examples\n\n# example borrowed from ggplot2\nlibrary(ggplot2)\ndf <- data.frame(\ngp = factor(rep(letters[1:3], each = 10)),\ny = rnorm(30))\n\np <- ggplot() +\ngeom_point(data = df, aes(gp, y))\np <- p + theme_gi();p\n\n\n"} {"package":"agricolaeplotr","topic":"theme_poster","snippet":"### Name: theme_poster\n### Title: ggplot2 theme for poster presentation\n### Aliases: theme_poster\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d','e','f','g')\nT2<-c('v','w','x','y','z')\nr <- 4\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,\nserie = 2, seed = 0, kinds = 'Super-Duper',\nrandomization=FALSE,first=TRUE,design = 'crd')\nplot_split_crd(outdesign2,ncols = 6,nrows=5)+\ntheme_poster()\n\n\n"} {"package":"agricolaeplotr","topic":"theme_pres","snippet":"### Name: theme_pres\n### Title: ggplot2 theme for outdoor presentation\n### Aliases: theme_pres\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nT1<-c('a','b','c','d','e','f','g')\nT2<-c('v','w','x','y','z')\nr <- 4\noutdesign2 <- design.split(trt1=T1, trt2=T2, r=r,\nserie = 2, seed = 0, kinds = 'Super-Duper',\nrandomization=FALSE,first=TRUE,design = 'crd')\nplot_split_crd(outdesign2,ncols = 6,nrows=5)+\ntheme_pres()\n\n\n"} {"package":"agricolaeplotr","topic":"to_table","snippet":"### Name: to_table\n### Title: to_table\n### Aliases: to_table\n\n### ** Examples\n\nlibrary(agricolaeplotr)\nlibrary(agricolae)\nvarieties<-c('perricholi','yungay','maria bonita','tomasa')\noutdesign <-design.youden(varieties,r=2,serie=2,seed=23)\np <- plot_youden(outdesign, labels = 'varieties', width=4, height=3)\nstats <- DOE_obj(p)\nr <- to_table(stats,part = \"net_plot\", digits = 2)\nr\nr <- to_table(stats,part = \"gross_plot\", digits = 2)\nr\nr <- to_table(stats,part = \"field\", digits = 2)\nr\nr <- to_table(stats,part = \"experiment\", digits = 2)\nr\nr <- to_table(stats,part = \"all\", digits = 2)\nr\n\n\n"} {"package":"rms","topic":"ExProb","snippet":"### Name: ExProb\n### Title: Function Generator For Exceedance Probabilities\n### Aliases: ExProb ExProb.orm plot.ExProb\n\n### ** Examples\n\nset.seed(1)\nx1 <- runif(200)\nyvar <- x1 + runif(200)\nf <- orm(yvar ~ x1)\nd <- ExProb(f)\nlp <- predict(f, newdata=data.frame(x1=c(.2,.8)))\nw <- d(lp)\ns1 <- abs(x1 - .2) < .1\ns2 <- abs(x1 - .8) < .1\nplot(w, data=data.frame(x1=c(rep(.2, sum(s1)), rep(.8, sum(s2))),\n yvar=c(yvar[s1], yvar[s2])))\n\nqu <- Quantile(f)\nabline(h=c(.1,.5), col='gray80')\nabline(v=qu(.5, lp), col='gray80')\nabline(v=qu(.9, lp), col='green')\n\n\n"} {"package":"rms","topic":"Function.rms","snippet":"### Name: Function\n### Title: Compose an S Function to Compute X beta from a Fit\n### Aliases: Function.rms Function.cph sascode perlcode\n### Keywords: regression methods interface models survival math\n\n### ** Examples\n\nsuppressWarnings(RNGversion(\"3.5.0\"))\nset.seed(1331)\nx1 <- exp(rnorm(100))\nx2 <- factor(sample(c('a','b'),100,rep=TRUE))\ndd <- datadist(x1, x2)\noptions(datadist='dd')\ny <- log(x1)^2+log(x1)*(x2=='b')+rnorm(100)/4\nf <- ols(y ~ pol(log(x1),2)*x2)\nf$coef\ng <- Function(f, 
digits=5)\ng\nsascode(g)\ncat(perlcode(g), '\\n')\ng()\ng(x1=c(2,3), x2='b') #could omit x2 since b is default category\npredict(f, expand.grid(x1=c(2,3),x2='b'))\ng8 <- Function(f) # default is 8 sig. digits\ng8(x1=c(2,3), x2='b')\noptions(datadist=NULL)\n\n\n## Not run: \n##D require(survival)\n##D # Make self-contained functions for computing survival probabilities\n##D # using a log-normal regression\n##D f <- psm(Surv(d.time, death) ~ rcs(age,4)*sex, dist='gaussian')\n##D g <- Function(f)\n##D surv <- Survival(f)\n##D # Compute 2 and 5-year survival estimates for 50 year old male\n##D surv(c(2,5), g(age=50, sex='male'))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"Glm","snippet":"### Name: Glm\n### Title: rms Version of glm\n### Aliases: Glm\n### Keywords: models regression\n\n### ** Examples\n\n\n## Dobson (1990) Page 93: Randomized Controlled Trial :\ncounts <- c(18,17,15,20,10,20,25,13,12)\noutcome <- gl(3,1,9)\ntreatment <- gl(3,3)\nf <- glm(counts ~ outcome + treatment, family=poisson())\nf\nanova(f)\nsummary(f)\nf <- Glm(counts ~ outcome + treatment, family=poisson())\n# could have had rcs( ) etc. if there were continuous predictors\nf\nanova(f)\nsummary(f, outcome=c('1','2','3'), treatment=c('1','2','3'))\n\n\n\n"} {"package":"rms","topic":"Gls","snippet":"### Name: Gls\n### Title: Fit Linear Model Using Generalized Least Squares\n### Aliases: Gls print.Gls\n### Keywords: models\n\n### ** Examples\n\n## Not run: \n##D require(ggplot2)\n##D ns <- 20 # no. subjects\n##D nt <- 10 # no. time points/subject\n##D B <- 10 # no. bootstrap resamples\n##D # usually do 100 for variances, 1000 for nonparametric CLs\n##D rho <- .5 # AR(1) correlation parameter\n##D V <- matrix(0, nrow=nt, ncol=nt)\n##D V <- rho^abs(row(V)-col(V)) # per-subject correlation/covariance matrix\n##D \n##D d <- expand.grid(tim=1:nt, id=1:ns)\n##D d$trt <- factor(ifelse(d$id <= ns/2, 'a', 'b'))\n##D true.beta <- c(Intercept=0,tim=.1,'tim^2'=0,'trt=b'=1)\n##D d$ey <- true.beta['Intercept'] + true.beta['tim']*d$tim +\n##D true.beta['tim^2']*(d$tim^2) + true.beta['trt=b']*(d$trt=='b')\n##D set.seed(13)\n##D library(MASS) # needed for mvrnorm\n##D d$y <- d$ey + as.vector(t(mvrnorm(n=ns, mu=rep(0,nt), Sigma=V)))\n##D \n##D dd <- datadist(d); options(datadist='dd')\n##D f <- Gls(y ~ pol(tim,2) + trt, correlation=corCAR1(form= ~tim | id),\n##D data=d, B=B)\n##D f\n##D AIC(f)\n##D f$var # bootstrap variances\n##D f$varBeta # original variances\n##D summary(f)\n##D anova(f)\n##D ggplot(Predict(f, tim, trt))\n##D # v <- Variogram(f, form=~tim|id, data=d)\n##D nlme:::summary.gls(f)$tTable # print matrix of estimates etc.\n##D \n##D options(datadist=NULL)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"LRupdate","snippet":"### Name: LRupdate\n### Title: LRupdate\n### Aliases: LRupdate\n\n### ** Examples\n\n## Not run: \n##D a <- aregImpute(~ y + x1 + x2, n.impute=30, data=d)\n##D f <- fit.mult.impute(y ~ x1 + x2, lrm, a, data=d, lrt=TRUE)\n##D a <- processMI(f, 'anova')\n##D f <- LRupdate(f, a)\n##D print(f, r2=1:4) # print all imputation-corrected R2 measures\n## End(Not run)\n\n\n"} {"package":"rms","topic":"Predict","snippet":"### Name: Predict\n### Title: Compute Predicted Values and Confidence Limits\n### Aliases: Predict print.Predict rbind.Predict\n### Keywords: models\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), 
n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)))\nPredict(fit, age, cholesterol, np=4)\nPredict(fit, age=seq(20,80,by=10), sex, conf.int=FALSE)\nPredict(fit, age=seq(20,80,by=10), sex='male') # works if datadist not used\n# Get simultaneous confidence limits accounting for making 7 estimates\n# Predict(fit, age=seq(20,80,by=10), sex='male', conf.type='simult')\n# (this needs the multcomp package)\n\nddist$limits$age[2] <- 30 # make 30 the reference value for age\n# Could also do: ddist$limits[\"Adjust to\",\"age\"] <- 30\nfit <- update(fit) # make new reference value take effect\nPredict(fit, age, ref.zero=TRUE, fun=exp)\n\n# Make two curves, and plot the predicted curves as two trellis panels\nw <- Predict(fit, age, sex)\nrequire(lattice)\nxyplot(yhat ~ age | sex, data=w, type='l')\n# To add confidence bands we need to use the Hmisc xYplot function in\n# place of xyplot\nxYplot(Cbind(yhat,lower,upper) ~ age | sex, data=w, \n method='filled bands', type='l', col.fill=gray(.95))\n# If non-displayed variables were in the model, add a subtitle to show\n# their settings using title(sub=paste('Adjusted to',attr(w,'info')$adjust),adj=0)\n# Easier: feed w into plot.Predict, ggplot.Predict, plotp.Predict\n## Not run: \n##D # Predictions from a parametric survival model\n##D require(survival)\n##D n <- 1000\n##D set.seed(731)\n##D age <- 50 + 12*rnorm(n)\n##D label(age) <- \"Age\"\n##D sex <- factor(sample(c('Male','Female'), n, \n##D rep=TRUE, prob=c(.6, .4)))\n##D cens <- 15*runif(n)\n##D h <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\n##D t <- -log(runif(n))/h\n##D label(t) <- 'Follow-up Time'\n##D e <- ifelse(t<=cens,1,0)\n##D t <- pmin(t, cens)\n##D units(t) <- \"Year\"\n##D ddist <- datadist(age, sex)\n##D Srv <- Surv(t,e)\n##D \n##D # Fit log-normal survival model and plot median survival time vs. age\n##D f <- psm(Srv ~ rcs(age), dist='lognormal')\n##D med <- Quantile(f) # Creates function to compute quantiles\n##D # (median by default)\n##D Predict(f, age, fun=function(x)med(lp=x))\n##D # Note: This works because med() expects the linear predictor (X*beta)\n##D # as an argument. Would not work if use \n##D # ref.zero=TRUE or adj.zero=TRUE.\n##D # Also, confidence intervals from this method are approximate since\n##D # they don't take into account estimation of scale parameter\n##D \n##D # Fit an ols model to log(y) and plot the relationship between x1\n##D # and the predicted mean(y) on the original scale without assuming\n##D # normality of residuals; use the smearing estimator. 
Before doing\n##D # that, show confidence intervals for mean and individual log(y),\n##D # and for the latter, also show bootstrap percentile nonparametric\n##D # pointwise confidence limits\n##D set.seed(1)\n##D x1 <- runif(300)\n##D x2 <- runif(300)\n##D ddist <- datadist(x1,x2); options(datadist='ddist')\n##D y <- exp(x1+ x2 - 1 + rnorm(300))\n##D f <- ols(log(y) ~ pol(x1,2) + x2, x=TRUE, y=TRUE) # x y for bootcov\n##D fb <- bootcov(f, B=100)\n##D pb <- Predict(fb, x1, x2=c(.25,.75))\n##D p1 <- Predict(f, x1, x2=c(.25,.75))\n##D p <- rbind(normal=p1, boot=pb)\n##D plot(p)\n##D \n##D p1 <- Predict(f, x1, conf.type='mean')\n##D p2 <- Predict(f, x1, conf.type='individual')\n##D p <- rbind(mean=p1, individual=p2)\n##D plot(p, label.curve=FALSE) # uses superposition\n##D plot(p, ~x1 | .set.) # 2 panels\n##D \n##D r <- resid(f)\n##D smean <- function(yhat)smearingEst(yhat, exp, res, statistic='mean')\n##D formals(smean) <- list(yhat=numeric(0), res=r[!is.na(r)])\n##D #smean$res <- r[!is.na(r)] # define default res argument to function\n##D Predict(f, x1, fun=smean)\n##D \n##D ## Example using offset\n##D g <- Glm(Y ~ offset(log(N)) + x1 + x2, family=poisson)\n##D Predict(g, offset=list(N=100))\n## End(Not run)\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"Rq","snippet":"### Name: Rq\n### Title: rms Package Interface to quantreg Package\n### Aliases: Rq RqFit print.Rq latex.Rq predict.Rq\n### Keywords: models nonparametric\n\n### ** Examples\n\n## Not run: \n##D set.seed(1)\n##D n <- 100\n##D x1 <- rnorm(n)\n##D y <- exp(x1 + rnorm(n)/4)\n##D dd <- datadist(x1); options(datadist='dd')\n##D fq2 <- Rq(y ~ pol(x1,2))\n##D anova(fq2)\n##D fq3 <- Rq(y ~ pol(x1,2), tau=.75)\n##D anova(fq3)\n##D pq2 <- Predict(fq2, x1)\n##D pq3 <- Predict(fq3, x1)\n##D p <- rbind(Median=pq2, Q3=pq3)\n##D plot(p, ~ x1 | .set.)\n##D # For superpositioning, with true curves superimposed\n##D a <- function(x, y, ...) 
{\n##D x <- unique(x)\n##D col <- trellis.par.get('superpose.line')$col\n##D llines(x, exp(x), col=col[1], lty=2)\n##D llines(x, exp(x + qnorm(.75)/4), col=col[2], lty=2)\n##D }\n##D plot(p, addpanel=a)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"anova.rms","snippet":"### Name: anova.rms\n### Title: Analysis of Variance (Wald, LR, and F Statistics)\n### Aliases: anova.rms print.anova.rms plot.anova.rms latex.anova.rms\n### html.anova.rms\n### Keywords: models regression htest aplot\n\n### ** Examples\n\nrequire(ggplot2)\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\ntreat <- factor(sample(c('a','b','c'), n,TRUE))\nnum.diseases <- sample(0:4, n,TRUE)\nage <- rnorm(n, 50, 10)\ncholesterol <- rnorm(n, 200, 25)\nweight <- rnorm(n, 150, 20)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(num.diseases) <- 'Number of Comorbid Diseases'\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(weight) <- 'Weight, lbs.'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\n\n\n# Specify population model for log odds that Y=1\nL <- .1*(num.diseases-2) + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(treat=='a') +\n 3.5*(treat=='b')+2*(treat=='c'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\n\nfit <- lrm(y ~ treat + scored(num.diseases) + rcs(age) +\n log(cholesterol+10) + treat:log(cholesterol+10),\n x=TRUE, y=TRUE) # x, y needed for test='LR'\na <- anova(fit) # Test all factors\nb <- anova(fit, treat, cholesterol) # Test these 2 by themselves\n # to get their pooled effects\na\nb\na2 <- anova(fit, test='LR')\nb2 <- anova(fit, treat, cholesterol, test='LR')\na2\nb2\n\n# Add a new line to the plot with combined effects\ns <- rbind(a2, 'treat+cholesterol'=b2['TOTAL',])\n\nclass(s) <- 'anova.rms'\nplot(s, margin=c('chisq', 'proportion chisq'))\n\ng <- lrm(y ~ treat*rcs(age))\ndd <- datadist(treat, num.diseases, age, cholesterol)\noptions(datadist='dd')\np <- Predict(g, age, treat=\"b\")\ns <- anova(g)\ntx <- paste(capture.output(s), collapse='\\n')\nggplot(p) + annotate('text', x=27, y=3.2, family='mono', label=tx,\n hjust=0, vjust=1, size=1.5)\n\nplot(s, margin=c('chisq', 'proportion chisq'))\n# new plot - dot chart of chisq-d.f. 
with 2 other stats in right margin\n# latex(s) # nice printout - creates anova.g.tex\noptions(datadist=NULL)\n\n\n# Simulate data from a given model, and display exactly which\n# hypotheses are being tested\n\n\nset.seed(123)\nage <- rnorm(500, 50, 15)\ntreat <- factor(sample(c('a','b','c'), 500, TRUE))\nbp <- rnorm(500, 120, 10)\ny <- ifelse(treat=='a', (age-50)*.05, abs(age-50)*.08) + 3*(treat=='c') +\n pmax(bp, 100)*.09 + rnorm(500)\nf <- ols(y ~ treat*lsp(age,50) + rcs(bp,4))\nprint(names(coef(f)), quote=FALSE)\nspecs(f)\nanova(f)\nan <- anova(f)\noptions(digits=3)\nprint(an, 'subscripts')\nprint(an, 'dots')\n\n\nan <- anova(f, test='Chisq', ss=FALSE)\n# plot(0:1) # make some plot\n# tab <- pantext(an, 1.2, .6, lattice=FALSE, fontfamily='Helvetica')\n# create function to write table; usually omit fontfamily\n# tab() # execute it; could do tab(cex=.65)\nplot(an) # new plot - dot chart of chisq-d.f.\n# Specify plot(an, trans=sqrt) to use a square root scale for this plot\n# latex(an) # nice printout - creates anova.f.tex\n\n\n## Example to save partial R^2 for all predictors, along with overall\n## R^2, from two separate fits, and to combine them with ggplot2\n\nrequire(ggplot2)\nset.seed(1)\nn <- 100\nx1 <- runif(n)\nx2 <- runif(n)\ny <- (x1-.5)^2 + x2 + runif(n)\ngroup <- c(rep('a', n/2), rep('b', n/2))\nA <- NULL\nfor(g in c('a','b')) {\n f <- ols(y ~ pol(x1,2) + pol(x2,2) + pol(x1,2) %ia% pol(x2,2),\n subset=group==g)\n a <- plot(anova(f),\n what='partial R2', pl=FALSE, rm.totals=FALSE, sort='none')\n a <- a[-grep('NONLINEAR', names(a))]\n d <- data.frame(group=g, Variable=factor(names(a), names(a)),\n partialR2=unname(a))\n A <- rbind(A, d)\n }\nggplot(A, aes(x=partialR2, y=Variable)) + geom_point() +\n facet_wrap(~ group) + xlab(ex <- expression(partial~R^2)) +\n scale_y_discrete(limits=rev)\nggplot(A, aes(x=partialR2, y=Variable, color=group)) + geom_point() +\n xlab(ex <- expression(partial~R^2)) +\n scale_y_discrete(limits=rev)\n\n# Suppose that a researcher wants to make a big deal about a variable\n# because it has the highest adjusted chi-square. We use the\n# bootstrap to derive 0.95 confidence intervals for the ranks of all\n# the effects in the model. We use the plot method for anova, with\n# pl=FALSE to suppress actual plotting of chi-square - d.f. for each\n# bootstrap repetition.\n# It is important to tell plot.anova.rms not to sort the results, or\n# every bootstrap replication would have ranks of 1,2,3,... 
for the stats.\n\nn <- 300\nset.seed(1)\nd <- data.frame(x1=runif(n), x2=runif(n), x3=runif(n),\n x4=runif(n), x5=runif(n), x6=runif(n), x7=runif(n),\n x8=runif(n), x9=runif(n), x10=runif(n), x11=runif(n),\n x12=runif(n))\nd$y <- with(d, 1*x1 + 2*x2 + 3*x3 + 4*x4 + 5*x5 + 6*x6 +\n 7*x7 + 8*x8 + 9*x9 + 10*x10 + 11*x11 +\n 12*x12 + 9*rnorm(n))\n\nf <- ols(y ~ x1+x2+x3+x4+x5+x6+x7+x8+x9+x10+x11+x12, data=d)\nB <- 20 # actually use B=1000\nranks <- matrix(NA, nrow=B, ncol=12)\nrankvars <- function(fit)\n rank(plot(anova(fit), sort='none', pl=FALSE))\nRank <- rankvars(f)\nfor(i in 1:B) {\n j <- sample(1:n, n, TRUE)\n bootfit <- update(f, data=d, subset=j)\n ranks[i,] <- rankvars(bootfit)\n }\nlim <- t(apply(ranks, 2, quantile, probs=c(.025,.975)))\npredictor <- factor(names(Rank), names(Rank))\nw <- data.frame(predictor, Rank, lower=lim[,1], upper=lim[,2])\nggplot(w, aes(x=predictor, y=Rank)) + geom_point() + coord_flip() +\n scale_y_continuous(breaks=1:12) +\n geom_errorbar(aes(ymin=lim[,1], ymax=lim[,2]), width=0)\n\n\n"} {"package":"rms","topic":"bj","snippet":"### Name: bj\n### Title: Buckley-James Multiple Regression Model\n### Aliases: bj bj.fit residuals.bj print.bj validate.bj bjplot\n### Keywords: models survival\n\n### ** Examples\n\nrequire(survival)\nsuppressWarnings(RNGversion(\"3.5.0\"))\nset.seed(1)\nftime <- 10*rexp(200)\nstroke <- ifelse(ftime > 10, 0, 1)\nftime <- pmin(ftime, 10)\nunits(ftime) <- \"Month\"\nage <- rnorm(200, 70, 10)\nhospital <- factor(sample(c('a','b'),200,TRUE))\ndd <- datadist(age, hospital)\noptions(datadist=\"dd\")\n\n# Prior to rms 6.0 and R 4.0 the following worked with 5 knots\nf <- bj(Surv(ftime, stroke) ~ rcs(age,3) + hospital, x=TRUE, y=TRUE)\n# add link=\"identity\" to use a censored normal regression model instead\n# of a lognormal one\nanova(f)\nfastbw(f)\nvalidate(f, B=15)\nplot(Predict(f, age, hospital))\n# needs datadist since no explicit age,hosp.\ncoef(f) # look at regression coefficients\ncoef(psm(Surv(ftime, stroke) ~ rcs(age,3) + hospital, dist='lognormal'))\n # compare with coefficients from likelihood-based\n # log-normal regression model\n # use dist='gau' not under R \n\n\nr <- resid(f, 'censored.normalized')\nsurvplot(npsurv(r ~ 1), conf='none') \n # plot Kaplan-Meier estimate of \n # survival function of standardized residuals\nsurvplot(npsurv(r ~ cut2(age, g=2)), conf='none') \n # may desire both strata to be n(0,1)\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"bootBCa","snippet":"### Name: bootBCa\n### Title: BCa Bootstrap on Existing Bootstrap Replicates\n### Aliases: bootBCa\n### Keywords: bootstrap\n\n### ** Examples\n\n## Not run: \n##D x1 <- runif(100); x2 <- runif(100); y <- sample(0:1, 100, TRUE)\n##D f <- lrm(y ~ x1 + x2, x=TRUE, y=TRUE)\n##D seed <- .Random.seed\n##D b <- bootcov(f)\n##D # Get estimated log odds at x1=.4, x2=.6\n##D X <- cbind(c(1,1), x1=c(.4,2), x2=c(.6,3))\n##D est <- X %*% coef(b)\n##D ests <- t(X %*% t(b$boot.Coef))\n##D bootBCa(est, ests, n=100, seed=seed)\n##D bootBCa(est, ests, type='bca', n=100, seed=seed)\n##D bootBCa(est, ests, type='basic', n=100, seed=seed)\n## End(Not run)\n\n"} {"package":"rms","topic":"bootcov","snippet":"### Name: bootcov\n### Title: Bootstrap Covariance and Distribution for Regression\n### Coefficients\n### Aliases: bootcov bootplot bootplot.bootcov confplot confplot.bootcov\n### histdensity\n### Keywords: models regression htest methods hplot\n\n### ** Examples\n\nset.seed(191)\nx <- exp(rnorm(200))\nlogit <- 1 + x/2\ny <- ifelse(runif(200) <= plogis(logit), 1, 0)\nf <- lrm(y ~ 
pol(x,2), x=TRUE, y=TRUE)\ng <- bootcov(f, B=50, pr=TRUE, seed=3)\nanova(g) # using bootstrap covariance estimates\nfastbw(g) # using bootstrap covariance estimates\nbeta <- g$boot.Coef[,1]\nhist(beta, nclass=15) #look at normality of parameter estimates\nqqnorm(beta)\n# bootplot would be better than these last two commands\n\n\n# A dataset contains a variable number of observations per subject,\n# and all observations are laid out in separate rows. The responses\n# represent whether or not a given segment of the coronary arteries\n# is occluded. Segments of arteries may not operate independently\n# in the same patient. We assume a \"working independence model\" to\n# get estimates of the coefficients, i.e., that estimates assuming\n# independence are reasonably efficient. The job is then to get\n# unbiased estimates of variances and covariances of these estimates.\n\n\nset.seed(2)\nn.subjects <- 30\nages <- rnorm(n.subjects, 50, 15)\nsexes <- factor(sample(c('female','male'), n.subjects, TRUE))\nlogit <- (ages-50)/5\nprob <- plogis(logit) # true prob not related to sex\nid <- sample(1:n.subjects, 300, TRUE) # subjects sampled multiple times\ntable(table(id)) # frequencies of number of obs/subject\nage <- ages[id]\nsex <- sexes[id]\n# In truth, observations within subject are independent:\ny <- ifelse(runif(300) <= prob[id], 1, 0)\nf <- lrm(y ~ lsp(age,50)*sex, x=TRUE, y=TRUE)\ng <- bootcov(f, id, B=50, seed=3) # usually do B=200 or more\ndiag(g$var)/diag(f$var)\n# add ,group=w to re-sample from within each level of w
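\n# (a sketch under an assumption not in the help file: re-use the existing\n# sex variable as the grouping factor w)\n# g <- bootcov(f, id, group=sex, B=50, seed=3)\nanova(g) # cluster-adjusted Wald statistics\n# fastbw(g) # cluster-adjusted backward elimination\nplot(Predict(g, age=30:70, sex='female')) # cluster-adjusted confidence bands\n\n\n# Get design effects based on inflation of the variances when compared\n# with bootstrap estimates which ignore clustering\ng2 <- bootcov(f, B=50, seed=3)\ndiag(g$var)/diag(g2$var)\n\n\n# Get design effects based on pooled tests of factors in model\nanova(g2)[,1] / anova(g)[,1]\n\n\n# Simulate binary data where there is a strong \n# age x sex interaction with linear age effects \n# for both sexes, but where, not knowing that, \n# we fit a quadratic model. 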
Use the bootstrap\n# to get bootstrap distributions of various\n# effects, and to get pointwise and simultaneous\n# confidence limits\n\n\nset.seed(71)\nn <- 500\nage <- rnorm(n, 50, 10)\nsex <- factor(sample(c('female','male'), n, rep=TRUE))\nL <- ifelse(sex=='male', 0, .1*(age-50))\ny <- ifelse(runif(n)<=plogis(L), 1, 0)\n\n\nf <- lrm(y ~ sex*pol(age,2), x=TRUE, y=TRUE)\nb <- bootcov(f, B=50, loglik=TRUE, pr=TRUE, seed=3) # better: B=500\n\n\npar(mfrow=c(2,3))\n# Assess normality of regression estimates\nbootplot(b, which=1:6, what='qq')\n# They appear somewhat non-normal\n\n\n# Plot histograms and estimated densities \n# for 6 coefficients\nw <- bootplot(b, which=1:6)\n# Print bootstrap quantiles\nw$quantiles\n\n# Show box plots for bootstrap reps for all coefficients\nbootplot(b, what='box')\n\n\n# Estimate regression function for females\n# for a sequence of ages\nages <- seq(25, 75, length=100)\nlabel(ages) <- 'Age'\n\n\n# Plot fitted function and pointwise normal-\n# theory confidence bands\npar(mfrow=c(1,1))\np <- Predict(f, age=ages, sex='female')\nplot(p)\n# Save curve coordinates for later automatic\n# labeling using labcurve in the Hmisc library\ncurves <- vector('list',8)\ncurves[[1]] <- with(p, list(x=age, y=lower))\ncurves[[2]] <- with(p, list(x=age, y=upper))\n\n\n# Add pointwise normal-distribution confidence \n# bands using unconditional variance-covariance\n# matrix from the 50 bootstrap reps\np <- Predict(b, age=ages, sex='female')\ncurves[[3]] <- with(p, list(x=age, y=lower))\ncurves[[4]] <- with(p, list(x=age, y=upper))\n\n\ndframe <- expand.grid(sex='female', age=ages)\nX <- predict(f, dframe, type='x') # Full design matrix\n\n\n# Add pointwise bootstrap nonparametric \n# confidence limits\np <- confplot(b, X=X, against=ages, method='pointwise',\n add=TRUE, lty.conf=4)\ncurves[[5]] <- list(x=ages, y=p$lower)\ncurves[[6]] <- list(x=ages, y=p$upper)\n\n\n# Add simultaneous bootstrap confidence band\np <- confplot(b, X=X, against=ages, add=TRUE, lty.conf=5)\ncurves[[7]] <- list(x=ages, y=p$lower)\ncurves[[8]] <- list(x=ages, y=p$upper)\nlab <- c('a','a','b','b','c','c','d','d')\nlabcurve(curves, lab, pl=TRUE)\n\n\n# Now get bootstrap simultaneous confidence set for\n# female:male odds ratios for a variety of ages\n\n\ndframe <- expand.grid(age=ages, sex=c('female','male'))\nX <- predict(f, dframe, type='x') # design matrix\nf.minus.m <- X[1:100,] - X[101:200,]\n# First 100 rows are for females. 
By subtracting\n# design matrices we are able to get Xf*Beta - Xm*Beta\n# = (Xf - Xm)*Beta\n\n\nconfplot(b, X=f.minus.m, against=ages,\n method='pointwise', ylab='F:M Log Odds Ratio')\nconfplot(b, X=f.minus.m, against=ages,\n lty.conf=3, add=TRUE)\n\n\n# contrast.rms makes it easier to compute the design matrix for use\n# in bootstrapping contrasts:\n\n\nf.minus.m <- contrast(f, list(sex='female',age=ages),\n list(sex='male', age=ages))$X\nconfplot(b, X=f.minus.m)\n\n\n# For a quadratic binary logistic regression model use bootstrap\n# bumping to estimate coefficients under a monotonicity constraint\nset.seed(177)\nn <- 400\nx <- runif(n)\nlogit <- 3*(x^2-1)\ny <- rbinom(n, size=1, prob=plogis(logit))\nf <- lrm(y ~ pol(x,2), x=TRUE, y=TRUE)\nk <- coef(f)\nk\nvertex <- -k[2]/(2*k[3])\nvertex\n\n\n# Outside [0,1] so fit satisfies monotonicity constraint within\n# x in [0,1], i.e., original fit is the constrained MLE\n\n\ng <- bootcov(f, B=50, coef.reps=TRUE, loglik=TRUE, seed=3)\nbootcoef <- g$boot.Coef # 50x3 matrix (one row per bootstrap rep)\nvertex <- -bootcoef[,2]/(2*bootcoef[,3])\ntable(cut2(vertex, c(0,1)))\nmono <- !(vertex >= 0 & vertex <= 1)\nmean(mono) # estimate of Prob{monotonicity in [0,1]}\n\n\nvar(bootcoef) # var-cov matrix for unconstrained estimates\nvar(bootcoef[mono,]) # for constrained estimates\n\n\n# Find second-best vector of coefficient estimates, i.e., best\n# from among bootstrap estimates\ng$boot.Coef[order(g$boot.loglik[-length(g$boot.loglik)])[1],]\n# Note closeness to MLE
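\n# (a hedged extension, not in the original example: among bootstrap\n# replications satisfying the constraint, pick the coefficient vector\n# with the best (smallest, as in the line above) boot.loglik value)\nll <- g$boot.loglik[-length(g$boot.loglik)]\ng$boot.Coef[mono, , drop=FALSE][which.min(ll[mono]), ]\n\n## Not run: \n##D # Get the bootstrap distribution of the difference in two ROC areas for\n##D # two binary logistic models fitted on the same dataset. This analysis\n##D # does not adjust for the bias ROC area (C-index) due to overfitting.\n##D # The same random number seed is used in two runs to enforce pairing.\n##D \n##D set.seed(17)\n##D x1 <- rnorm(100)\n##D x2 <- rnorm(100)\n##D y <- sample(0:1, 100, TRUE)\n##D f <- lrm(y ~ x1, x=TRUE, y=TRUE)\n##D g <- lrm(y ~ x1 + x2, x=TRUE, y=TRUE)\n##D f <- bootcov(f, stat='C', seed=4)\n##D g <- bootcov(g, stat='C', seed=4)\n##D dif <- g$boot.stats - f$boot.stats\n##D hist(dif)\n##D quantile(dif, c(.025,.25,.5,.75,.975))\n##D # Compute a z-test statistic. 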
Note that comparing ROC areas is far less\n##D # powerful than likelihood or Brier score-based methods\n##D z <- (g$stats['C'] - f$stats['C'])/sd(dif)\n##D names(z) <- NULL\n##D c(z=z, P=2*pnorm(-abs(z)))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"bplot","snippet":"### Name: bplot\n### Title: 3-D Plots Showing Effects of Two Continuous Predictors in a\n### Regression Model Fit\n### Aliases: bplot perimeter\n### Keywords: models hplot htest\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n x=TRUE, y=TRUE)\np <- Predict(fit, age, cholesterol, sex, np=50) # vary sex last\nrequire(lattice)\nbplot(p) # image plot for age, cholesterol with color\n # coming from yhat; use default ranges for\n # both continuous predictors; two panels (for sex)\nbplot(p, lfun=wireframe) # same as bplot(p,,wireframe)\n# View from different angle, change y label orientation accordingly\n# Default is z=40, x=-60\nbplot(p,, wireframe, screen=list(z=40, x=-75), ylabrot=-25)\nbplot(p,, contourplot) # contour plot\nbounds <- perimeter(age, cholesterol, lowess=TRUE)\nplot(age, cholesterol) # show bivariate data density and perimeter\nlines(bounds[,c('x','ymin')]); lines(bounds[,c('x','ymax')])\np <- Predict(fit, age, cholesterol) # use only one sex\nbplot(p, perim=bounds) # draws image() plot\n # don't show estimates where data are sparse\n # doesn't make sense here since vars don't interact\nbplot(p, plogis(yhat) ~ age*cholesterol) # Probability scale\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"calibrate","snippet":"### Name: calibrate\n### Title: Resampling Model Calibration\n### Aliases: calibrate calibrate.default calibrate.cph calibrate.psm\n### print.calibrate print.calibrate.default plot.calibrate\n### plot.calibrate.default\n### Keywords: methods models regression survival hplot\n\n### ** Examples\n\nrequire(survival)\nset.seed(1)\nn <- 200\nd.time <- rexp(n)\nx1 <- runif(n)\nx2 <- factor(sample(c('a', 'b', 'c'), n, TRUE))\nf <- cph(Surv(d.time) ~ pol(x1,2) * x2, x=TRUE, y=TRUE, surv=TRUE, time.inc=1.5)\n#or f <- psm(S ~ \\dots)\npa <- requireNamespace('polspline')\nif(pa) {\n cal <- calibrate(f, u=1.5, B=20) # cmethod='hare'\n plot(cal)\n}\ncal <- calibrate(f, u=1.5, cmethod='KM', m=50, B=20) # usually B=200 or 300\nplot(cal, add=pa)\n\nset.seed(1)\ny <- sample(0:2, n, TRUE)\nx1 <- runif(n)\nx2 <- runif(n)\nx3 <- runif(n)\nx4 <- runif(n)\nf <- lrm(y ~ x1 + x2 + x3 * x4, x=TRUE, y=TRUE)\ncal <- calibrate(f, kint=2, predy=seq(.2, .8, length=60), \n group=y)\n# group= does k-sample validation: make resamples have same \n# numbers of subjects in each level of y as original sample\n\nplot(cal)\n#See the example for the validate function for a method of 
validating\n#continuation ratio ordinal logistic models. You can do the same\n#thing for calibrate\n\n\n"} {"package":"rms","topic":"contrast","snippet":"### Name: contrast.rms\n### Title: General Contrasts of Regression Coefficients\n### Aliases: contrast contrast.rms print.contrast.rms\n### Keywords: htest models regression\n\n### ** Examples\n\nrequire(ggplot2)\nset.seed(1)\nage <- rnorm(200,40,12)\nsex <- factor(sample(c('female','male'),200,TRUE))\nlogit <- (sex=='male') + (age-40)/5\ny <- ifelse(runif(200) <= plogis(logit), 1, 0)\nf <- lrm(y ~ pol(age,2)*sex)\nanova(f)\n# Compare a 30 year old female to a 40 year old male\n# (with or without age x sex interaction in the model)\ncontrast(f, list(sex='female', age=30), list(sex='male', age=40))\n# Test for interaction between age and sex, duplicating anova\ncontrast(f, list(sex='female', age=30),\n list(sex='male', age=30),\n list(sex='female', age=c(40,50)),\n list(sex='male', age=c(40,50)), type='joint')\n# Duplicate overall sex effect in anova with 3 d.f.\ncontrast(f, list(sex='female', age=c(30,40,50)),\n list(sex='male', age=c(30,40,50)), type='joint')\n# For females get an array of odds ratios against age=40\nk <- contrast(f, list(sex='female', age=30:50),\n list(sex='female', age=40))\nprint(k, fun=exp)\n# Plot odds ratios with pointwise 0.95 confidence bands using log scale\nk <- as.data.frame(k[c('Contrast','Lower','Upper')])\nggplot(k, aes(x=30:50, y=exp(Contrast))) + geom_line() +\n geom_ribbon(aes(ymin=exp(Lower), ymax=exp(Upper)),\n alpha=0.15, linetype=0) +\n scale_y_continuous(trans='log10', n.breaks=10,\n minor_breaks=c(seq(0.1, 1, by=.1), seq(1, 10, by=.5))) +\n xlab('Age') + ylab('OR against age 40')\n\n# For a model containing two treatments, centers, and treatment\n# x center interaction, get 0.95 confidence intervals separately\n# by center\ncenter <- factor(sample(letters[1 : 8], 500, TRUE))\ntreat <- factor(sample(c('a','b'), 500, TRUE))\ny <- 8*(treat == 'b') + rnorm(500, 100, 20)\nf <- ols(y ~ treat*center)\n\n\nlc <- levels(center)\ncontrast(f, list(treat='b', center=lc),\n list(treat='a', center=lc))\n\n\n# Get 'Type III' contrast: average b - a treatment effect over\n# centers, weighting centers equally (which is almost always\n# an unreasonable thing to do)\ncontrast(f, list(treat='b', center=lc),\n list(treat='a', center=lc),\n type='average')\n\n\n# Get 'Type II' contrast, weighting centers by the number of\n# subjects per center. 
Print the design contrast matrix used.\nk <- contrast(f, list(treat='b', center=lc),\n list(treat='a', center=lc),\n type='average', weights=table(center))\nprint(k, X=TRUE)\n# Note: If other variables had interacted with either treat \n# or center, we may want to list settings for these variables\n# inside the list()'s, so as to not use default settings\n\n\n# For a 4-treatment study, get all comparisons with treatment 'a'\ntreat <- factor(sample(c('a','b','c','d'), 500, TRUE))\ny <- 8*(treat == 'b') + rnorm(500, 100, 20)\ndd <- datadist(treat, center); options(datadist='dd')\nf <- ols(y ~ treat*center)\nlt <- levels(treat)\ncontrast(f, list(treat=lt[-1]),\n list(treat=lt[ 1]),\n cnames=paste(lt[-1], lt[1], sep=':'), conf.int=1 - .05 / 3)\n\n\n# Compare each treatment with average of all others\nfor(i in 1 : length(lt)) {\n cat('Comparing with', lt[i], '\\n\\n')\n print(contrast(f, list(treat=lt[-i]),\n list(treat=lt[ i]), type='average'))\n}\noptions(datadist=NULL)\n\n# Six ways to get the same thing, for a variable that\n# appears linearly in a model and does not interact with\n# any other variables. We estimate the change in y per\n# unit change in a predictor x1. Methods 4, 5 also\n# provide confidence limits. Method 6 computes nonparametric\n# bootstrap confidence limits. Methods 2-6 can work\n# for models that are nonlinear or non-additive in x1.\n# For that case more care is needed in choice of settings\n# for x1 and the variables that interact with x1.\n\n\n## Not run: \n##D coef(fit)['x1'] # method 1\n##D diff(predict(fit, gendata(x1=c(0,1)))) # method 2\n##D g <- Function(fit) # method 3\n##D g(x1=1) - g(x1=0)\n##D summary(fit, x1=c(0,1)) # method 4\n##D k <- contrast(fit, list(x1=1), list(x1=0)) # method 5\n##D print(k, X=TRUE)\n##D fit <- update(fit, x=TRUE, y=TRUE) # method 6\n##D b <- bootcov(fit, B=500)\n##D contrast(b, list(x1=1), list(x1=0))\n##D \n##D \n##D # In a model containing age, race, and sex,\n##D # compute an estimate of the mean response for a\n##D # 50 year old male, averaged over the races using\n##D # observed frequencies for the races as weights\n##D \n##D \n##D f <- ols(y ~ age + race + sex)\n##D contrast(f, list(age=50, sex='male', race=levels(race)),\n##D type='average', weights=table(race))\n##D \n##D # For a Bayesian model get the highest posterior interval for the\n##D # difference in two nonlinear functions of predicted values\n##D # Start with the mean from a proportional odds model\n##D g <- blrm(y ~ x)\n##D M <- Mean(g)\n##D contrast(g, list(x=1), list(x=0), fun=M)\n##D \n##D # For the median we have to make sure that contrast can pass the\n##D # per-posterior-draw vector of intercepts through\n##D qu <- Quantile(g)\n##D med <- function(lp, intercepts) qu(0.5, lp, intercepts=intercepts)\n##D contrast(g, list(x=1), list(x=0), fun=med)\n## End(Not run)\n\n\n# Plot the treatment effect (drug - placebo) as a function of age\n# and sex in a model in which age nonlinearly interacts with treatment\n# for females only\n\nset.seed(1)\nn <- 800\ntreat <- factor(sample(c('drug','placebo'), n,TRUE))\nsex <- factor(sample(c('female','male'), n,TRUE))\nage <- rnorm(n, 50, 10)\ny <- .05*age + (sex=='female')*(treat=='drug')*.05*abs(age-50) + rnorm(n)\nf <- ols(y ~ rcs(age,4)*treat*sex)\nd <- datadist(age, treat, sex); options(datadist='d')\n\n# show separate estimates by treatment and sex\n\nrequire(ggplot2)\nggplot(Predict(f, age, treat, sex='female'))\nggplot(Predict(f, age, treat, sex='male'))\nages <- seq(35,65,by=5); sexes <- c('female','male')\nw <- 
contrast(f, list(treat='drug', age=ages, sex=sexes),\n list(treat='placebo', age=ages, sex=sexes))\n# add conf.type=\"simultaneous\" to adjust for having done 14 contrasts\nxYplot(Cbind(Contrast, Lower, Upper) ~ age | sex, data=w,\n ylab='Drug - Placebo')\nw <- as.data.frame(w[c('age','sex','Contrast','Lower','Upper')])\nggplot(w, aes(x=age, y=Contrast)) + geom_point() + facet_grid(sex ~ .) +\n geom_errorbar(aes(ymin=Lower, ymax=Upper), width=0)\nggplot(w, aes(x=age, y=Contrast)) + geom_line() + facet_grid(sex ~ .) +\n geom_ribbon(aes(ymin=Lower, ymax=Upper), width=0, alpha=0.15, linetype=0)\nxYplot(Cbind(Contrast, Lower, Upper) ~ age, groups=sex, data=w,\n ylab='Drug - Placebo', method='alt bars')\noptions(datadist=NULL)\n\n\n# Examples of type='joint' contrast tests\n\nset.seed(1)\nx1 <- rnorm(100)\nx2 <- factor(sample(c('a','b','c'), 100, TRUE))\ndd <- datadist(x1, x2); options(datadist='dd')\ny <- x1 + (x2=='b') + rnorm(100)\n\n# First replicate a test statistic from anova()\n\nf <- ols(y ~ x2)\nanova(f)\ncontrast(f, list(x2=c('b','c')), list(x2='a'), type='joint')\n\n# Repeat with a redundancy; compare a vs b, a vs c, b vs c\n\ncontrast(f, list(x2=c('a','a','b')), list(x2=c('b','c','c')), type='joint')\n\n# Get a test of association of a continuous predictor with y\n# First assume linearity, then cubic\n\nf <- lrm(y>0 ~ x1 + x2)\nanova(f)\ncontrast(f, list(x1=1), list(x1=0), type='joint') # a minimum set of contrasts\nxs <- seq(-2, 2, length=20)\ncontrast(f, list(x1=0), list(x1=xs), type='joint')\n\n# All contrasts were redundant except for the first, because of\n# linearity assumption\n\nf <- lrm(y>0 ~ pol(x1,3) + x2)\nanova(f)\ncontrast(f, list(x1=0), list(x1=xs), type='joint')\nprint(contrast(f, list(x1=0), list(x1=xs), type='joint'), jointonly=TRUE)\n\n# All contrasts were redundant except for the first 3, because of\n# cubic regression assumption\n\n# Now do something that is difficult to do without cryptic contrast\n# matrix operations: Allow each of the three x2 groups to have a different\n# shape for the x1 effect where x1 is quadratic. Test whether there is\n# a difference in mean levels of y for x2='b' vs. 'c' or whether\n# the shape or slope of x1 is different between x2='b' and x2='c' regardless\n# of how they differ when x2='a'. In other words, test whether the mean\n# response differs between group b and c at any value of x1.\n# This is a 3 d.f. 
test (intercept, linear, quadratic effects) and is\n# a better approach than subsetting the data to remove x2='a' then\n# fitting a simpler model, as it uses a better estimate of sigma from\n# all the data.\n\nf <- ols(y ~ pol(x1,2) * x2)\nanova(f)\ncontrast(f, list(x1=xs, x2='b'),\n list(x1=xs, x2='c'), type='joint')\n\n# Note: If using a spline fit, there should be at least one value of\n# x1 between any two knots and beyond the outer knots.\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"cph","snippet":"### Name: cph\n### Title: Cox Proportional Hazards Model and Extensions\n### Aliases: cph Survival.cph Quantile.cph Mean.cph\n### Keywords: survival models nonparametric\n\n### ** Examples\n\n# Simulate data from a population model in which the log hazard\n# function is linear in age and there is no age x sex interaction\n\nrequire(survival)\nrequire(ggplot2)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('Male','Female'), n, \n rep=TRUE, prob=c(.6, .4)))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\ndt <- -log(runif(n))/h\nlabel(dt) <- 'Follow-up Time'\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\ndd <- datadist(age, sex)\noptions(datadist='dd')\nS <- Surv(dt,e)\n\nf <- cph(S ~ rcs(age,4) + sex, x=TRUE, y=TRUE)\ncox.zph(f, \"rank\") # tests of PH\nanova(f)\nggplot(Predict(f, age, sex)) # plot age effect, 2 curves for 2 sexes\nsurvplot(f, sex) # time on x-axis, curves for x2\nres <- resid(f, \"scaledsch\")\ntime <- as.numeric(dimnames(res)[[1]])\nz <- loess(res[,4] ~ time, span=0.50) # residuals for sex\nplot(time, fitted(z))\nlines(supsmu(time, res[,4]),lty=2)\nplot(cox.zph(f,\"identity\")) #Easier approach for last few lines\n# latex(f)\n\n\nf <- cph(S ~ age + strat(sex), surv=TRUE)\ng <- Survival(f) # g is a function\ng(seq(.1,1,by=.1), stratum=\"sex=Male\", type=\"poly\") #could use stratum=2\nmed <- Quantile(f)\nplot(Predict(f, age, fun=function(x) med(lp=x))) #plot median survival\n\n# Fit a model that is quadratic in age, interacting with sex as strata\n# Compare standard errors of linear predictor values with those from\n# coxph\n# Use more stringent convergence criteria to match with coxph\n\nf <- cph(S ~ pol(age,2)*strat(sex), x=TRUE, eps=1e-9, iter.max=20)\ncoef(f)\nse <- predict(f, se.fit=TRUE)$se.fit\nrequire(lattice)\nxyplot(se ~ age | sex, main='From cph')\na <- c(30,50,70)\ncomb <- data.frame(age=rep(a, each=2),\n sex=rep(levels(sex), 3))\n\np <- predict(f, comb, se.fit=TRUE)\ncomb$yhat <- p$linear.predictors\ncomb$se <- p$se.fit\nz <- qnorm(.975)\ncomb$lower <- p$linear.predictors - z*p$se.fit\ncomb$upper <- p$linear.predictors + z*p$se.fit\ncomb\n\nage2 <- age^2\nf2 <- coxph(S ~ (age + age2)*strata(sex))\ncoef(f2)\nse <- predict(f2, se.fit=TRUE)$se.fit\nxyplot(se ~ age | sex, main='From coxph')\ncomb <- data.frame(age=rep(a, each=2), age2=rep(a, each=2)^2,\n sex=rep(levels(sex), 3))\np <- predict(f2, newdata=comb, se.fit=TRUE)\ncomb$yhat <- p$fit\ncomb$se <- p$se.fit\ncomb$lower <- p$fit - z*p$se.fit\ncomb$upper <- p$fit + z*p$se.fit\ncomb\n\n\n# g <- cph(Surv(hospital.charges) ~ age, surv=TRUE)\n# Cox model very useful for analyzing highly skewed data, censored or not\n# m <- Mean(g)\n# m(0) # Predicted mean charge for reference age\n\n\n#Fit a time-dependent covariable representing the instantaneous effect\n#of an intervening non-fatal event\nrm(age)\nset.seed(121)\ndframe <- data.frame(failure.time=1:10, event=rep(0:1,5),\n 
ie.time=c(NA,1.5,2.5,NA,3,4,NA,5,5,5), \n age=sample(40:80,10,rep=TRUE))\nz <- ie.setup(dframe$failure.time, dframe$event, dframe$ie.time)\nS <- z$S\nie.status <- z$ie.status\nattach(dframe[z$subs,]) # replicates all variables\n\nf <- cph(S ~ age + ie.status, x=TRUE, y=TRUE) \n#Must use x=TRUE,y=TRUE to get survival curves with time-dep. covariables\n\n\n#Get estimated survival curve for a 50-year old who has an intervening\n#non-fatal event at 5 days\nnew <- data.frame(S=Surv(c(0,5), c(5,999), c(FALSE,FALSE)), age=rep(50,2),\n ie.status=c(0,1))\ng <- survfit(f, new)\nplot(c(0,g$time), c(1,g$surv[,2]), type='s', \n xlab='Days', ylab='Survival Prob.')\n# Not certain about what columns represent in g$surv for survival5\n# but appears to be for different ie.status\n#or:\n#g <- survest(f, new)\n#plot(g$time, g$surv, type='s', xlab='Days', ylab='Survival Prob.')\n\n\n#Compare with estimates when there is no intervening event\nnew2 <- data.frame(S=Surv(c(0,5), c(5, 999), c(FALSE,FALSE)), age=rep(50,2),\n ie.status=c(0,0))\ng2 <- survfit(f, new2)\nlines(c(0,g2$time), c(1,g2$surv[,2]), type='s', lty=2)\n#or:\n#g2 <- survest(f, new2)\n#lines(g2$time, g2$surv, type='s', lty=2)\ndetach(\"dframe[z$subs, ]\")\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"cr.setup","snippet":"### Name: cr.setup\n### Title: Continuation Ratio Ordinal Logistic Setup\n### Aliases: cr.setup\n### Keywords: category models regression\n\n### ** Examples\n\ny <- c(NA, 10, 21, 32, 32)\ncr.setup(y)\n\n\nset.seed(171)\ny <- sample(0:2, 100, rep=TRUE)\nsex <- sample(c(\"f\",\"m\"),100,rep=TRUE)\nsex <- factor(sex)\ntable(sex, y)\noptions(digits=5)\ntapply(y==0, sex, mean)\ntapply(y==1, sex, mean)\ntapply(y==2, sex, mean)\ncohort <- y>=1\ntapply(y[cohort]==1, sex[cohort], mean)\n\nu <- cr.setup(y)\nY <- u$y\ncohort <- u$cohort\nsex <- sex[u$subs]\n\nlrm(Y ~ cohort + sex)\n \nf <- lrm(Y ~ cohort*sex) # saturated model - has to fit all data cells\nf\n\n#Prob(y=0|female):\n# plogis(-.50078)\n#Prob(y=0|male):\n# plogis(-.50078+.11301)\n#Prob(y=1|y>=1, female):\nplogis(-.50078+.31845)\n#Prob(y=1|y>=1, male):\nplogis(-.50078+.31845+.11301-.07379)\n\ncombinations <- expand.grid(cohort=levels(cohort), sex=levels(sex))\ncombinations\np <- predict(f, combinations, type=\"fitted\")\np\np0 <- p[c(1,3)]\np1 <- p[c(2,4)]\np1.unconditional <- (1 - p0) *p1\np1.unconditional\np2.unconditional <- 1 - p0 - p1.unconditional\np2.unconditional\n\n\n## Not run: \n##D dd <- datadist(inputdata) # do this on non-replicated data\n##D options(datadist='dd')\n##D pain.severity <- inputdata$pain.severity\n##D u <- cr.setup(pain.severity)\n##D # inputdata frame has age, sex with pain.severity\n##D attach(inputdata[u$subs,]) # replicate age, sex\n##D # If age, sex already available, could do age <- age[u$subs] etc., or\n##D # age <- rep(age, u$reps), etc.\n##D y <- u$y\n##D cohort <- u$cohort\n##D dd <- datadist(dd, cohort) # add to dd\n##D f <- lrm(y ~ cohort + age*sex) # ordinary cont. 
ratio model\n##D g <- lrm(y ~ cohort*sex + age, x=TRUE,y=TRUE) # allow unequal slopes for\n##D # sex across cutoffs\n##D cal <- calibrate(g, cluster=u$subs, subset=cohort=='all') \n##D # subs makes bootstrap sample the correct units, subset causes\n##D # Predicted Prob(pain.severity=0) to be checked for calibration\n## End(Not run)\n\n\n"} {"package":"rms","topic":"datadist","snippet":"### Name: datadist\n### Title: Distribution Summaries for Predictor Variables\n### Aliases: datadist print.datadist\n### Keywords: models nonparametric regression\n\n### ** Examples\n\n## Not run: \n##D d <- datadist(data=1) # use all variables in search pos. 1\n##D d <- datadist(x1, x2, x3)\n##D page(d) # if your options(pager) leaves up a pop-up\n##D # window, this is a useful guide in analyses\n##D d <- datadist(data=2) # all variables in search pos. 2\n##D d <- datadist(data=my.data.frame)\n##D d <- datadist(my.data.frame) # same as previous. Run for all potential vars.\n##D d <- datadist(x2, x3, data=my.data.frame) # combine variables\n##D d <- datadist(x2, x3, q.effect=c(.1,.9), q.display=c(0,1))\n##D # uses inter-decile range odds ratios,\n##D # total range of variables for regression function plots\n##D d <- datadist(d, z) # add a new variable to an existing datadist\n##D options(datadist=\"d\") #often a good idea, to store info with fit\n##D f <- ols(y ~ x1*x2*x3)\n##D \n##D \n##D options(datadist=NULL) #default at start of session\n##D f <- ols(y ~ x1*x2)\n##D d <- datadist(f) #info not stored in `f'\n##D d$limits[\"Adjust to\",\"x1\"] <- .5 #reset adjustment level to .5\n##D options(datadist=\"d\")\n##D \n##D \n##D f <- lrm(y ~ x1*x2, data=mydata)\n##D d <- datadist(f, data=mydata)\n##D options(datadist=\"d\")\n##D \n##D \n##D f <- lrm(y ~ x1*x2) #datadist not used - specify all values for\n##D summary(f, x1=c(200,500,800), x2=c(1,3,5)) # obtaining predictions\n##D plot(Predict(f, x1=200:800, x2=3)) # or ggplot()\n##D \n##D \n##D # Change reference value to get a relative odds plot for a logistic model\n##D d$limits$age[2] <- 30 # make 30 the reference value for age\n##D # Could also do: d$limits[\"Adjust to\",\"age\"] <- 30\n##D fit <- update(fit) # make new reference value take effect\n##D plot(Predict(fit, age, ref.zero=TRUE, fun=exp),\n##D ylab='Age=x:Age=30 Odds Ratio') # or ggplot()\n## End(Not run)\n\n\n"} {"package":"rms","topic":"fastbw","snippet":"### Name: fastbw\n### Title: Fast Backward Variable Selection\n### Aliases: fastbw print.fastbw\n### Keywords: models regression htest\n\n### ** Examples\n\n## Not run: \n##D fastbw(fit, optional.arguments) # print results\n##D z <- fastbw(fit, optional.args) # typically used in simulations\n##D lm.fit(X[,z$parms.kept], Y) # least squares fit of reduced model\n## End(Not run)\n\n\n"} {"package":"rms","topic":"gIndex","snippet":"### Name: gIndex\n### Title: Calculate Total and Partial g-indexes for an rms Fit\n### Aliases: gIndex print.gIndex plot.gIndex\n### Keywords: predictive accuracy robust univar\n\n### ** Examples\n\nset.seed(1)\nn <- 40\nx <- 1:n\nw <- factor(sample(c('a','b'), n, TRUE))\nu <- factor(sample(c('A','B'), n, TRUE))\ny <- .01*x + .2*(w=='b') + .3*(u=='B') + .2*(w=='b' & u=='B') + rnorm(n)/5\ndd <- datadist(x,w,u); options(datadist='dd')\nf <- ols(y ~ x*w*u, x=TRUE, y=TRUE)\nf\nanova(f)\nz <- list()\nfor(type in c('terms','cterms','ccterms'))\n {\n zc <- predict(f, type=type)\n cat('type:', type, '\\n')\n print(zc)\n z[[type]] <- zc\n }\n\nzc <- z$cterms\nGiniMd(zc[, 1])\nGiniMd(zc[, 2])\nGiniMd(zc[, 
3])\nGiniMd(f$linear.predictors)\ng <- gIndex(f)\ng\ng['Total',]\ngIndex(f, partials=FALSE)\ngIndex(f, type='cterms')\ngIndex(f, type='terms')\n\ny <- y > .8\nf <- lrm(y ~ x * w * u, x=TRUE, y=TRUE)\ngIndex(f, fun=plogis, funlabel='Prob[y=1]')\n\n# Manual calculation of combined main effect + interaction effect of\n# sex in a 2x2 design with treatments A B, sexes F M,\n# model -.1 + .3*(treat=='B') + .5*(sex=='M') + .4*(treat=='B' & sex=='M')\n\nset.seed(1)\nX <- expand.grid(treat=c('A','B'), sex=c('F', 'M'))\na <- 3; b <- 7; c <- 13; d <- 5\nX <- rbind(X[rep(1, a),], X[rep(2, b),], X[rep(3, c),], X[rep(4, d),])\ny <- with(X, -.1 + .3*(treat=='B') + .5*(sex=='M') + .4*(treat=='B' & sex=='M')) \nf <- ols(y ~ treat*sex, data=X, x=TRUE)\ngIndex(f, type='cterms')\nk <- coef(f)\nb1 <- k[2]; b2 <- k[3]; b3 <- k[4]\nn <- nrow(X)\n( (a+b)*c*abs(b2) + (a+b)*d*abs(b2+b3) + c*d*abs(b3))/(n*(n-1)/2 )\n\n# Manual calculation for combined age effect in a model with sex,\n# age, and age*sex interaction\n\na <- 13; b <- 7\nsex <- c(rep('female',a), rep('male',b))\nagef <- round(runif(a, 20, 30))\nagem <- round(runif(b, 20, 40))\nage <- c(agef, agem)\ny <- (sex=='male') + age/10 - (sex=='male')*age/20\nf <- ols(y ~ sex*age, x=TRUE)\nf\ngIndex(f, type='cterms')\nk <- coef(f)\nb1 <- k[2]; b2 <- k[3]; b3 <- k[4]\nn <- a + b\nsp <- function(w, z=w) sum(outer(w, z, function(u, v) abs(u-v)))\n\n( abs(b2)*sp(agef) + abs(b2+b3)*sp(agem) + 2*sp(b2*agef, (b2+b3)*agem) ) / (n*(n-1))\n\n( abs(b2)*GiniMd(agef)*a*(a-1) + abs(b2+b3)*GiniMd(agem)*b*(b-1) +\n 2*sp(b2*agef, (b2+b3)*agem) ) / (n*(n-1))
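\n\n# (a minimal sketch, assuming GiniMd computes the usual Gini mean\n# difference: mean absolute difference over all ordered pairs, as the\n# sp() identities above imply)\ngmd <- function(z) {\n nz <- length(z)\n sum(outer(z, z, function(u, v) abs(u - v))) / (nz * (nz - 1))\n}\nc(gmd(agef), GiniMd(agef)) # the two should agree\n\n## Not run: \n##D # Compare partial and total g-indexes over many random fits\n##D plot(NA, NA, xlim=c(0,3), ylim=c(0,3), xlab='Global',\n##D ylab='x1 (black) x2 (red) x3 (green) x4 (blue)')\n##D abline(a=0, b=1, col=gray(.9))\n##D big <- integer(3)\n##D n <- 50 # try with n=7 - see lots of exceptions esp. 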
for interacting var\n##D for(i in 1:100) {\n##D x1 <- runif(n)\n##D x2 <- runif(n)\n##D x3 <- runif(n)\n##D x4 <- runif(n)\n##D y <- x1 + x2 + x3 + x4 + 2*runif(n)\n##D f <- ols(y ~ x1*x2+x3+x4, x=TRUE)\n##D # f <- ols(y ~ x1+x2+x3+x4, x=TRUE) # also try this\n##D w <- gIndex(f)[,1]\n##D gt <- w['Total']\n##D points(gt, w['x1, x2'])\n##D points(gt, w['x3'], col='green')\n##D points(gt, w['x4'], col='blue')\n##D big[1] <- big[1] + (w['x1, x2'] > gt)\n##D big[2] <- big[2] + (w['x3'] > gt)\n##D big[3] <- big[3] + (w['x4'] > gt)\n##D }\n##D print(big)\n## End(Not run)\n\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"gendata","snippet":"### Name: gendata\n### Title: Generate Data Frame with Predictor Combinations\n### Aliases: gendata\n### Keywords: methods models regression manip\n\n### ** Examples\n\nset.seed(1)\nage <- rnorm(200, 50, 10)\nsex <- factor(sample(c('female','male'),200,TRUE))\nrace <- factor(sample(c('a','b','c','d'),200,TRUE))\ny <- sample(0:1, 200, TRUE)\ndd <- datadist(age,sex,race)\noptions(datadist=\"dd\")\nf <- lrm(y ~ age*sex + race)\ngendata(f)\ngendata(f, age=50)\nd <- gendata(f, age=50, sex=\"female\") # leave race=reference category\nd <- gendata(f, age=c(50,60), race=c(\"b\",\"a\")) # 4 obs.\nd$Predicted <- predict(f, d, type=\"fitted\")\nd # Predicted column prints at the far right\noptions(datadist=NULL)\n## Not run: \n##D d <- gendata(f, nobs=5, view=TRUE) # 5 interactively defined obs.\n##D d[,attr(d,\"names.subset\")] # print variables which varied\n##D predict(f, d)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"ggplot.Predict","snippet":"### Name: ggplot.Predict\n### Title: Plot Effects of Variables Estimated by a Regression Model Fit\n### Using ggplot2\n### Aliases: ggplot.Predict\n### Keywords: models hplot htest\n\n### ** Examples\n\nrequire(ggplot2)\nn <- 350 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male')) +\n .01 * (blood.pressure - 120)\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n x=TRUE, y=TRUE)\nan <- anova(fit)\n# Plot effects in two vertical sub-panels with continuous predictors on top\n# ggplot(Predict(fit), sepdiscrete='vertical')\n# Plot effects of all 4 predictors with test statistics from anova, and P\nggplot(Predict(fit), anova=an, pval=TRUE)\n# ggplot(Predict(fit), rdata=llist(blood.pressure, age))\n# spike histogram plot for two of the predictors\n\n# p <- Predict(fit, name=c('age','cholesterol')) # Make 2 plots\n# ggplot(p)\n\n# p <- Predict(fit, age=seq(20,80,length=100), sex, conf.int=FALSE)\n# # Plot relationship between age and log\n # odds, separate curve for each sex,\n# ggplot(p, subset=sex=='female' | age > 30)\n# No confidence interval, suppress estimates for males <= 30\n\n# p <- Predict(fit, age, sex)\n# ggplot(p, rdata=llist(age,sex))\n # 
rdata= allows rug plots (1-dimensional scatterplots)\n # on each sex's curve, with sex-\n # specific density of age\n # If data were in data frame could have used that\n# p <- Predict(fit, age=seq(20,80,length=100), sex='male', fun=plogis)\n # works if datadist not used\n# ggplot(p, ylab=expression(hat(P)))\n # plot predicted probability in place of log odds\n# per <- function(x, y) x >= 30\n# ggplot(p, perim=per) # suppress output for age < 30 but leave scale alone\n\n# Do ggplot2 faceting a few different ways\np <- Predict(fit, age, sex, blood.pressure=c(120,140,160),\n cholesterol=c(180,200,215))\n# ggplot(p)\nggplot(p, cholesterol ~ blood.pressure)\n# ggplot(p, ~ cholesterol + blood.pressure)\n# color for sex, line type for blood.pressure:\nggplot(p, groups=c('sex', 'blood.pressure'))\n# Add legend.position='top' to allow wider plot\n# Map blood.pressure to line thickness instead of line type:\n# ggplot(p, groups=c('sex', 'blood.pressure'), aestype=c('color', 'size'))\n\n# Plot the age effect as an odds ratio\n# comparing the age shown on the x-axis to age=30 years\n\n# ddist$limits$age[2] <- 30 # make 30 the reference value for age\n# Could also do: ddist$limits[\"Adjust to\",\"age\"] <- 30\n# fit <- update(fit) # make new reference value take effect\n# p <- Predict(fit, age, ref.zero=TRUE, fun=exp)\n# ggplot(p, ylab='Age=x:Age=30 Odds Ratio',\n# addlayer=geom_hline(yintercept=1, col=gray(.8)) +\n# geom_vline(xintercept=30, col=gray(.8)) +\n# scale_y_continuous(trans='log',\n# breaks=c(.5, 1, 2, 4, 8))))\n\n# Compute predictions for three predictors, with superpositioning or\n# conditioning on sex, combined into one graph\n\np1 <- Predict(fit, age, sex)\np2 <- Predict(fit, cholesterol, sex)\np3 <- Predict(fit, blood.pressure, sex)\np <- rbind(age=p1, cholesterol=p2, blood.pressure=p3)\nggplot(p, groups='sex', varypred=TRUE, adj.subtitle=FALSE)\n# ggplot(p, groups='sex', varypred=TRUE, adj.subtitle=FALSE, sepdiscrete='vert')\n\n## Not run: \n##D # For males at the median blood pressure and cholesterol, plot 3 types\n##D # of confidence intervals for the probability on one plot, for varying age\n##D ages <- seq(20, 80, length=100)\n##D p1 <- Predict(fit, age=ages, sex='male', fun=plogis) # standard pointwise\n##D p2 <- Predict(fit, age=ages, sex='male', fun=plogis,\n##D conf.type='simultaneous') # simultaneous\n##D p3 <- Predict(fit, age=c(60,65,70), sex='male', fun=plogis,\n##D conf.type='simultaneous') # simultaneous 3 pts\n##D # The previous only adjusts for a multiplicity of 3 points instead of 100\n##D f <- update(fit, x=TRUE, y=TRUE)\n##D g <- bootcov(f, B=500, coef.reps=TRUE)\n##D p4 <- Predict(g, age=ages, sex='male', fun=plogis) # bootstrap percentile\n##D p <- rbind(Pointwise=p1, 'Simultaneous 100 ages'=p2,\n##D 'Simultaneous 3 ages'=p3, 'Bootstrap nonparametric'=p4)\n##D # as.data.frame so will call built-in ggplot\n##D ggplot(as.data.frame(p), aes(x=age, y=yhat)) + geom_line() +\n##D geom_ribbon(data=p, aes(ymin=lower, ymax=upper), alpha=0.2, linetype=0)+\n##D facet_wrap(~ .set., ncol=2)\n##D \n##D # Plots for a parametric survival model\n##D n <- 1000\n##D set.seed(731)\n##D age <- 50 + 12*rnorm(n)\n##D label(age) <- \"Age\"\n##D sex <- factor(sample(c('Male','Female'), n, \n##D rep=TRUE, prob=c(.6, .4)))\n##D cens <- 15*runif(n)\n##D h <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\n##D t <- -log(runif(n))/h\n##D label(t) <- 'Follow-up Time'\n##D e <- ifelse(t<=cens,1,0)\n##D t <- pmin(t, cens)\n##D units(t) <- \"Year\"\n##D ddist <- datadist(age, sex)\n##D 
require(survival)\n##D Srv <- Surv(t,e)\n##D \n##D # Fit log-normal survival model and plot median survival time vs. age\n##D f <- psm(Srv ~ rcs(age), dist='lognormal')\n##D med <- Quantile(f) # Creates function to compute quantiles\n##D # (median by default)\n##D p <- Predict(f, age, fun=function(x) med(lp=x))\n##D ggplot(p, ylab=\"Median Survival Time\")\n##D # Note: confidence intervals from this method are approximate since\n##D # they don't take into account estimation of scale parameter\n##D \n##D \n##D # Fit an ols model to log(y) and plot the relationship between x1\n##D # and the predicted mean(y) on the original scale without assuming\n##D # normality of residuals; use the smearing estimator\n##D # See help file for rbind.Predict for a method of showing two\n##D # types of confidence intervals simultaneously.\n##D # Add raw data scatterplot to graph\n##D set.seed(1)\n##D x1 <- runif(300)\n##D x2 <- runif(300)\n##D ddist <- datadist(x1, x2); options(datadist='ddist')\n##D y <- exp(x1 + x2 - 1 + rnorm(300))\n##D f <- ols(log(y) ~ pol(x1,2) + x2)\n##D r <- resid(f)\n##D smean <- function(yhat)smearingEst(yhat, exp, res, statistic='mean')\n##D formals(smean) <- list(yhat=numeric(0), res=r[! is.na(r)])\n##D #smean$res <- r[! is.na(r)] # define default res argument to function\n##D ggplot(Predict(f, x1, fun=smean), ylab='Predicted Mean on y-scale', \n##D addlayer=geom_point(aes(x=x1, y=y), data.frame(x1, y)))\n##D # Had ggplot not added a subtitle (i.e., if x2 were not present), you\n##D # could have done ggplot(Predict(), ylab=...) + geom_point(...) \n## End(Not run)\n\n# Make an 'interaction plot', forcing the x-axis variable to be\n# plotted at integer values but labeled with category levels\nn <- 100\nset.seed(1)\ngender <- c(rep('male', n), rep('female',n))\nm <- sample(c('a','b'), 2*n, TRUE)\nd <- datadist(gender, m); options(datadist='d')\nanxiety <- runif(2*n) + .2*(gender=='female') + .4*(gender=='female' & m=='b')\ntapply(anxiety, llist(gender,m), mean)\nf <- ols(anxiety ~ gender*m)\np <- Predict(f, gender, m)\n# ggplot(p) # horizontal dot chart; usually preferred for categorical predictors\n# ggplot(p, flipxdiscrete=FALSE) # back to vertical\nggplot(p, groups='gender')\nggplot(p, ~ m, groups=FALSE, flipxdiscrete=FALSE)\n\noptions(datadist=NULL)\n\n## Not run: \n##D # Example in which separate curves are shown for 4 income values\n##D # For each curve the estimated percentage of voters voting for\n##D # the democratic party is plotted against the percent of voters\n##D # who graduated from college. 
Data are county-level percents.\n##D \n##D incomes <- seq(22900, 32800, length=4) \n##D # equally spaced to outer quintiles\n##D p <- Predict(f, college, income=incomes, conf.int=FALSE)\n##D ggplot(p, xlim=c(0,35), ylim=c(30,55))\n##D \n##D # Erase end portions of each curve where there are fewer than 10 counties having\n##D # percent of college graduates to the left of the x-coordinate being plotted,\n##D # for the subset of counties having median family income within 1650\n##D # of the target income for the curve\n##D \n##D show.pts <- function(college.pts, income.pt) {\n##D s <- abs(income - income.pt) < 1650 #assumes income known to top frame\n##D x <- college[s]\n##D x <- sort(x[!is.na(x)])\n##D n <- length(x)\n##D low <- x[10]; high <- x[n-9]\n##D college.pts >= low & college.pts <= high\n##D }\n##D \n##D ggplot(p, xlim=c(0,35), ylim=c(30,55), perim=show.pts)\n##D \n##D # Rename variables for better plotting of a long list of predictors\n##D f <- ...\n##D p <- Predict(f)\n##D re <- c(trt='treatment', diabet='diabetes', sbp='systolic blood pressure')\n##D \n##D for(n in names(re)) {\n##D names(p)[names(p)==n] <- re[n]\n##D p$.predictor.[p$.predictor.==n] <- re[n]\n##D }\n##D ggplot(p)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"groupkm","snippet":"### Name: groupkm\n### Title: Kaplan-Meier Estimates vs. a Continuous Variable\n### Aliases: groupkm\n### Keywords: survival nonparametric\n\n### ** Examples\n\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50))\nd.time <- -log(runif(n))/h\nlabel(d.time) <- 'Follow-up Time'\ne <- ifelse(d.time <= cens,1,0)\nd.time <- pmin(d.time, cens)\nunits(d.time) <- \"Year\"\ngroupkm(age, Surv(d.time, e), g=10, u=5, pl=TRUE)\n#Plot 5-year K-M survival estimates and 0.95 confidence bars by \n#decile of age. 
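\n#(m= may instead be given to request a minimum number of subjects per\n#group; a hedged variant of the call above:)\n#groupkm(age, Surv(d.time, e), m=100, u=5, pl=TRUE)\n#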
If omit g=10, will have >= 50 obs./group.\n\n\n"} {"package":"rms","topic":"hazard.ratio.plot","snippet":"### Name: hazard.ratio.plot\n### Title: Hazard Ratio Plot\n### Aliases: hazard.ratio.plot\n### Keywords: survival\n\n### ** Examples\n\nrequire(survival)\nn <- 500\nset.seed(1)\nage <- 50 + 12*rnorm(n)\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50))\nd.time <- -log(runif(n))/h\nlabel(d.time) <- 'Follow-up Time'\ne <- ifelse(d.time <= cens,1,0)\nd.time <- pmin(d.time, cens)\nunits(d.time) <- \"Year\"\nhazard.ratio.plot(age, Surv(d.time,e), e=20, legendloc='ll')\n\n\n"} {"package":"rms","topic":"ie.setup","snippet":"### Name: ie.setup\n### Title: Intervening Event Setup\n### Aliases: ie.setup\n### Keywords: survival\n\n### ** Examples\n\nfailure.time <- c(1 , 2, 3)\nevent <- c(1 , 1, 0)\nie.time <- c(NA, 1.5, 2.5)\n\nz <- ie.setup(failure.time, event, ie.time)\nS <- z$S\nS\nie.status <- z$ie.status\nie.status\nz$subs\nz$reps\n## Not run: \n##D attach(input.data.frame[z$subs,]) #replicates all variables\n##D f <- cph(S ~ age + sex + ie.status)\n##D # Instead of duplicating rows of data frame, could do this:\n##D attach(input.data.frame)\n##D z <- ie.setup(failure.time, event, ie.time)\n##D s <- z$subs\n##D age <- age[s]\n##D sex <- sex[s]\n##D f <- cph(S ~ age + sex + ie.status)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"impactPO","snippet":"### Name: impactPO\n### Title: Impact of Proportional Odds Assumption\n### Aliases: impactPO\n### Keywords: category models regression\n\n### ** Examples\n\n\n## Not run: \n##D set.seed(1)\n##D age <- rnorm(500, 50, 10)\n##D sex <- sample(c('female', 'male'), 500, TRUE)\n##D y <- sample(0:4, 500, TRUE)\n##D d <- expand.grid(age=50, sex=c('female', 'male'))\n##D w <- impactPO(y ~ age + sex, nonpo = ~ sex, newdata=d)\n##D w\n##D # Note that PO model is a better model than multinomial (lower AIC)\n##D # since multinomial model's improvement in fit is low in comparison\n##D # with number of additional parameters estimated. 
Same for PO model\n##D # in comparison with partial PO model.\n##D \n##D # Reverse levels of y so stacked bars have higher y located higher\n##D revo <- function(z) {\n##D z <- as.factor(z)\n##D factor(z, levels=rev(levels(as.factor(z))))\n##D }\n##D \n##D require(ggplot2)\n##D ggplot(w$estimates, aes(x=method, y=Probability, fill=revo(y))) +\n##D facet_wrap(~ sex) + geom_col() +\n##D xlab('') + guides(fill=guide_legend(title=''))\n##D \n##D # Now vary 2 predictors\n##D \n##D d <- expand.grid(sex=c('female', 'male'), age=c(40, 60))\n##D w <- impactPO(y ~ age + sex, nonpo = ~ sex, newdata=d)\n##D w\n##D ggplot(w$estimates, aes(x=method, y=Probability, fill=revo(y))) +\n##D facet_grid(age ~ sex) + geom_col() +\n##D xlab('') + guides(fill=guide_legend(title=''))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"Surv","snippet":"### Name: importedexported\n### Title: Exported Functions That Were Imported From Other Packages\n### Aliases: Surv ggplot\n\n### ** Examples\n\n## Not run: \n##D f <- psm(Surv(dtime, death) ~ x1 + x2 + sex + race, dist='gau')\n##D ggplot(Predict(f))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"latex.cph","snippet":"### Name: latex.cph\n### Title: LaTeX Representation of a Fitted Cox Model\n### Aliases: latex.cph latex.lrm latex.ols latex.orm latex.pphsm latex.psm\n### Keywords: regression character survival interface models\n\n### ** Examples\n\n## Not run: \n##D require(survival)\n##D units(ftime) <- \"Day\"\n##D f <- cph(Surv(ftime, death) ~ rcs(age)+sex, surv=TRUE, time.inc=60)\n##D w <- latex(f, file='f.tex') #Interprets fitted model and makes table of S0(t)\n##D #for t=0,60,120,180,...\n##D w #displays image, if viewer installed and file given above\n##D latex(f) # send LaTeX code to the console for knitr\n##D options(prType='html')\n##D latex(f) # for use with knitr and R Markdown/Quarto using MathJax\n## End(Not run)\n\n\n"} {"package":"rms","topic":"latexrms","snippet":"### Name: latexrms\n### Title: LaTeX Representation of a Fitted Model\n### Aliases: latexrms latex.bj latex.Glm latex.Gls\n### Keywords: models regression character methods interface\n\n### ** Examples\n\n## Not run: \n##D f <- lrm(death ~ rcs(age)+sex)\n##D w <- latex(f, file='f.tex')\n##D w # displays, using e.g. xdvi\n##D latex(f) # send LaTeX code to console, as for knitr\n##D options(prType='html')\n##D latex(f) # emit html and latex for knitr html and html notebooks\n## End(Not run)\n\n\n"} {"package":"rms","topic":"lrm","snippet":"### Name: lrm\n### Title: Logistic Regression Model\n### Aliases: lrm print.lrm\n### Keywords: category models\n\n### ** Examples\n\n#Fit a logistic model containing predictors age, blood.pressure, sex\n#and cholesterol, with age fitted with a smooth 5-knot restricted cubic \n#spline function and a different shape of the age relationship for males \n#and females. As an intermediate step, predict mean cholesterol from\n#age using a proportional odds ordinal logistic model\n#\nrequire(ggplot2)\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n#To use prop. 
odds model, avoid using a huge number of intercepts by\n#grouping cholesterol into 40-tiles\nch <- cut2(cholesterol, g=40, levels.mean=TRUE) # use mean values in intervals\ntable(ch)\nf <- lrm(ch ~ age)\noptions(prType='latex')\nprint(f, coefs=4) # write latex code to console\nm <- Mean(f) # see help file for Mean.lrm\nd <- data.frame(age=seq(0,90,by=10))\nm(predict(f, d))\n# Repeat using ols\nf <- ols(cholesterol ~ age)\npredict(f, d)\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\ncholesterol[1:3] <- NA # 3 missings, at random\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n x=TRUE, y=TRUE)\n# x=TRUE, y=TRUE allows use of resid(), which.influence below\n# could define d <- datadist(fit) after lrm(), but data distribution\n# summary would not be stored with fit, so later uses of Predict\n# or summary.rms would require access to the original dataset or\n# d or specifying all variable values to summary, Predict, nomogram\nanova(fit)\np <- Predict(fit, age, sex)\nggplot(p) # or plot()\nggplot(Predict(fit, age=20:70, sex=\"male\")) # need if datadist not used\nprint(cbind(resid(fit,\"dfbetas\"), resid(fit,\"dffits\"))[1:20,])\nwhich.influence(fit, .3)\n# latex(fit) #print nice statement of fitted model\n#\n#Repeat this fit using penalized MLE, penalizing complex terms\n#(for nonlinear or interaction effects)\n#\nfitp <- update(fit, penalty=list(simple=0,nonlinear=10), x=TRUE, y=TRUE)\neffective.df(fitp)\n# or lrm(y ~ \\dots, penalty=\\dots)\n\n\n#Get fits for a variety of penalties and assess predictive accuracy \n#in a new data set. Program efficiently so that complex design \n#matrices are only created once.\n\n\nset.seed(201)\nx1 <- rnorm(500)\nx2 <- rnorm(500)\nx3 <- sample(0:1,500,rep=TRUE)\nL <- x1+abs(x2)+x3\ny <- ifelse(runif(500)<=plogis(L), 1, 0)\nnew.data <- data.frame(x1,x2,x3,y)[301:500,]\n#\nfor(penlty in seq(0,.15,by=.005)) {\n if(penlty==0) {\n f <- lrm(y ~ rcs(x1,4)+rcs(x2,6)*x3, subset=1:300, x=TRUE, y=TRUE)\n # True model is linear in x1 and has no interaction\n X <- f$x # saves time for future runs - don't have to use rcs etc.\n Y <- f$y # this also deletes rows with NAs (if there were any)\n penalty.matrix <- diag(diag(var(X)))\n Xnew <- predict(f, new.data, type=\"x\") \n # expand design matrix for new data\n Ynew <- new.data$y\n } else f <- lrm.fit(X,Y, penalty.matrix=penlty*penalty.matrix)\n#\n cat(\"\\nPenalty :\",penlty,\"\\n\")\n pred.logit <- f$coef[1] + (Xnew %*% f$coef[-1])\n pred <- plogis(pred.logit)\n C.index <- somers2(pred, Ynew)[\"C\"]\n Brier <- mean((pred-Ynew)^2)\n Deviance<- -2*sum( Ynew*log(pred) + (1-Ynew)*log(1-pred) )\n cat(\"ROC area:\",format(C.index),\" Brier score:\",format(Brier),\n \" -2 Log L:\",format(Deviance),\"\\n\")\n}\n#penalty=0.045 gave lowest -2 Log L, Brier, ROC in test sample for S+\n#\n#Use bootstrap validation to estimate predictive accuracy of\n#logistic models with various penalties\n#To see how noisy cross-validation estimates can be, change the\n#validate(f, \\dots) to validate(f, method=\"cross\", B=10) for example.\n#You will see tremendous variation in accuracy with minute changes in\n#the penalty. 
This comes from the error inherent in using 10-fold\n#cross validation but also because we are not fixing the splits. \n#20-fold cross validation was even worse for some\n#indexes because of the small test sample size. Stability would be\n#obtained by using the same sample splits for all penalty values \n#(see above), but then we wouldn't be sure that the choice of the \n#best penalty is not specific to how the sample was split. This\n#problem is addressed in the last example.\n#\npenalties <- seq(0,.7,length=3) # really use by=.02\nindex <- matrix(NA, nrow=length(penalties), ncol=11,\n\t dimnames=list(format(penalties),\n c(\"Dxy\",\"R2\",\"Intercept\",\"Slope\",\"Emax\",\"D\",\"U\",\"Q\",\"B\",\"g\",\"gp\")))\ni <- 0\nfor(penlty in penalties)\n{\n cat(penlty, \"\")\n i <- i+1\n if(penlty==0)\n {\n f <- lrm(y ~ rcs(x1,4)+rcs(x2,6)*x3, x=TRUE, y=TRUE) # fit whole sample\n X <- f$x\n Y <- f$y\n penalty.matrix <- diag(diag(var(X))) # save time - only do once\n }\n else\n f <- lrm(Y ~ X, penalty=penlty,\n penalty.matrix=penalty.matrix, x=TRUE,y=TRUE)\n val <- validate(f, method=\"boot\", B=20) # use larger B in practice\n index[i,] <- val[,\"index.corrected\"]\n}\npar(mfrow=c(3,3))\nfor(i in 1:9)\n{\n plot(penalties, index[,i], \n xlab=\"Penalty\", ylab=dimnames(index)[[2]][i])\n lines(lowess(penalties, index[,i]))\n}\noptions(datadist=NULL)\n\n# Example of weighted analysis\nx <- 1:5\ny <- c(0,1,0,1,0)\nreps <- c(1,2,3,2,1)\nlrm(y ~ x, weights=reps)\nx <- rep(x, reps)\ny <- rep(y, reps)\nlrm(y ~ x) # same as above\n\n#\n#Study performance of a modified AIC which uses the effective d.f.\n#See Verweij and Van Houwelingen (1994) Eq. (6). Here AIC=chisq-2*df.\n#Also try as effective d.f. equation (4) of the previous reference.\n#Also study performance of Shao's cross-validation technique (which was\n#designed to pick the \"right\" set of variables, and uses a much smaller\n#training sample than most methods). Compare cross-validated deviance\n#vs. penalty to the gold standard accuracy on a 7500 observation dataset.\n#Note that if you only want to get AIC or Schwarz Bayesian information\n#criterion, all you need is to invoke the pentrace function.\n#NOTE: the effective.df( ) function is used in practice\n#\n## Not run: \n##D for(seed in c(339,777,22,111,3)){ \n##D # study performance for several datasets\n##D set.seed(seed)\n##D n <- 175; p <- 8\n##D X <- matrix(rnorm(n*p), ncol=p) # p normal(0,1) predictors\n##D Coef <- c(-.1,.2,-.3,.4,-.5,.6,-.65,.7) # true population coefficients\n##D L <- X %*% Coef # intercept is zero\n##D Y <- ifelse(runif(n)<=plogis(L), 1, 0)\n##D pm <- diag(diag(var(X)))\n##D #Generate a large validation sample to use as a gold standard\n##D n.val <- 7500\n##D X.val <- matrix(rnorm(n.val*p), ncol=p)\n##D L.val <- X.val %*% Coef\n##D Y.val <- ifelse(runif(n.val)<=plogis(L.val), 1, 0)\n##D #\n##D Penalty <- seq(0,30,by=1)\n##D reps <- length(Penalty)\n##D effective.df <- effective.df2 <- aic <- aic2 <- deviance.val <- \n##D Lpenalty <- single(reps)\n##D n.t <- round(n^.75)\n##D ncv <- c(10,20,30,40) # try various no. 
of reps in cross-val.\n##D deviance <- matrix(NA,nrow=reps,ncol=length(ncv))\n##D #If model were complex, could have started things off by getting X, Y\n##D #penalty.matrix from an initial lrm fit to save time\n##D #\n##D for(i in 1:reps) {\n##D pen <- Penalty[i]\n##D cat(format(pen),\"\")\n##D f.full <- lrm.fit(X, Y, penalty.matrix=pen*pm)\n##D Lpenalty[i] <- pen* t(f.full$coef[-1]) %*% pm %*% f.full$coef[-1]\n##D f.full.nopenalty <- lrm.fit(X, Y, initial=f.full$coef, maxit=1)\n##D info.matrix.unpenalized <- solve(f.full.nopenalty$var)\n##D effective.df[i] <- sum(diag(info.matrix.unpenalized %*% f.full$var)) - 1\n##D lrchisq <- f.full.nopenalty$stats[\"Model L.R.\"]\n##D # lrm does all this penalty adjustment automatically (for var, d.f.,\n##D # chi-square)\n##D aic[i] <- lrchisq - 2*effective.df[i]\n##D #\n##D pred <- plogis(f.full$linear.predictors)\n##D score.matrix <- cbind(1,X) * (Y - pred)\n##D sum.u.uprime <- t(score.matrix) %*% score.matrix\n##D effective.df2[i] <- sum(diag(f.full$var %*% sum.u.uprime))\n##D aic2[i] <- lrchisq - 2*effective.df2[i]\n##D #\n##D #Shao suggested averaging 2*n cross-validations, but let's do only 40\n##D #and stop along the way to see if fewer is OK\n##D dev <- 0\n##D for(j in 1:max(ncv)) {\n##D s <- sample(1:n, n.t)\n##D cof <- lrm.fit(X[s,],Y[s], \n##D penalty.matrix=pen*pm)$coef\n##D pred <- cof[1] + (X[-s,] %*% cof[-1])\n##D dev <- dev -2*sum(Y[-s]*pred + log(1-plogis(pred)))\n##D for(k in 1:length(ncv)) if(j==ncv[k]) deviance[i,k] <- dev/j\n##D }\n##D #\n##D pred.val <- f.full$coef[1] + (X.val %*% f.full$coef[-1])\n##D prob.val <- plogis(pred.val)\n##D deviance.val[i] <- -2*sum(Y.val*pred.val + log(1-prob.val))\n##D }\n##D postscript(hor=TRUE) # along with graphics.off() below, allow plots\n##D par(mfrow=c(2,4)) # to be printed as they are finished\n##D plot(Penalty, effective.df, type=\"l\")\n##D lines(Penalty, effective.df2, lty=2)\n##D plot(Penalty, Lpenalty, type=\"l\")\n##D title(\"Penalty on -2 log L\")\n##D plot(Penalty, aic, type=\"l\")\n##D lines(Penalty, aic2, lty=2)\n##D for(k in 1:length(ncv)) {\n##D plot(Penalty, deviance[,k], ylab=\"deviance\")\n##D title(paste(ncv[k],\"reps\"))\n##D lines(supsmu(Penalty, deviance[,k]))\n##D }\n##D plot(Penalty, deviance.val, type=\"l\")\n##D title(\"Gold Standard (n=7500)\")\n##D title(sub=format(seed),adj=1,cex=.5)\n##D graphics.off()\n##D }\n## End(Not run)\n#The results showed that to obtain a clear picture of the penalty-\n#accuracy relationship one needs 30 or 40 reps in the cross-validation.\n#For 4 of 5 samples, though, the super smoother was able to detect\n#an accurate penalty giving the best (lowest) deviance using 10-fold\n#cross-validation. Cross-validation would have worked better had\n#the same splits been used for all penalties.\n#The AIC methods worked just as well and are much quicker to compute.\n#The first AIC based on the effective d.f. in Gray's Eq. 2.9\n#(Verweij and Van Houwelingen (1994) Eq. 
5 (note typo)) worked best.\n\n\n"}
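# Editor's note: a minimal sketch, not from the rms documentation, of the
# remedy suggested above: hold the cross-validation splits fixed across
# penalty values by restoring the random-number generator to the same state
# before each call to validate(). It reuses X, Y, penalty.matrix, and
# penalties from the example above; the seed value is arbitrary.
for(penlty in penalties) {
  f <- lrm(Y ~ X, penalty=penlty, penalty.matrix=penalty.matrix,
           x=TRUE, y=TRUE)
  set.seed(131)  # same seed => identical 10-fold splits for every penalty
  print(validate(f, method="crossvalidation", B=10)[, "index.corrected"])
}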
{"package":"rms","topic":"lrm.fit","snippet":"### Name: lrm.fit\n### Title: Logistic Model Fitter\n### Aliases: lrm.fit\n### Keywords: models regression\n\n### ** Examples\n\n#Fit an additive logistic model containing numeric predictors age, \n#blood.pressure, and sex, assumed to be already properly coded and \n#transformed\n#\n# fit <- lrm.fit(cbind(age,blood.pressure,sex), death)\n\n\n"} {"package":"rms","topic":"matinv","snippet":"### Name: matinv\n### Title: Total and Partial Matrix Inversion using Gauss-Jordan Sweep\n### Operator\n### Aliases: matinv\n### Keywords: array\n\n### ** Examples\n\na <- diag(1:3)\na.inv1 <- matinv(a, 1, negate=FALSE)\t #Invert with respect to a[1,1]\na.inv1\na.inv <- -matinv(a.inv1, 2:3, negate=FALSE) #Finish the job\na.inv\nsolve(a)\n\n\n"} {"package":"rms","topic":"nomogram","snippet":"### Name: nomogram\n### Title: Draw a Nomogram Representing a Regression Fit\n### Aliases: nomogram print.nomogram plot.nomogram legend.nomabbrev\n### Keywords: models regression hplot\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nd <- data.frame(age = rnorm(n, 50, 10),\n blood.pressure = rnorm(n, 120, 15),\n cholesterol = rnorm(n, 200, 25),\n sex = factor(sample(c('female','male'), n,TRUE)))\n\n# Specify population model for log odds that Y=1\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\nd <- upData(d,\n L = .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male')),\n y = ifelse(runif(n) < plogis(L), 1, 0))\n\nddist <- datadist(d); options(datadist='ddist')\n\n\nf <- lrm(y ~ lsp(age,50) + sex * rcs(cholesterol, 4) + blood.pressure,\n data=d)\nnom <- nomogram(f, fun=function(x)1/(1+exp(-x)), # or fun=plogis\n fun.at=c(.001,.01,.05,seq(.1,.9,by=.1),.95,.99,.999),\n funlabel=\"Risk of Death\")\n#Instead of fun.at, could have specified fun.lp.at=logit of\n#sequence above - faster and slightly more accurate\nplot(nom, xfrac=.45)\nprint(nom)\nnom <- nomogram(f, age=seq(10,90,by=10))\nplot(nom, xfrac=.45)\ng <- lrm(y ~ sex + rcs(age, 3) * rcs(cholesterol, 3), data=d)\nnom <- nomogram(g, interact=list(age=c(20,40,60)), \n conf.int=c(.7,.9,.95))\nplot(nom, col.conf=c(1,.5,.2), naxes=7)\n\nrequire(survival)\nw <- upData(d,\n cens = 15 * runif(n),\n h = .02 * exp(.04 * (age - 50) + .8 * (sex == 'female')),\n d.time = -log(runif(n)) / h,\n death = ifelse(d.time <= cens, 1, 0),\n d.time = pmin(d.time, cens))\n\n\nf <- psm(Surv(d.time,death) ~ sex * age, data=w, dist='lognormal')\nmed <- Quantile(f)\nsurv <- Survival(f) # This would also work if f was from cph\nplot(nomogram(f, fun=function(x) med(lp=x), funlabel=\"Median Survival Time\"))\nnom <- nomogram(f, fun=list(function(x) surv(3, x),\n function(x) surv(6, x)),\n funlabel=c(\"3-Month Survival Probability\", \n \"6-Month Survival Probability\"))\nplot(nom, xfrac=.7)\n\n## Not run: \n##D nom <- nomogram(fit.with.categorical.predictors, abbrev=TRUE, minlength=1)\n##D nom$x1$points # print points assigned to each level of x1 for its axis\n##D #Add legend for abbreviations for category levels\n##D abb <- attr(nom, 'info')$abbrev$treatment\n##D legend(locator(1), abb$full, pch=paste(abb$abbrev,collapse=''), \n##D ncol=2, bty='n') # this only works for 1-letter abbreviations\n##D #Or use the legend.nomabbrev function:\n##D legend.nomabbrev(nom, 'treatment', locator(1), ncol=2, bty='n')\n## End(Not run)\n\n\n#Make a nomogram with axes predicting probabilities Y>=j for all j=1-3\n#in an ordinal logistic model, where Y=0,1,2,3\nw <- upData(w, Y = ifelse(y==0, 0, sample(1:3, length(y), TRUE)))\ng <- lrm(Y ~ age+rcs(cholesterol,4) * sex, data=w)\nfun2 <- function(x) plogis(x-g$coef[1]+g$coef[2])\nfun3 <- function(x) plogis(x-g$coef[1]+g$coef[3])\nf <- Newlabels(g, c(age='Age in Years')) \n#see Design.Misc, which also has Newlevels to change \n#labels for levels of categorical variables\ng <- nomogram(f, fun=list('Prob Y>=1'=plogis, 'Prob Y>=2'=fun2, \n 'Prob Y=3'=fun3), \n fun.at=c(.01,.05,seq(.1,.9,by=.1),.95,.99))\nplot(g, lmgp=.2, cex.axis=.6)\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"npsurv","snippet":"### Name: npsurv\n### Title: Nonparametric Survival Estimates for Censored Data\n### Aliases: npsurv\n\n### ** Examples\n\nrequire(survival)\n# fit a Kaplan-Meier and plot it\nfit <- npsurv(Surv(time, status) ~ x, data = aml)\nplot(fit, lty = 2:3)\nlegend(100, .8, c(\"Maintained\", \"Nonmaintained\"), lty = 2:3)\n\n# Here is the data set from Turnbull\n# There are no interval censored subjects, only left-censored (status=2),\n# right-censored (status=0) and observed events (status=1)\n#\n# Time\n# 1 2 3 4\n# Type of observation\n# death 12 6 2 3\n# losses 3 2 0 3\n# late entry 2 4 2 5\n#\ntdata <- data.frame(time = c(1,1,1,2,2,2,3,3,3,4,4,4),\n status = rep(c(1,0,2),4),\n n = c(12,3,2,6,2,4,2,0,2,3,3,5))\nfit <- npsurv(Surv(time, time, status, type='interval') ~ 1,\n data=tdata, weights=n)\n\n#\n# Time to progression/death for patients with monoclonal gammopathy\n# Competing risk curves (cumulative incidence)\n# status variable must be a factor with first level denoting right censoring\nm <- upData(mgus1, stop = stop / 365.25, units=c(stop='years'),\n labels=c(stop='Follow-up Time'), subset=start == 0)\nf <- npsurv(Surv(stop, event) ~ 1, data=m)\n\n# CI curves are always plotted from 0 upwards, rather than 1 down\nplot(f, fun='event', xmax=20, mark.time=FALSE,\n col=2:3, xlab=\"Years post diagnosis of MGUS\")\ntext(10, .4, \"Competing Risk: death\", col=3)\ntext(16, .15,\"Competing Risk: progression\", col=2)\n\n# Use survplot for enhanced displays of cumulative incidence curves for\n# competing risks\n\nsurvplot(f, state='pcm', n.risk=TRUE, xlim=c(0, 20), ylim=c(0, .5), col=2)\nsurvplot(f, state='death', add=TRUE, col=3)\n\nf <- npsurv(Surv(stop, event) ~ sex, data=m)\nsurvplot(f, state='death', n.risk=TRUE, conf='diffbands')\n\n\n"} {"package":"rms","topic":"ols","snippet":"### Name: ols\n### Title: Linear Model Estimation Using Ordinary Least Squares\n### Aliases: ols\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- runif(200)\nx2 <- sample(0:3, 200, TRUE)\ndistance <- (x1 + x2/3 + rnorm(200))^2\nd <- datadist(x1,x2)\noptions(datadist=\"d\") # No d -> no summary, plot without giving all details\n\n\nf <- ols(sqrt(distance) ~ rcs(x1,4) + scored(x2), x=TRUE)\n# could use d <- datadist(f); options(datadist=\"d\") at this point,\n# but predictor summaries would not be stored in the fit object for\n# use with Predict, summary.rms. 
In that case, the original\n# dataset or d would need to be accessed later, or all variable values\n# would have to be specified to summary, plot\nanova(f)\nwhich.influence(f)\nsummary(f)\nsummary.lm(f) # will only work if penalty and penalty.matrix not used\n\n\n# Fit a complex model and approximate it with a simple one\nx1 <- runif(200)\nx2 <- runif(200)\nx3 <- runif(200)\nx4 <- runif(200)\ny <- x1 + x2 + rnorm(200)\nf <- ols(y ~ rcs(x1,4) + x2 + x3 + x4)\npred <- fitted(f) # or predict(f) or f$linear.predictors\nf2 <- ols(pred ~ rcs(x1,4) + x2 + x3 + x4, sigma=1)\n# sigma=1 prevents numerical problems resulting from R2=1\nfastbw(f2, aics=100000)\n# This will find the best 1-variable model, best 2-variable model, etc.\n# in predicting the predicted values from the original model\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"orm","snippet":"### Name: orm\n### Title: Ordinal Regression Model\n### Aliases: orm print.orm Quantile.orm\n### Keywords: category models\n\n### ** Examples\n\nrequire(ggplot2)\nset.seed(1)\nn <- 100\ny <- round(runif(n), 2)\nx1 <- sample(c(-1,0,1), n, TRUE)\nx2 <- sample(c(-1,0,1), n, TRUE)\nf <- lrm(y ~ x1 + x2, eps=1e-5)\ng <- orm(y ~ x1 + x2, eps=1e-5)\nmax(abs(coef(g) - coef(f)))\nw <- vcov(g, intercepts='all') / vcov(f) - 1\nmax(abs(w))\n\nset.seed(1)\nn <- 300\nx1 <- c(rep(0,150), rep(1,150))\ny <- rnorm(n) + 3*x1\ng <- orm(y ~ x1)\ng\nk <- coef(g)\ni <- num.intercepts(g)\nh <- orm(y ~ x1, family=probit)\nll <- orm(y ~ x1, family=loglog)\ncll <- orm(y ~ x1, family=cloglog)\ncau <- orm(y ~ x1, family=cauchit)\nx <- 1:i\nz <- list(logistic=list(x=x, y=coef(g)[1:i]),\n probit =list(x=x, y=coef(h)[1:i]),\n loglog =list(x=x, y=coef(ll)[1:i]),\n cloglog =list(x=x, y=coef(cll)[1:i]))\nlabcurve(z, pl=TRUE, col=1:4, ylab='Intercept')\n\ntapply(y, x1, mean)\nm <- Mean(g)\nm(w <- k[1] + k['x1']*c(0,1))\nmh <- Mean(h)\nwh <- coef(h)[1] + coef(h)['x1']*c(0,1)\nmh(wh)\n\nqu <- Quantile(g)\n# Compare model estimated and empirical quantiles\ncq <- function(y) {\n cat(qu(.1, w), tapply(y, x1, quantile, probs=.1), '\\n')\n cat(qu(.5, w), tapply(y, x1, quantile, probs=.5), '\\n')\n cat(qu(.9, w), tapply(y, x1, quantile, probs=.9), '\\n')\n }\ncq(y)\n\n# Try on log-normal model\ng <- orm(exp(y) ~ x1)\ng\nk <- coef(g)\nplot(k[1:i])\nm <- Mean(g)\nm(w <- k[1] + k['x1']*c(0,1))\ntapply(exp(y), x1, mean)\n\nqu <- Quantile(g)\ncq(exp(y))\n\n# Compare predicted mean with ols for a continuous x\nset.seed(3)\nn <- 200\nx1 <- rnorm(n)\ny <- x1 + rnorm(n)\ndd <- datadist(x1); options(datadist='dd')\nf <- ols(y ~ x1)\ng <- orm(y ~ x1, family=probit)\nh <- orm(y ~ x1, family=logistic)\nw <- orm(y ~ x1, family=cloglog)\nmg <- Mean(g); mh <- Mean(h); mw <- Mean(w)\nr <- rbind(ols = Predict(f, conf.int=FALSE),\n probit = Predict(g, conf.int=FALSE, fun=mg),\n logistic = Predict(h, conf.int=FALSE, fun=mh),\n cloglog = Predict(w, conf.int=FALSE, fun=mw))\nplot(r, groups='.set.')\n\n# Compare predicted 0.8 quantile with quantile regression\nqu <- Quantile(g)\nqu80 <- function(lp) qu(.8, lp)\nf <- Rq(y ~ x1, tau=.8)\nr <- rbind(probit = Predict(g, conf.int=FALSE, fun=qu80),\n quantreg = Predict(f, conf.int=FALSE))\nplot(r, groups='.set.')\n\n# Verify transformation invariance of ordinal regression\nga <- orm(exp(y) ~ x1, family=probit)\nqua <- Quantile(ga)\nqua80 <- function(lp) log(qua(.8, lp))\nr <- rbind(logprobit = Predict(ga, conf.int=FALSE, fun=qua80),\n probit = Predict(g, conf.int=FALSE, fun=qu80))\nplot(r, groups='.set.')\n\n# Try the same with quantile regression. 
Need to transform x1\nfa <- Rq(exp(y) ~ rcs(x1,5), tau=.8)\nr <- rbind(qr = Predict(f, conf.int=FALSE),\n logqr = Predict(fa, conf.int=FALSE, fun=log))\nplot(r, groups='.set.')\n\n# Make a plot of Pr(Y >= y) vs. a continuous covariate for 3 levels\n# of y and also against a binary covariate\nset.seed(1)\nn <- 1000\nage <- rnorm(n, 50, 15)\nsex <- sample(c('m', 'f'), 1000, TRUE)\nY <- runif(n)\ndd <- datadist(age, sex); options(datadist='dd')\nf <- orm(Y ~ age + sex)\n# Use ExProb function to derive an R function to compute\n# P(Y >= y | X)\nex <- ExProb(f)\nex1 <- function(x) ex(x, y=0.25)\nex2 <- function(x) ex(x, y=0.5)\nex3 <- function(x) ex(x, y=0.75)\np1 <- Predict(f, age, sex, fun=ex1)\np2 <- Predict(f, age, sex, fun=ex2)\np3 <- Predict(f, age, sex, fun=ex3)\np <- rbind('P(Y >= 0.25)' = p1,\n 'P(Y >= 0.5)' = p2,\n 'P(Y >= 0.75)' = p3)\nggplot(p)\n\n# Make plot with two curves (by sex) with y on the x-axis, and\n# estimated P(Y >= y | sex, age=median) on the y-axis\nys <- seq(min(Y), max(Y), length=100)\ng <- function(sx) as.vector(ex(y=ys, Predict(f, sex=sx)$yhat)$prob)\n\nd <- rbind(data.frame(sex='m', y=ys, p=g('m')),\n data.frame(sex='f', y=ys, p=g('f')))\nggplot(d, aes(x=y, y=p, color=sex)) + geom_line() +\n ylab(expression(P(Y >= y))) +\n guides(color=guide_legend(title='Sex')) +\n theme(legend.position='bottom')\n\noptions(datadist=NULL)\n## Not run: \n##D ## Simulate power and type I error for orm logistic and probit regression\n##D ## for likelihood ratio, Wald, and score chi-square tests, and compare\n##D ## with t-test\n##D require(rms)\n##D set.seed(5)\n##D nsim <- 2000\n##D r <- NULL\n##D for(beta in c(0, .4)) {\n##D for(n in c(10, 50, 300)) {\n##D cat('beta=', beta, ' n=', n, '\\n\\n')\n##D plogistic <- pprobit <- plogistics <- pprobits <- plogisticw <-\n##D pprobitw <- ptt <- numeric(nsim)\n##D x <- c(rep(0, n/2), rep(1, n/2))\n##D pb <- setPb(nsim, every=25, label=paste('beta=', beta, ' n=', n))\n##D for(j in 1:nsim) {\n##D pb(j)\n##D y <- beta*x + rnorm(n)\n##D tt <- t.test(y ~ x)\n##D ptt[j] <- tt$p.value\n##D f <- orm(y ~ x)\n##D plogistic[j] <- f$stats['P']\n##D plogistics[j] <- f$stats['Score P']\n##D plogisticw[j] <- 1 - pchisq(coef(f)['x']^2 / vcov(f)[2,2], 1)\n##D f <- orm(y ~ x, family=probit)\n##D pprobit[j] <- f$stats['P']\n##D pprobits[j] <- f$stats['Score P']\n##D pprobitw[j] <- 1 - pchisq(coef(f)['x']^2 / vcov(f)[2,2], 1)\n##D }\n##D if(beta == 0) plot(ecdf(plogistic))\n##D r <- rbind(r, data.frame(beta = beta, n=n,\n##D ttest = mean(ptt < 0.05),\n##D logisticlr = mean(plogistic < 0.05),\n##D logisticscore= mean(plogistics < 0.05),\n##D logisticwald = mean(plogisticw < 0.05),\n##D probit = mean(pprobit < 0.05),\n##D probitscore = mean(pprobits < 0.05),\n##D probitwald = mean(pprobitw < 0.05)))\n##D }\n##D }\n##D print(r)\n##D # beta n ttest logisticlr logisticscore logisticwald probit probitscore probitwald\n##D #1 0.0 10 0.0435 0.1060 0.0655 0.043 0.0920 0.0920 0.0820\n##D #2 0.0 50 0.0515 0.0635 0.0615 0.060 0.0620 0.0620 0.0620\n##D #3 0.0 300 0.0595 0.0595 0.0590 0.059 0.0605 0.0605 0.0605\n##D #4 0.4 10 0.0755 0.1595 0.1070 0.074 0.1430 0.1430 0.1285\n##D #5 0.4 50 0.2950 0.2960 0.2935 0.288 0.3120 0.3120 0.3120\n##D #6 0.4 300 0.9240 0.9215 0.9205 0.920 0.9230 0.9230 0.9230\n## End(Not run)\n\n\n"} {"package":"rms","topic":"orm.fit","snippet":"### Name: orm.fit\n### Title: Ordinal Regression Model Fitter\n### Aliases: orm.fit\n### Keywords: models regression\n\n### ** Examples\n\n#Fit an additive logistic model containing numeric predictors age, 
\n#blood.pressure, and sex, assumed to be already properly coded and \n#transformed\n#\n# fit <- orm.fit(cbind(age,blood.pressure,sex), death)\n\n\n"} {"package":"rms","topic":"pentrace","snippet":"### Name: pentrace\n### Title: Trace AIC and BIC vs. Penalty\n### Aliases: pentrace plot.pentrace print.pentrace effective.df\n### Keywords: models regression\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\n\nf <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n x=TRUE, y=TRUE)\np <- pentrace(f, seq(.2,1,by=.05))\nplot(p)\np$diag # may learn something about fractional effective d.f. \n # for each original parameter\npentrace(f, list(simple=c(0,.2,.4), nonlinear=c(0,.2,.4,.8,1)))\n\n\n# Bootstrap pentrace 5 times, making a plot of corrected AIC plot with 5 reps\nn <- nrow(f$x)\nplot(pentrace(f, seq(.2,1,by=.05)), which='aic.c', \n col=1, ylim=c(30,120)) #original in black\nfor(j in 1:5)\n plot(pentrace(f, seq(.2,1,by=.05), subset=sample(n,n,TRUE)), \n which='aic.c', col=j+1, add=TRUE)\n\n\n# Find penalty giving optimum corrected AIC. Initial guess is 1.0\n# Not implemented yet\n# pentrace(f, 1, method='optimize')\n\n\n# Find penalty reducing total regression d.f. effectively to 5\n# pentrace(f, 1, target.df=5)\n\n\n# Re-fit with penalty giving best aic.c without differential penalization\nf <- update(f, penalty=p$penalty)\neffective.df(f)\n\n\n"} {"package":"rms","topic":"plot.Predict","snippet":"### Name: plot.Predict\n### Title: Plot Effects of Variables Estimated by a Regression Model Fit\n### Aliases: plot.Predict pantext\n### Keywords: models hplot htest\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n x=TRUE, y=TRUE)\nan <- anova(fit)\n# Plot effects of all 4 predictors with test statistics from anova, and P\nplot(Predict(fit), anova=an, pval=TRUE)\nplot(Predict(fit), data=llist(blood.pressure,age))\n # rug plot for two of the predictors\n\np <- Predict(fit, name=c('age','cholesterol')) # Make 2 plots\nplot(p)\n\np <- Predict(fit, age=seq(20,80,length=100), sex, conf.int=FALSE)\n # Plot relationship between age and log\n # odds, separate curve for each sex,\nplot(p, subset=sex=='female' | age > 30)\n# No confidence interval, suppress estimates for males <= 30\n\np <- Predict(fit, age, 
sex)\nplot(p, label.curves=FALSE, data=llist(age,sex))\n # use label.curves=list(keys=c('a','b'))'\n # to use 1-letter abbreviations\n # data= allows rug plots (1-dimensional scatterplots)\n # on each sex's curve, with sex-\n # specific density of age\n # If data were in data frame could have used that\np <- Predict(fit, age=seq(20,80,length=100), sex='male', fun=plogis)\n # works if datadist not used\nplot(p, ylab=expression(hat(P)))\n # plot predicted probability in place of log odds\n\nper <- function(x, y) x >= 30\nplot(p, perim=per) # suppress output for age < 30 but leave scale alone\n\n# Take charge of the plot setup by specifying a lattice formula\np <- Predict(fit, age, blood.pressure=c(120,140,160),\n cholesterol=c(180,200,215), sex)\nplot(p, ~ age | blood.pressure*cholesterol, subset=sex=='male')\n# plot(p, ~ age | cholesterol*blood.pressure, subset=sex=='female')\n# plot(p, ~ blood.pressure|cholesterol*round(age,-1), subset=sex=='male')\nplot(p)\n\n# Plot the age effect as an odds ratio\n# comparing the age shown on the x-axis to age=30 years\n\nddist$limits$age[2] <- 30 # make 30 the reference value for age\n# Could also do: ddist$limits[\"Adjust to\",\"age\"] <- 30\nfit <- update(fit) # make new reference value take effect\np <- Predict(fit, age, ref.zero=TRUE, fun=exp)\nplot(p, ylab='Age=x:Age=30 Odds Ratio',\n abline=list(list(h=1, lty=2, col=2), list(v=30, lty=2, col=2)))\n\n# Compute predictions for three predictors, with superpositioning or\n# conditioning on sex, combined into one graph\n\np1 <- Predict(fit, age, sex)\np2 <- Predict(fit, cholesterol, sex)\np3 <- Predict(fit, blood.pressure, sex)\np <- rbind(age=p1, cholesterol=p2, blood.pressure=p3)\nplot(p, groups='sex', varypred=TRUE, adj.subtitle=FALSE)\nplot(p, cond='sex', varypred=TRUE, adj.subtitle=FALSE)\n\n## Not run: \n##D # For males at the median blood pressure and cholesterol, plot 3 types\n##D # of confidence intervals for the probability on one plot, for varying age\n##D ages <- seq(20, 80, length=100)\n##D p1 <- Predict(fit, age=ages, sex='male', fun=plogis) # standard pointwise\n##D p2 <- Predict(fit, age=ages, sex='male', fun=plogis,\n##D conf.type='simultaneous') # simultaneous\n##D p3 <- Predict(fit, age=c(60,65,70), sex='male', fun=plogis,\n##D conf.type='simultaneous') # simultaneous 3 pts\n##D # The previous only adjusts for a multiplicity of 3 points instead of 100\n##D f <- update(fit, x=TRUE, y=TRUE)\n##D g <- bootcov(f, B=500, coef.reps=TRUE)\n##D p4 <- Predict(g, age=ages, sex='male', fun=plogis) # bootstrap percentile\n##D p <- rbind(Pointwise=p1, 'Simultaneous 100 ages'=p2,\n##D 'Simultaneous 3 ages'=p3, 'Bootstrap nonparametric'=p4)\n##D xYplot(Cbind(yhat, lower, upper) ~ age, groups=.set.,\n##D data=p, type='l', method='bands', label.curve=list(keys='lines'))\n## End(Not run)\n\n# Plots for a parametric survival model\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('Male','Female'), n, \n rep=TRUE, prob=c(.6, .4)))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\nt <- -log(runif(n))/h\nlabel(t) <- 'Follow-up Time'\ne <- ifelse(t<=cens,1,0)\nt <- pmin(t, cens)\nunits(t) <- \"Year\"\nddist <- datadist(age, sex)\nSrv <- Surv(t,e)\n\n\n# Fit log-normal survival model and plot median survival time vs. 
age\nf <- psm(Srv ~ rcs(age), dist='lognormal')\nmed <- Quantile(f) # Creates function to compute quantiles\n # (median by default)\np <- Predict(f, age, fun=function(x) med(lp=x))\nplot(p, ylab=\"Median Survival Time\")\n# Note: confidence intervals from this method are approximate since\n# they don't take into account estimation of scale parameter\n\n\n# Fit an ols model to log(y) and plot the relationship between x1\n# and the predicted mean(y) on the original scale without assuming\n# normality of residuals; use the smearing estimator\n# See help file for rbind.Predict for a method of showing two\n# types of confidence intervals simultaneously.\nset.seed(1)\nx1 <- runif(300)\nx2 <- runif(300)\nddist <- datadist(x1,x2)\ny <- exp(x1+x2-1+rnorm(300))\nf <- ols(log(y) ~ pol(x1,2)+x2)\nr <- resid(f)\nsmean <- function(yhat)smearingEst(yhat, exp, res, statistic='mean')\nformals(smean) <- list(yhat=numeric(0), res=r[!is.na(r)])\n#smean$res <- r[!is.na(r)] # define default res argument to function\nplot(Predict(f, x1, fun=smean), ylab='Predicted Mean on y-scale')\n\n# Make an 'interaction plot', forcing the x-axis variable to be\n# plotted at integer values but labeled with category levels\nn <- 100\nset.seed(1)\ngender <- c(rep('male', n), rep('female',n))\nm <- sample(c('a','b'), 2*n, TRUE)\nd <- datadist(gender, m); options(datadist='d')\nanxiety <- runif(2*n) + .2*(gender=='female') + .4*(gender=='female' & m=='b')\ntapply(anxiety, llist(gender,m), mean)\nf <- ols(anxiety ~ gender*m)\np <- Predict(f, gender, m)\nplot(p) # horizontal dot chart; usually preferred for categorical predictors\nKey(.5, .5)\nplot(p, ~gender, groups='m', nlines=TRUE)\nplot(p, ~m, groups='gender', nlines=TRUE)\nplot(p, ~gender|m, nlines=TRUE)\n\noptions(datadist=NULL)\n\n## Not run: \n##D # Example in which separate curves are shown for 4 income values\n##D # For each curve the estimated percentage of voters voting for\n##D # the democratic party is plotted against the percent of voters\n##D # who graduated from college. Data are county-level percents.\n##D \n##D incomes <- seq(22900, 32800, length=4) \n##D # equally spaced to outer quintiles\n##D p <- Predict(f, college, income=incomes, conf.int=FALSE)\n##D plot(p, xlim=c(0,35), ylim=c(30,55))\n##D \n##D # Erase end portions of each curve where there are fewer than 10 counties having\n##D # percent of college graduates to the left of the x-coordinate being plotted,\n##D # for the subset of counties having median family income with 1650\n##D # of the target income for the curve\n##D \n##D show.pts <- function(college.pts, income.pt) {\n##D s <- abs(income - income.pt) < 1650 #assumes income known to top frame\n##D x <- college[s]\n##D x <- sort(x[!is.na(x)])\n##D n <- length(x)\n##D low <- x[10]; high <- x[n-9]\n##D college.pts >= low & college.pts <= high\n##D }\n##D \n##D plot(p, xlim=c(0,35), ylim=c(30,55), perim=show.pts)\n##D \n##D # Rename variables for better plotting of a long list of predictors\n##D f <- ...\n##D p <- Predict(f)\n##D re <- c(trt='treatment', diabet='diabetes', sbp='systolic blood pressure')\n##D \n##D for(n in names(re)) {\n##D names(p)[names(p)==n] <- re[n]\n##D p$.predictor.[p$.predictor.==n] <- re[n]\n##D }\n##D plot(p)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"plot.xmean.ordinaly","snippet":"### Name: plot.xmean.ordinaly\n### Title: Plot Mean X vs. 
Ordinal Y\n### Aliases: plot.xmean.ordinaly\n### Keywords: category models regression hplot\n\n### ** Examples\n\n# Simulate data from a population proportional odds model\nset.seed(1)\nn <- 400\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\nregion <- factor(sample(c('north','south','east','west'), n, replace=TRUE))\nL <- .2*(age-50) + .1*(blood.pressure-120)\np12 <- plogis(L) # Pr(Y>=1)\np2 <- plogis(L-1) # Pr(Y=2)\np <- cbind(1-p12, p12-p2, p2) # individual class probabilities\n# Cumulative probabilities:\ncp <- matrix(cumsum(t(p)) - rep(0:(n-1), rep(3,n)), byrow=TRUE, ncol=3)\ny <- (cp < runif(n)) %*% rep(1,3)\n# Thanks to Dave Krantz for this trick\n\npar(mfrow=c(2,2))\nplot.xmean.ordinaly(y ~ age + blood.pressure + region, cr=TRUE, topcats=2)\npar(mfrow=c(1,1))\n# Note that for unimportant predictors we don't care very much about the\n# shapes of these plots. Use the Hmisc chiSquare function to compute\n# Pearson chi-square statistics to rank the variables by unadjusted\n# importance without assuming any ordering of the response:\nchiSquare(y ~ age + blood.pressure + region, g=3)\nchiSquare(y ~ age + blood.pressure + region, g=5)\n\n\n"} {"package":"rms","topic":"plotp.Predict","snippet":"### Name: plotp.Predict\n### Title: Plot Effects of Variables Estimated by a Regression Model Fit\n### Using plotly\n### Aliases: plotp.Predict\n### Keywords: models hplot htest\n\n### ** Examples\n\n## Not run: \n##D n <- 350 # define sample size\n##D set.seed(17) # so can reproduce the results\n##D age <- rnorm(n, 50, 10)\n##D blood.pressure <- rnorm(n, 120, 15)\n##D cholesterol <- rnorm(n, 200, 25)\n##D sex <- factor(sample(c('female','male'), n,TRUE))\n##D label(age) <- 'Age' # label is in Hmisc\n##D label(cholesterol) <- 'Total Cholesterol'\n##D label(blood.pressure) <- 'Systolic Blood Pressure'\n##D label(sex) <- 'Sex'\n##D units(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\n##D units(blood.pressure) <- 'mmHg'\n##D \n##D # Specify population model for log odds that Y=1\n##D L <- .4*(sex=='male') + .045*(age-50) +\n##D (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male')) +\n##D .01 * (blood.pressure - 120)\n##D # Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\n##D y <- ifelse(runif(n) < plogis(L), 1, 0)\n##D \n##D ddist <- datadist(age, blood.pressure, cholesterol, sex)\n##D options(datadist='ddist')\n##D \n##D fit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)),\n##D x=TRUE, y=TRUE)\n##D \n##D p <- plotp(Predict(fit))\n##D p$Continuous\n##D p$Categorical\n##D # When using Rmarkdown html notebook, best to use\n##D # prList(p) to render the two objects\n##D plotp(Predict(fit), rdata=llist(blood.pressure, age))$Continuous\n##D # spike histogram plot for two of the predictors\n##D \n##D p <- Predict(fit, name=c('age','cholesterol')) # Make 2 plots\n##D plotp(p)\n##D \n##D p <- Predict(fit, age, sex)\n##D plotp(p, rdata=llist(age,sex))\n##D # rdata= allows rug plots (1-dimensional scatterplots)\n##D # on each sex's curve, with sex-\n##D # specific density of age\n##D # If data were in data frame could have used that\n##D p <- Predict(fit, age=seq(20,80,length=100), sex='male', fun=plogis)\n##D # works if datadist not used\n##D plotp(p, ylab='P')\n##D # plot predicted probability in place of log odds\n##D \n##D # Compute predictions for three predictors, with superpositioning or\n##D # conditioning on sex, combined into one graph\n##D \n##D p1 <- Predict(fit, age, sex)\n##D p2 <- Predict(fit, cholesterol, sex)\n##D p3 <- Predict(fit, 
blood.pressure, sex)\n##D p <- rbind(age=p1, cholesterol=p2, blood.pressure=p3)\n##D plotp(p, ncols=2, rdata=llist(age, cholesterol, sex))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"poma","snippet":"### Name: poma\n### Title: Examine proportional odds and parallelism assumptions of 'orm'\n### and 'lrm' model fits.\n### Aliases: poma\n\n### ** Examples\n\n\n## Not run: \n##D ## orm model (response variable has fewer than 10 unique levels)\n##D mod.orm <- orm(carb ~ cyl + hp , x = TRUE, y = TRUE, data = mtcars)\n##D poma(mod.orm)\n##D \n##D \n##D ## runs rms::impactPO when its args are supplied\n##D ## More examples: (https://yhpua.github.io/poma/)\n##D d <- expand.grid(hp = c(90, 180), vs = c(0, 1))\n##D mod.orm <- orm(cyl ~ vs + hp , x = TRUE, y = TRUE, data = mtcars)\n##D poma(mod.orm, newdata = d)\n##D \n##D \n##D ## orm model (response variable has >=10 unique levels)\n##D mod.orm <- orm(mpg ~ cyl + hp , x=TRUE, y=TRUE, data = mtcars)\n##D poma(mod.orm)\n##D \n##D \n##D ## orm model using imputation\n##D dat <- mtcars\n##D ## introduce NAs\n##D dat[sample(rownames(dat), 10), \"cyl\"] <- NA\n##D im <- aregImpute(~ cyl + wt + mpg + am, data = dat)\n##D aa <- fit.mult.impute(mpg ~ cyl + wt , xtrans = im, data = dat, fitter = orm)\n##D poma(aa)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"pphsm","snippet":"### Name: pphsm\n### Title: Parametric Proportional Hazards form of AFT Models\n### Aliases: pphsm print.pphsm vcov.pphsm\n### Keywords: models survival regression\n\n### ** Examples\n\nrequire(survival)\nset.seed(1)\nS <- Surv(runif(100))\nx <- runif(100)\ndd <- datadist(x); options(datadist='dd')\nf <- psm(S ~ x, dist=\"exponential\")\nsummary(f) # effects on log(T) scale\nf.ph <- pphsm(f)\n## Not run: summary(f.ph) # effects on hazard ratio scale\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"predab.resample","snippet":"### Name: predab.resample\n### Title: Predictive Ability using Resampling\n### Aliases: predab.resample\n### Keywords: models\n\n### ** Examples\n\n# See the code for validate.ols for an example of the use of\n# predab.resample\n\n\n"} {"package":"rms","topic":"predict.lrm","snippet":"### Name: predict.lrm\n### Title: Predicted Values for Binary and Ordinal Logistic Models\n### Aliases: predict.lrm predict.orm Mean.lrm Mean.orm\n### Keywords: models regression\n\n### ** Examples\n\n# See help for predict.rms for several binary logistic\n# regression examples\n\n\n# Examples of predictions from ordinal models\nset.seed(1)\ny <- factor(sample(1:3, 400, TRUE), 1:3, c('good','better','best'))\nx1 <- runif(400)\nx2 <- runif(400)\nf <- lrm(y ~ rcs(x1,4)*x2, x=TRUE) #x=TRUE needed for se.fit\n# Get 0.95 confidence limits for Prob[better or best]\nL <- predict(f, se.fit=TRUE) #omitted kint= so use 1st intercept\nplogis(with(L, linear.predictors + 1.96*cbind(-se.fit,se.fit)))\npredict(f, type=\"fitted.ind\")[1:10,] #gets Prob(better) and all others\nd <- data.frame(x1=c(.1,.5),x2=c(.5,.15))\npredict(f, d, type=\"fitted\") # Prob(Y>=j) for new observation\npredict(f, d, type=\"fitted.ind\") # Prob(Y=j)\npredict(f, d, type='mean', codes=TRUE) # predicts mean(y) using codes 1,2,3\nm <- Mean(f, codes=TRUE)\nlp <- predict(f, d)\nm(lp)\n# Can use function m as an argument to Predict or nomogram to\n# get predicted means instead of log odds or probabilities\ndd <- datadist(x1,x2); options(datadist='dd')\nm\nplot(Predict(f, x1, fun=m), ylab='Predicted Mean')\n# Note: Run f through bootcov with coef.reps=TRUE to get proper confidence\n# limits for predicted means 
from the prop. odds model\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"predictrms","snippet":"### Name: predictrms\n### Title: Predicted Values from Model Fit\n### Aliases: predictrms predict.rms predict.bj predict.cph predict.Glm\n### predict.Gls predict.ols predict.psm\n### Keywords: models regression\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\ntreat <- factor(sample(c('a','b','c'), n,TRUE))\n\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male')) +\n .3*sqrt(blood.pressure-60)-2.3 + 1*(treat=='b')\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\n\nddist <- datadist(age, blood.pressure, cholesterol, sex, treat)\noptions(datadist='ddist')\n\n\nfit <- lrm(y ~ rcs(blood.pressure,4) + \n sex * (age + rcs(cholesterol,4)) + sex*treat*age)\n\n\n# Use xYplot to display predictions in 9 panels, with error bars,\n# with superposition of two treatments\n\n\ndat <- expand.grid(treat=levels(treat),sex=levels(sex),\n age=c(20,40,60),blood.pressure=120,\n cholesterol=seq(100,300,length=10))\n# Add variables linear.predictors and se.fit to dat\ndat <- cbind(dat, predict(fit, dat, se.fit=TRUE))\n# This is much easier with Predict\n# xYplot in Hmisc extends xyplot to allow error bars\n\nxYplot(Cbind(linear.predictors,linear.predictors-1.96*se.fit,\n linear.predictors+1.96*se.fit) ~ cholesterol | sex*age,\n groups=treat, data=dat, type='b')\n\n\n\n\n# Since blood.pressure doesn't interact with anything, we can quickly and\n# interactively try various transformations of blood.pressure, taking\n# the fitted spline function as the gold standard. We are seeking a\n# linearizing transformation even though this may lead to falsely\n# narrow confidence intervals if we use this data-dredging-based transformation\n\n\nbp <- 70:160\nlogit <- predict(fit, expand.grid(treat=\"a\", sex='male', age=median(age),\n cholesterol=median(cholesterol),\n blood.pressure=bp), type=\"terms\")[,\"blood.pressure\"]\n#Note: if age interacted with anything, this would be the age\n# \"main effect\" ignoring interaction terms\n#Could also use Predict(f, age=ag)$yhat\n#which allows evaluation of the shape for any level of interacting\n#factors. When age does not interact with anything, the result from\n#predict(f, \\dots, type=\"terms\") would equal the result from\n#plot if all other terms were ignored\n\n\nplot(bp^.5, logit) # try square root vs. 
spline transform.\nplot(bp^1.5, logit) # try 1.5 power\nplot(sqrt(bp-60), logit)\n\n\n#Some approaches to making a plot showing how predicted values\n#vary with a continuous predictor on the x-axis, with two other\n#predictors varying\n\n\ncombos <- gendata(fit, age=seq(10,100,by=10), cholesterol=c(170,200,230),\n blood.pressure=c(80,120,160))\n#treat, sex not specified -> set to mode\n#can also use expand.grid\n\nrequire(lattice)\ncombos$pred <- predict(fit, combos)\nxyplot(pred ~ age | cholesterol*blood.pressure, data=combos, type='l')\nxYplot(pred ~ age | cholesterol, groups=blood.pressure, data=combos, type='l')\nKey() # Key created by xYplot\nxYplot(pred ~ age, groups=interaction(cholesterol,blood.pressure),\n data=combos, type='l', lty=1:9)\nKey()\n\n\n# Add upper and lower 0.95 confidence limits for individuals\ncombos <- cbind(combos, predict(fit, combos, conf.int=.95))\nxYplot(Cbind(linear.predictors, lower, upper) ~ age | cholesterol,\n groups=blood.pressure, data=combos, type='b')\nKey()\n\n\n# Plot effects of treatments (all pairwise comparisons) vs.\n# levels of interacting factors (age, sex)\n\n\nd <- gendata(fit, treat=levels(treat), sex=levels(sex), age=seq(30,80,by=10))\nx <- predict(fit, d, type=\"x\")\nbetas <- fit$coef\ncov <- vcov(fit, intercepts='none')\n\n\ni <- d$treat==\"a\"; xa <- x[i,]; Sex <- d$sex[i]; Age <- d$age[i]\ni <- d$treat==\"b\"; xb <- x[i,]\ni <- d$treat==\"c\"; xc <- x[i,]\n\n\ndoit <- function(xd, lab) {\n xb <- matxv(xd, betas)\n se <- apply((xd %*% cov) * xd, 1, sum)^.5\n q <- qnorm(1-.01/2) # 0.99 confidence limits\n lower <- xb - q * se; upper <- xb + q * se\n #Get odds ratios instead of linear effects\n xb <- exp(xb); lower <- exp(lower); upper <- exp(upper)\n #First elements of these agree with \n #summary(fit, age=30, sex='female',conf.int=.99)\n for(sx in levels(Sex)) {\n j <- Sex==sx\n errbar(Age[j], xb[j], upper[j], lower[j], xlab=\"Age\", \n ylab=paste(lab, \"Odds Ratio\"), ylim=c(.1, 20), log='y')\n title(paste(\"Sex:\", sx))\n abline(h=1, lty=2)\n }\n}\n\n\npar(mfrow=c(3,2), oma=c(3,0,3,0))\ndoit(xb - xa, \"b:a\")\ndoit(xc - xa, \"c:a\")\ndoit(xc - xb, \"c:b\")\n\n# NOTE: This is much easier to do using contrast.rms (see the sketch below)\n\n
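# Editor's note: a hedged sketch, not from the original example, of the
# contrast.rms approach mentioned in the NOTE above. contrast() computes
# the b:a contrast with confidence limits directly from the fit; the age
# and sex settings are illustrative and default 0.95 limits are used.
ct <- contrast(fit, list(treat='b', age=seq(30,80,by=10), sex='female'),
                    list(treat='a', age=seq(30,80,by=10), sex='female'))
exp(cbind(OR=ct$Contrast, Lower=ct$Lower, Upper=ct$Upper)) # odds ratio scale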
# Demonstrate type=\"terms\", \"cterms\", \"ccterms\"\nset.seed(1)\nn <- 40\nx <- 1:n\nw <- factor(sample(c('a', 'b'), n, TRUE))\nu <- factor(sample(c('A', 'B'), n, TRUE))\ny <- .01*x + .2*(w=='b') + .3*(u=='B') + .2*(w=='b' & u=='B') + rnorm(n)/5\nddist <- datadist(x, w, u)\nf <- ols(y ~ x*w*u, x=TRUE, y=TRUE)\nf\nanova(f)\nz <- predict(f, type='terms', center.terms=FALSE)\nz[1:5,]\nk <- coef(f)\n## Manually compute combined terms\nwb <- w=='b'\nuB <- u=='B'\nh <- k['x * w=b * u=B']*x*wb*uB\ntx <- k['x'] *x + k['x * w=b']*x*wb + k['x * u=B'] *x*uB + h\ntw <- k['w=b']*wb + k['x * w=b']*x*wb + k['w=b * u=B']*wb*uB + h\ntu <- k['u=B']*uB + k['x * u=B']*x*uB + k['w=b * u=B']*wb*uB + h\nh <- z[,'x * w * u'] # highest order term is present in all cterms\ntx2 <- z[,'x']+z[,'x * w']+z[,'x * u']+h\ntw2 <- z[,'w']+z[,'x * w']+z[,'w * u']+h\ntu2 <- z[,'u']+z[,'x * u']+z[,'w * u']+h\nae <- function(a, b) all.equal(a, b, check.attributes=FALSE)\nae(tx, tx2)\nae(tw, tw2)\nae(tu, tu2)\n\nzc <- predict(f, type='cterms')\nzc[1:5,]\nae(tx, zc[,'x'])\nae(tw, zc[,'w'])\nae(tu, zc[,'u'])\n\nzc <- predict(f, type='ccterms')\n# As all factors are indirectly related, ccterms gives overall linear\n# predictor except for the intercept\nzc[1:5,]\nae(as.vector(zc + coef(f)[1]), f$linear.predictors)\n\n## Not run: \n##D #A variable state.code has levels \"1\", \"5\",\"13\"\n##D #Get predictions with or without converting variable in newdata to factor\n##D predict(fit, data.frame(state.code=c(5,13)))\n##D predict(fit, data.frame(state.code=factor(c(5,13))))\n##D \n##D \n##D #Use gendata function (gendata.rms) for interactive specification of\n##D #predictor variable settings (for 10 observations)\n##D df <- gendata(fit, nobs=10, viewvals=TRUE)\n##D df$predicted <- predict(fit, df) # add variable to data frame\n##D df\n##D \n##D \n##D df <- gendata(fit, age=c(10,20,30)) # leave other variables at ref. vals.\n##D predict(fit, df, type=\"fitted\")\n##D \n##D \n##D # See reShape (in Hmisc) for an example where predictions corresponding to \n##D # values of one of the varying predictors are reformatted into multiple\n##D # columns of a matrix\n## End(Not run)\noptions(datadist=NULL)\n\n\n"}
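# Editor's note: a small follow-up check, not in the original example,
# using z, f, and ae from the predictrms demo above: with center.terms=FALSE
# the row sums of the per-term contributions plus the intercept should
# reconstruct the linear predictor exactly.
ae(as.vector(rowSums(z) + coef(f)[1]), f$linear.predictors)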
{"package":"rms","topic":"prmiInfo","snippet":"### Name: prmiInfo\n### Title: prmiInfo\n### Aliases: prmiInfo\n\n### ** Examples\n\n## Not run: \n##D a <- aregImpute(...)\n##D f <- fit.mult.impute(...)\n##D v <- processMI(f, 'anova')\n##D prmiInfo(v)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"psm","snippet":"### Name: psm\n### Title: Parametric Survival Model\n### Aliases: psm print.psm Hazard Survival Hazard.psm Mean.psm Quantile.psm\n### Survival.psm residuals.psm lines.residuals.psm.censored.normalized\n### survplot.residuals.psm.censored.normalized\n### Keywords: models survival\n\n### ** Examples\n\nrequire(survival)\nn <- 400\nset.seed(1)\nage <- rnorm(n, 50, 12)\nsex <- factor(sample(c('Female','Male'),n,TRUE))\ndd <- datadist(age,sex)\noptions(datadist='dd')\n# Population hazard function:\nh <- .02*exp(.06*(age-50)+.8*(sex=='Female'))\nd.time <- -log(runif(n))/h\ncens <- 15*runif(n)\ndeath <- ifelse(d.time <= cens,1,0)\nd.time <- pmin(d.time, cens)\n\nf <- psm(Surv(d.time,death) ~ sex*pol(age,2), \n dist='lognormal')\n# Log-normal model is a bad fit for proportional hazards data\nprint(f, r2=0:4, pg=TRUE)\n\nanova(f)\nfastbw(f) # if it deletes sex while keeping age*sex, ignore the result\nf <- update(f, x=TRUE,y=TRUE) # so can validate, compute certain resids\nvalidate(f, B=10) # ordinarily use B=300 or more\nplot(Predict(f, age, sex)) # needs datadist since no explicit age values given\n# Could have used ggplot(Predict(...))\nsurvplot(f, age=c(20,60)) # needs datadist since sex not set here\n# latex(f)\n\n\nS <- Survival(f)\nplot(f$linear.predictors, S(6, f$linear.predictors),\n xlab=expression(X*hat(beta)),\n ylab=expression(S(6,X*hat(beta))))\n# plots 6-month survival as a function of linear predictor (X*Beta hat)\n\n\ntimes <- seq(0,24,by=.25)\nplot(times, S(times,0), type='l') # plots survival curve at X*Beta hat=0\nlam <- Hazard(f)\nplot(times, lam(times,0), type='l') # similarly for hazard function\n\n\nmed <- Quantile(f) # new function defaults to computing median only\nlp <- seq(-3, 5, by=.1)\nplot(lp, med(lp=lp), ylab=\"Median Survival Time\")\nmed(c(.25,.5), f$linear.predictors)\n # prints matrix with 2 columns\n\n\n# fit a model with no predictors\nf <- psm(Surv(d.time,death) ~ 1, dist=\"weibull\")\nf\npphsm(f) # print proportional hazards form\ng <- survest(f)\nplot(g$time, g$surv, xlab='Time', type='l',\n ylab=expression(S(t)))\n\n\nf <- psm(Surv(d.time,death) ~ age, \n dist=\"loglogistic\", y=TRUE)\nr <- resid(f, 'cens') # note abbreviation\nsurvplot(npsurv(r ~ 1), conf='none') \n # plot Kaplan-Meier estimate of \n # survival function of standardized residuals\nsurvplot(npsurv(r ~ cut2(age, g=2)), conf='none') \n # both strata should be N(0,1)\nlines(r) # add theoretical survival function\n#More simply:\nsurvplot(r, age, g=2)\n\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"residuals.cph","snippet":"### Name: residuals.cph\n### Title: Residuals for a cph Fit\n### Aliases: residuals.cph\n### Keywords: survival\n\n### ** Examples\n\n# fit <- cph(Surv(start, stop, event) ~ (age + surgery)* transplant, \n# data=jasa1)\n# mresid <- resid(fit, collapse=jasa1$id)\n\n\n# Get unadjusted relationships for several variables\n# Pick one variable that's not missing too much, for fit\n\nrequire(survival)\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='female'))\nd.time <- -log(runif(n))/h\ndeath <- ifelse(d.time <= cens,1,0)\nd.time <- pmin(d.time, cens)\n\n\nf <- cph(Surv(d.time, death) ~ age + blood.pressure + cholesterol, iter.max=0)\nres <- resid(f) # This re-inserts rows for NAs, unlike f$resid\nyl <- quantile(res, c(10/length(res),1-10/length(res)), na.rm=TRUE)\n# Scale all plots from 10th smallest to 10th largest residual\npar(mfrow=c(2,2), oma=c(3,0,3,0))\np <- function(x) {\n s <- !is.na(x+res)\n plot(lowess(x[s], res[s], iter=0), xlab=label(x), ylab=\"Residual\",\n ylim=yl, type=\"l\")\n}\np(age); p(blood.pressure); p(cholesterol)\nmtext(\"Smoothed Martingale Residuals\", outer=TRUE)\n\n\n# Assess PH by estimating log relative hazard over time\nf <- cph(Surv(d.time,death) ~ age + sex + blood.pressure, x=TRUE, y=TRUE)\nr <- resid(f, \"scaledsch\")\ntt <- as.numeric(dimnames(r)[[1]])\npar(mfrow=c(3,2))\nfor(i in 1:3) {\n g <- areg.boot(I(r[,i]) ~ tt, B=20)\n plot(g, boot=FALSE) # shows bootstrap CIs\n} # Focus on 3 graphs on right\n# Easier approach:\nplot(cox.zph(f)) # invokes plot.cox.zph\npar(mfrow=c(1,1))\n\n\n"} {"package":"rms","topic":"residuals.lrm","snippet":"### Name: residuals.lrm\n### Title: Residuals from an 'lrm' or 'orm' Fit\n### Aliases: residuals.lrm residuals.orm plot.lrm.partial\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- runif(200, -1, 1)\nx2 <- runif(200, -1, 1)\nL <- x1^2 - .5 + x2\ny <- ifelse(runif(200) <= plogis(L), 1, 0)\nf <- lrm(y ~ x1 + x2, x=TRUE, y=TRUE)\nresid(f) #add rows for NAs back to data\nresid(f, \"score\") #also adds back rows\nr <- resid(f, \"partial\") #for checking transformations of X's\npar(mfrow=c(1,2))\nfor(i in 1:2) {\n xx <- if(i==1)x1 else x2\n plot(xx, r[,i], xlab=c('x1','x2')[i])\n lines(lowess(xx,r[,i]))\n}\nresid(f, \"partial\", pl=\"loess\") #same as last 3 lines\nresid(f, \"partial\", pl=TRUE) #plots for all columns of X using supsmu\nresid(f, \"gof\") #global test of goodness of fit
lp1 <- resid(f, \"lp1\") #approx. leave-out-1 linear predictors\n-2*sum(y*lp1 + log(1-plogis(lp1))) #approx leave-out-1 deviance\n #formula assumes y is binary\n\n\n# Simulate data from a population proportional odds model\nset.seed(1)\nn <- 400\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\nL <- .05*(age-50) + .03*(blood.pressure-120)\np12 <- plogis(L) # Pr(Y>=1)\np2 <- plogis(L-1) # Pr(Y=2)\np <- cbind(1-p12, p12-p2, p2) # individual class probabilities\n# Cumulative probabilities:\ncp <- matrix(cumsum(t(p)) - rep(0:(n-1), rep(3,n)), byrow=TRUE, ncol=3)\n# simulate multinomial with varying probs:\ny <- (cp < runif(n)) %*% rep(1,3)\ny <- as.vector(y)\n# Thanks to Dave Krantz for this trick\nf <- lrm(y ~ age + blood.pressure, x=TRUE, y=TRUE)\npar(mfrow=c(2,2))\nresid(f, 'score.binary', pl=TRUE) #plot score residuals\nresid(f, 'partial', pl=TRUE) #plot partial residuals\nresid(f, 'gof') #test GOF for each level separately\n\n\n# Show use of Li-Shepherd residuals\nf.wrong <- lrm(y ~ blood.pressure, x=TRUE, y=TRUE)\npar(mfrow=c(2,1))\n# li.shepherd residuals from model without age\nplot(age, resid(f.wrong, type=\"li.shepherd\"),\n ylab=\"li.shepherd residual\")\nlines(lowess(age, resid(f.wrong, type=\"li.shepherd\")))\n# li.shepherd residuals from model including age\nplot(age, resid(f, type=\"li.shepherd\"),\n ylab=\"li.shepherd residual\")\nlines(lowess(age, resid(f, type=\"li.shepherd\")))\n\n\n# Make a series of binary fits and draw 2 partial residual plots\n#\nf1 <- lrm(y>=1 ~ age + blood.pressure, x=TRUE, y=TRUE)\nf2 <- update(f1, y==2 ~.)\npar(mfrow=c(2,1))\nplot.lrm.partial(f1, f2)\n\n\n# Simulate data from both a proportional odds and a non-proportional\n# odds population model. Check how 3 kinds of residuals detect\n# non-prop. odds\nset.seed(71)\nn <- 400\nx <- rnorm(n)\n\npar(mfrow=c(2,3))\nfor(j in 1:2) { # 1: prop.odds 2: non-prop. odds\n if(j==1) \n L <- matrix(c(1.4,.4,-.1,-.5,-.9),\n nrow=n, ncol=5, byrow=TRUE) + x / 2\n else {\n\t # Slopes and intercepts for cutoffs of 1:5 :\n\t slopes <- c(.7,.5,.3,.3,0)\n\t ints <- c(2.5,1.2,0,-1.2,-2.5)\n L <- matrix(ints, nrow=n, ncol=5, byrow=TRUE) +\n matrix(slopes, nrow=n, ncol=5, byrow=TRUE) * x\n }\n p <- plogis(L)\n # Cell probabilities\n p <- cbind(1-p[,1],p[,1]-p[,2],p[,2]-p[,3],p[,3]-p[,4],p[,4]-p[,5],p[,5])\n # Cumulative probabilities from left to right\n cp <- matrix(cumsum(t(p)) - rep(0:(n-1), rep(6,n)), byrow=TRUE, ncol=6)\n y <- (cp < runif(n)) %*% rep(1,6)\n\n\n f <- lrm(y ~ x, x=TRUE, y=TRUE)\n for(cutoff in 1:5) print(lrm(y >= cutoff ~ x)$coef)\n\n\n print(resid(f,'gof'))\n resid(f, 'score', pl=TRUE)\n # Note that full ordinal model score residuals exhibit a\n # U-shaped pattern even under prop. odds\n ti <- if(j==2) 'Non-Proportional Odds\\nSlopes=.7 .5 .3 .3 0' else\n 'True Proportional Odds\\nOrdinal Model Score Residuals'\n title(ti)\n resid(f, 'score.binary', pl=TRUE)\n if(j==1) ti <- 'True Proportional Odds\\nBinary Score Residuals'\n title(ti)\n resid(f, 'partial', pl=TRUE)\n if(j==1) ti <- 'True Proportional Odds\\nPartial Residuals'\n title(ti)\n}\npar(mfrow=c(1,1))\n\n# Shepherd-Li residuals from orm. 
Thanks: Qi Liu\n\nset.seed(3)\nn <- 100\nx1 <- rnorm(n)\ny <- x1 + rnorm(n)\ng <- orm(y ~ x1, family=probit, x=TRUE, y=TRUE)\ng.resid <- resid(g)\nplot(x1, g.resid, cex=0.4); lines(lowess(x1, g.resid)); abline(h=0, col=2,lty=2)\n\nset.seed(3)\nn <- 100\nx1 <- rnorm(n)\ny <- x1 + x1^2 +rnorm(n)\n# model misspecification, the square term is left out in the model\ng <- orm(y ~ x1, family=probit, x=TRUE, y=TRUE)\ng.resid <- resid(g)\nplot(x1, g.resid, cex=0.4); lines(lowess(x1, g.resid)); abline(h=0, col=2,lty=2)\n\n\n## Not run: \n##D # Get data used in Hosmer et al. paper and reproduce their calculations\n##D v <- Cs(id, low, age, lwt, race, smoke, ptl, ht, ui, ftv, bwt)\n##D d <- read.table(\"http://www.umass.edu/statdata/statdata/data/lowbwt.dat\",\n##D skip=6, col.names=v)\n##D d <- upData(d, race=factor(race,1:3,c('white','black','other')))\n##D f <- lrm(low ~ age + lwt + race + smoke, data=d, x=TRUE,y=TRUE)\n##D f\n##D resid(f, 'gof')\n##D # Their Table 7 Line 2 found sum of squared errors=36.91, expected\n##D # value under H0=36.45, variance=.065, P=.071\n##D # We got 36.90, 36.45, SD=.26055 (var=.068), P=.085\n##D # Note that two logistic regression coefficients differed a bit\n##D # from their Table 1\n## End(Not run)\n\n\n"} {"package":"rms","topic":"residuals.ols","snippet":"### Name: residuals.ols\n### Title: Residuals for ols\n### Aliases: residuals.ols\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- rnorm(100)\nx2 <- rnorm(100)\nx1[1] <- 100\ny <- x1 + x2 + rnorm(100)\nf <- ols(y ~ x1 + x2, x=TRUE, y=TRUE)\nresid(f, \"dfbetas\")\nwhich.influence(f)\ni <- resid(f, 'influence.measures') # dfbeta, dffit, etc.\n\n\n"} {"package":"rms","topic":"rms","snippet":"### Name: rms\n### Title: rms Methods and Generic Functions\n### Aliases: rms Design modelData\n### Keywords: models regression survival math manip methods\n\n### ** Examples\n\n## Not run: \n##D require(rms)\n##D require(ggplot2)\n##D require(survival)\n##D dist <- datadist(data=2) # can omit if not using summary, (gg)plot, survplot,\n##D # or if specify all variable values to them. Can\n##D # also defer. data=2: get distribution summaries\n##D # for all variables in search position 2\n##D # run datadist once, for all candidate variables\n##D dist <- datadist(age,race,bp,sex,height) # alternative\n##D options(datadist=\"dist\")\n##D f <- cph(Surv(d.time, death) ~ rcs(age,4)*strat(race) +\n##D bp*strat(sex)+lsp(height,60),x=TRUE,y=TRUE)\n##D anova(f)\n##D anova(f,age,height) # Joint test of 2 vars\n##D fastbw(f)\n##D summary(f, sex=\"female\") # Adjust sex to \"female\" when testing\n##D # interacting factor bp\n##D bplot(Predict(f, age, height)) # 3-D plot\n##D ggplot(Predict(f, age=10:70, height=60))\n##D latex(f) # LaTeX representation of fit\n##D \n##D \n##D f <- lm(y ~ x) # Can use with any fitting function that\n##D # calls model.frame.default, e.g. 
lm, glm\n##D specs.rms(f) # Use .rms since class(f)=\"lm\"\n##D anova(f) # Works since Varcov(f) (=Varcov.lm(f)) works\n##D fastbw(f)\n##D options(datadist=NULL)\n##D f <- ols(y ~ x1*x2) # Saves enough information to do fastbw, anova\n##D anova(f) # Will not do Predict since distributions\n##D fastbw(f) # of predictors not saved\n##D plot(f, x1=seq(100,300,by=.5), x2=.5) \n##D # all values defined - don't need datadist\n##D dist <- datadist(x1,x2) # Equivalent to datadist(f)\n##D options(datadist=\"dist\")\n##D plot(f, x1, x2=.5) # Now you can do plot, summary\n##D plot(nomogram(f, interact=list(x2=c(.2,.7))))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"rms.trans","snippet":"### Name: rms.trans\n### Title: rms Special Transformation Functions\n### Aliases: rms.trans asis pol lsp rcs catg scored strat matrx gTrans %ia%\n### makepredictcall.rms\n### Keywords: models regression math manip methods survival smooth\n\n### ** Examples\n\n## Not run: \n##D options(knots=4, poly.degree=2)\n##D # To get the old behavior of rcspline.eval knot placement (which didn't handle\n##D # clumping at the lowest or highest value of the predictor very well):\n##D # options(fractied = 1.0) # see rcspline.eval for details\n##D country <- factor(country.codes)\n##D blood.pressure <- cbind(sbp=systolic.bp, dbp=diastolic.bp)\n##D fit <- lrm(Y ~ sqrt(x1)*rcs(x2) + rcs(x3,c(5,10,15)) + \n##D lsp(x4,c(10,20)) + country + blood.pressure + poly(age,2))\n##D # sqrt(x1) is an implicit asis variable, but limits of x1, not sqrt(x1)\n##D # are used for later plotting and effect estimation\n##D # x2 fitted with restricted cubic spline with 4 default knots\n##D # x3 fitted with r.c.s. with 3 specified knots\n##D # x4 fitted with linear spline with 2 specified knots\n##D # country is an implied catg variable\n##D # blood.pressure is an implied matrx variable\n##D # since poly is not an rms function (pol is), it creates a\n##D # matrx type variable with no automatic linearity testing\n##D # or plotting\n##D f1 <- lrm(y ~ rcs(x1) + rcs(x2) + rcs(x1) %ia% rcs(x2))\n##D # %ia% restricts interactions. 
Here it removes terms nonlinear in\n##D # both x1 and x2\n##D f2 <- lrm(y ~ rcs(x1) + rcs(x2) + x1 %ia% rcs(x2))\n##D # interaction linear in x1\n##D f3 <- lrm(y ~ rcs(x1) + rcs(x2) + x1 %ia% x2)\n##D # simple product interaction (doubly linear)\n##D # Use x1 %ia% x2 instead of x1:x2 because x1 %ia% x2 triggers\n##D # anova to pool x1*x2 term into x1 terms to test total effect\n##D # of x1\n##D #\n##D # Examples of gTrans\n##D #\n##D # Linear relationship with a discontinuity at zero:\n##D ldisc <- function(x) {z <- cbind(x == 0, x); attr(z, 'nonlinear') <- 1; z}\n##D gTrans(x, ldisc)\n##D # Duplicate pol(x, 2):\n##D pol2 <- function(x) {z <- cbind(x, x^2); attr(z, 'nonlinear') <- 2; z}\n##D gTrans(x, pol2)\n##D # Linear spline with a knot at x=10 with the new slope taking effect\n##D # until x=20 and the spline turning flat at that point but with a\n##D # discontinuous vertical shift\n##D # tex is only needed if you will be using latex(fit)\n##D dspl <- function(x) {\n##D z <- cbind(x, pmax(pmin(x, 20) - 10, 0), x > 20)\n##D attr(z, 'nonlinear') <- 2:3\n##D attr(z, 'tex') <- function(x) sprintf(c('%s', '(\\min(%s, 20) - 10)_{+}',\n##D '[%s > 20]'), x)\n##D z }\n##D gTrans(x, dspl)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"rmsMisc","snippet":"### Name: rmsMisc\n### Title: Miscellaneous Design Attributes and Utility Functions\n### Aliases: rmsMisc calibrate.rms DesignAssign vcov.rms vcov.cph vcov.Glm\n### vcov.Gls vcov.lrm vcov.ols vcov.orm vcov.psm oos.loglik\n### oos.loglik.ols oos.loglik.lrm oos.loglik.cph oos.loglik.psm\n### oos.loglik.Glm Getlim Getlimi related.predictors\n### interactions.containing combineRelatedPredictors param.order\n### Penalty.matrix Penalty.setup logLik.Gls logLik.ols logLik.rms AIC.rms\n### nobs.rms lrtest univarLR Newlabels Newlevels Newlabels.rms\n### Newlevels.rms rmsArgs print.rms print.lrtest survest.rms prModFit\n### prStats reListclean formatNP latex.naprint.delete html.naprint.delete\n### removeFormulaTerms\n### Keywords: models methods\n\n### ** Examples\n\n## Not run: \n##D f <- psm(S ~ x1 + x2 + sex + race, dist='gau')\n##D g <- psm(S ~ x1 + sex + race, dist='gau', \n##D fixed=list(scale=exp(f$parms)))\n##D lrtest(f, g)\n##D \n##D \n##D g <- Newlabels(f, c(x2='Label for x2'))\n##D g <- Newlevels(g, list(sex=c('Male','Female'),race=c('B','W')))\n##D nomogram(g)\n## End(Not run)\n\n\n"} {"package":"rms","topic":"robcov","snippet":"### Name: robcov\n### Title: Robust Covariance Matrix Estimates\n### Aliases: robcov\n### Keywords: models regression robust\n\n### ** Examples\n\n# In OLS test against more manual approach\nset.seed(1)\nn <- 15\nx1 <- 1:n\nx2 <- sample(1:n)\ny <- round(x1 + x2 + 8*rnorm(n))\nf <- ols(y ~ x1 + x2, x=TRUE, y=TRUE)\nvcov(f)\nvcov(robcov(f))\nX <- f$x\nG <- diag(resid(f)^2)\nsolve(t(X) %*% X) %*% (t(X) %*% G %*% X) %*% solve(t(X) %*% X)\n\n# Duplicate data and adjust for intra-cluster correlation to see that\n# the cluster sandwich estimator completely ignored the duplicates\nx1 <- c(x1,x1)\nx2 <- c(x2,x2)\ny <- c(y, y)\ng <- ols(y ~ x1 + x2, x=TRUE, y=TRUE)\nvcov(robcov(g, c(1:n, 1:n)))\n\n# A dataset contains a variable number of observations per subject,\n# and all observations are laid out in separate rows. The responses\n# represent whether or not a given segment of the coronary arteries\n# is occluded. Segments of arteries may not operate independently\n# in the same patient. 
We assume a \"working independence model\" to\n# get estimates of the coefficients, i.e., that estimates assuming\n# independence are reasonably efficient. The job is then to get\n# unbiased estimates of variances and covariances of these estimates.\n\nn.subjects <- 30\nages <- rnorm(n.subjects, 50, 15)\nsexes <- factor(sample(c('female','male'), n.subjects, TRUE))\nlogit <- (ages-50)/5\nprob <- plogis(logit) # true prob not related to sex\nid <- sample(1:n.subjects, 300, TRUE) # subjects sampled multiple times\ntable(table(id)) # frequencies of number of obs/subject\nage <- ages[id]\nsex <- sexes[id]\n# In truth, observations within subject are independent:\ny <- ifelse(runif(300) <= prob[id], 1, 0)\nf <- lrm(y ~ lsp(age,50)*sex, x=TRUE, y=TRUE)\ng <- robcov(f, id)\ndiag(g$var)/diag(f$var)\n# add ,group=w to re-sample from within each level of w\nanova(g) # cluster-adjusted Wald statistics\n# fastbw(g) # cluster-adjusted backward elimination\nplot(Predict(g, age=30:70, sex='female')) # cluster-adjusted confidence bands\n# or use ggplot(...)\n\n# Get design effects based on inflation of the variances when compared\n# with robust sandwich estimates that ignore clustering\ng2 <- robcov(f)\ndiag(g$var)/diag(g2$var)\n\n\n
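# Editor's note: a hedged sketch, not part of the original example, of the
# bootstrap counterpart: bootcov() with cluster= resamples intact subjects,
# giving cluster-adjusted variances comparable to robcov(f, id); B=100 is
# illustrative and would be larger in practice.
g3 <- bootcov(f, cluster=id, B=100)
diag(g3$var) / diag(f$var)  # compare with the robcov(f, id) inflation above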
Examples\n\nset.seed(1)\nblood.pressure <- rnorm(200, 120, 15)\ndd <- datadist(blood.pressure)\noptions(datadist='dd')\nL <- .03*(blood.pressure-120)\nsick <- ifelse(runif(200) <= plogis(L), 1, 0)\nf <- lrm(sick ~ rcs(blood.pressure,5))\nspecs(f) # find out where 5 knots are placed\ng <- Glm(sick ~ rcs(blood.pressure,5), family=binomial)\nspecs(g,long=TRUE)\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"summary.rms","snippet":"### Name: summary.rms\n### Title: Summary of Effects in Model\n### Aliases: summary.rms print.summary.rms latex.summary.rms\n### html.summary.rms plot.summary.rms\n### Keywords: models regression htest survival hplot interface\n\n### ** Examples\n\nn <- 1000 # define sample size\nset.seed(17) # so can reproduce the results\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\nlabel(age) <- 'Age' # label is in Hmisc\nlabel(cholesterol) <- 'Total Cholesterol'\nlabel(blood.pressure) <- 'Systolic Blood Pressure'\nlabel(sex) <- 'Sex'\nunits(cholesterol) <- 'mg/dl' # uses units.default in Hmisc\nunits(blood.pressure) <- 'mmHg'\n\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\n\nddist <- datadist(age, blood.pressure, cholesterol, sex)\noptions(datadist='ddist')\n\n\nfit <- lrm(y ~ blood.pressure + sex * (age + rcs(cholesterol,4)))\n\n\ns <- summary(fit) # Estimate effects using default ranges\n # Gets odds ratio for age=3rd quartile\n # compared to 1st quartile\n## Not run: \n##D latex(s) # Use LaTeX to print nice version\n##D latex(s, file=\"\") # Just write LaTeX code to console\n##D html(s) # html/LaTeX to console for knitr\n##D # Or:\n##D options(prType='latex')\n##D summary(fit) # prints with LaTeX, table.env=FALSE\n##D options(prType='html')\n##D summary(fit) # prints with html\n## End(Not run)\nsummary(fit, sex='male', age=60) # Specify ref. 
cell and adjustment val\nsummary(fit, age=c(50,70)) # Estimate effect of increasing age from\n # 50 to 70\ns <- summary(fit, age=c(50,60,70)) \n # Increase age from 50 to 70, adjust to\n # 60 when estimating effects of other factors\n#Could have omitted datadist if specified 3 values for all non-categorical\n#variables (1 value for categorical ones - adjustment level)\nplot(s, log=TRUE, at=c(.1,.5,1,1.5,2,4,8))\n\n\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"survest","snippet":"### Name: survest.cph\n### Title: Cox Survival Estimates\n### Aliases: survest survest.cph\n### Keywords: models survival regression\n\n### ** Examples\n\n# Simulate data from a population model in which the log hazard\n# function is linear in age and there is no age x sex interaction\n# Proportional hazards holds for both variables but we\n# unnecessarily stratify on sex to see what happens\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('Male','Female'), n, TRUE))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\ndt <- -log(runif(n))/h\nlabel(dt) <- 'Follow-up Time'\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\ndd <- datadist(age, sex)\noptions(datadist='dd')\nSrv <- Surv(dt,e)\n\n\nf <- cph(Srv ~ age*strat(sex), x=TRUE, y=TRUE) #or surv=T\nsurvest(f, expand.grid(age=c(20,40,60),sex=c(\"Male\",\"Female\")),\n\t times=c(2,4,6), conf.int=.9)\nf <- update(f, surv=TRUE)\nlp <- c(0, .5, 1)\nf$strata # check strata names\nattr(lp,'strata') <- rep(1,3) # or rep('sex=Female',3)\nsurvest(f, linear.predictors=lp, times=c(2,4,6))\n\n# Test survest by comparing to survfit.coxph for a more complex model\nf <- cph(Srv ~ pol(age,2)*strat(sex), x=TRUE, y=TRUE)\nsurvest(f, data.frame(age=median(age), sex=levels(sex)), times=6)\n\nage2 <- age^2\nf2 <- coxph(Srv ~ (age + age2)*strata(sex))\nnew <- data.frame(age=median(age), age2=median(age)^2, sex='Male')\nsummary(survfit(f2, new), times=6)\nnew$sex <- 'Female'\nsummary(survfit(f2, new), times=6)\n\noptions(datadist=NULL)\n\n\n"} {"package":"rms","topic":"survest.psm","snippet":"### Name: survest.psm\n### Title: Parametric Survival Estimates\n### Aliases: survest.psm print.survest.psm\n### Keywords: survival regression models\n\n### ** Examples\n\n# Simulate data from a proportional hazards population model\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50))\ndt <- -log(runif(n))/h\nlabel(dt) <- 'Follow-up Time'\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\nS <- Surv(dt,e)\n\nf <- psm(S ~ lsp(age,c(40,70)))\nsurvest(f, data.frame(age=seq(20,80,by=5)), times=2)\n\n#Get predicted survival curve for 40 year old\nsurvest(f, data.frame(age=40))\n\n#Get hazard function for 40 year old\nsurvest(f, data.frame(age=40), what=\"hazard\")$surv #still called surv\n\n\n"} {"package":"rms","topic":"survplot","snippet":"### Name: survplot\n### Title: Plot Survival Curves and Hazard Functions\n### Aliases: survplot survplotp survplot.rms survplot.npsurv\n### survplotp.npsurv survdiffplot\n### Keywords: survival hplot nonparametric models\n\n### ** Examples\n\n# Simulate data from a population model in which the log hazard\n# function is linear in age and there is no age x sex interaction\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('male','female'), n, TRUE))\ncens <- 15*runif(n)\nh 
<- .02*exp(.04*(age-50)+.8*(sex=='female'))\ndt <- -log(runif(n))/h\nlabel(dt) <- 'Follow-up Time'\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\ndd <- datadist(age, sex)\noptions(datadist='dd')\nS <- Surv(dt,e)\n\n# When age is in the model by itself and we predict at the mean age,\n# approximate confidence intervals are ok\n\nf <- cph(S ~ age, surv=TRUE)\nsurvplot(f, age=mean(age), conf.int=.95)\ng <- cph(S ~ age, x=TRUE, y=TRUE)\nsurvplot(g, age=mean(age), conf.int=.95, add=TRUE, col='red', conf='bars')\n\n# Repeat for an age far from the mean; not ok\nsurvplot(f, age=75, conf.int=.95)\nsurvplot(g, age=75, conf.int=.95, add=TRUE, col='red', conf='bars')\n\n\n#Plot stratified survival curves by sex, adj for quadratic age effect\n# with age x sex interaction (2 d.f. interaction)\n\nf <- cph(S ~ pol(age,2)*strat(sex), x=TRUE, y=TRUE)\n#or f <- psm(S ~ pol(age,2)*sex)\nPredict(f, sex, age=c(30,50,70))\nsurvplot(f, sex, n.risk=TRUE, levels.only=TRUE) #Adjust age to median\nsurvplot(f, sex, logt=TRUE, loglog=TRUE) #Check for Weibull-ness (linearity)\nsurvplot(f, sex=c(\"male\",\"female\"), age=50)\n #Would have worked without datadist\n #or with an incomplete datadist\nsurvplot(f, sex, label.curves=list(keys=c(2,0), point.inc=2))\n #Identify curves with symbols\n\n\nsurvplot(f, sex, label.curves=list(keys=c('m','f')))\n #Identify curves with single letters\n\n\n#Plots by quintiles of age, adjusting sex to male\noptions(digits=3)\nsurvplot(f, age=quantile(age,(1:4)/5), sex=\"male\")\n\n\n#Plot survival Kaplan-Meier survival estimates for males\nf <- npsurv(S ~ 1, subset=sex==\"male\")\nsurvplot(f)\n\n\n#Plot survival for both sexes and show exponential hazard estimates\nf <- npsurv(S ~ sex)\nsurvplot(f, aehaz=TRUE)\n#Check for log-normal and log-logistic fits\nsurvplot(f, fun=qnorm, ylab=\"Inverse Normal Transform\")\nsurvplot(f, fun=function(y)log(y/(1-y)), ylab=\"Logit S(t)\")\n\n#Plot the difference between sexes\nsurvdiffplot(f)\n\n#Similar but show half-width of confidence intervals centered\n#at average of two survival estimates\n#See Boers (2004)\nsurvplot(f, conf='diffbands')\n\noptions(datadist=NULL)\n## Not run: \n##D #\n##D # Time to progression/death for patients with monoclonal gammopathy\n##D # Competing risk curves (cumulative incidence)\n##D # status variable must be a factor with first level denoting right censoring\n##D m <- upData(mgus1, stop = stop / 365.25, units=c(stop='years'),\n##D labels=c(stop='Follow-up Time'), subset=start == 0)\n##D f <- npsurv(Surv(stop, event) ~ 1, data=m)\n##D \n##D # Use survplot for enhanced displays of cumulative incidence curves for\n##D # competing risks\n##D \n##D survplot(f, state='pcm', n.risk=TRUE, xlim=c(0, 20), ylim=c(0, .5), col=2)\n##D survplot(f, state='death', aehaz=TRUE, col=3,\n##D label.curves=list(keys='lines'))\n##D f <- npsurv(Surv(stop, event) ~ sex, data=m)\n##D survplot(f, state='death', aehaz=TRUE, n.risk=TRUE, conf='diffbands',\n##D label.curves=list(keys='lines'))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"val.prob","snippet":"### Name: val.prob\n### Title: Validate Predicted Probabilities\n### Aliases: val.prob print.val.prob plot.val.prob\n### Keywords: models regression htest smooth\n\n### ** Examples\n\n# Fit logistic model on 100 observations simulated from the actual \n# model given by Prob(Y=1 given X1, X2, X3) = 1/(1+exp[-(-1 + 2X1)]),\n# where X1 is a random uniform [0,1] variable. Hence X2 and X3 are \n# irrelevant. 
After fitting a linear additive model in X1, X2,\n# and X3, the coefficients are used to predict Prob(Y=1) on a\n# separate sample of 100 observations. Note that data splitting is\n# an inefficient validation method unless n > 20,000.\n\n\nset.seed(1)\nn <- 200\nx1 <- runif(n)\nx2 <- runif(n)\nx3 <- runif(n)\nlogit <- 2*(x1-.5)\nP <- 1/(1+exp(-logit))\ny <- ifelse(runif(n)<=P, 1, 0)\nd <- data.frame(x1,x2,x3,y)\nf <- lrm(y ~ x1 + x2 + x3, subset=1:100)\npred.logit <- predict(f, d[101:200,])\nphat <- 1/(1+exp(-pred.logit))\nval.prob(phat, y[101:200], m=20, cex=.5) # subgroups of 20 obs.\n\n\n# Validate predictions more stringently by stratifying on whether\n# x1 is above or below the median\n\n\nv <- val.prob(phat, y[101:200], group=x1[101:200], g.group=2)\nv\nplot(v)\nplot(v, flag=function(stats) ifelse(\n stats[,'ChiSq2'] > qchisq(.95,2) |\n stats[,'B ChiSq'] > qchisq(.95,1), '*', ' ') )\n# Stars rows of statistics in plot corresponding to significant\n# mis-calibration at the 0.05 level instead of the default, 0.01\n\n\nplot(val.prob(phat, y[101:200], group=x1[101:200], g.group=2), \n col=1:3) # 3 colors (1 for overall)\n\n\n# Weighted calibration curves\n# plot(val.prob(pred, y, group=age, weights=freqs))\n\n\n"} {"package":"rms","topic":"val.surv","snippet":"### Name: val.surv\n### Title: Validate Predicted Probabilities Against Observed Survival Times\n### Aliases: val.surv plot.val.surv plot.val.survh print.val.survh\n### Keywords: models regression smooth survival\n\n### ** Examples\n\n# Generate failure times from an exponential distribution\nrequire(survival)\nset.seed(123) # so can reproduce results\nn <- 1000\nage <- 50 + 12*rnorm(n)\nsex <- factor(sample(c('Male','Female'), n, rep=TRUE, prob=c(.6, .4)))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\nt <- -log(runif(n))/h\nunits(t) <- 'Year'\nlabel(t) <- 'Time to Event'\nev <- ifelse(t <= cens, 1, 0)\nt <- pmin(t, cens)\nS <- Surv(t, ev)\n\n# First validate true model used to generate data\n\n# If hare is available, make a smooth calibration plot for 1-year\n# survival probability where we predict 1-year survival using the\n# known true population survival probability\n# In addition, use groupkm to show that grouping predictions into\n# intervals and computing Kaplan-Meier estimates is not as accurate.\n\nif(requireNamespace('polspline')) {\n s1 <- exp(-h*1)\n w <- val.surv(est.surv=s1, S=S, u=1,\n fun=function(p)log(-log(p)))\n plot(w, lim=c(.85,1), scat1d.opts=list(nhistSpike=200, side=1))\n groupkm(s1, S, m=100, u=1, pl=TRUE, add=TRUE)\n}\n\n# Now validate the true model using residuals\n\nw <- val.surv(est.surv=exp(-h*t), S=S)\nplot(w)\nplot(w, group=sex) # stratify by sex\n\n\n# Now fit an exponential model and validate\n# Note this is not really a validation as we're using the\n# training data here\nf <- psm(S ~ age + sex, dist='exponential', y=TRUE)\nw <- val.surv(f)\nplot(w, group=sex)\n\n\n# We know the censoring time on every subject, so we can\n# compare the predicted Pr[T <= observed T | T>c, X] to\n# its expectation 0.5 Pr[T <= C | X] where C = censoring time\n# We plot a ratio that should equal one\nw <- val.surv(f, censor=cens)\nplot(w)\nplot(w, group=age, g=3) # stratify by tertile of age\n\n\n"} {"package":"rms","topic":"validate","snippet":"### Name: validate\n### Title: Resampling Validation of a Fitted Model's Indexes of Fit\n### Aliases: validate print.validate latex.validate html.validate\n### Keywords: models regression methods survival\n\n### ** Examples\n\n# See examples for 
validate.cph, validate.lrm, validate.ols\n# Example of validating a parametric survival model:\n\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('Male','Female'), n, TRUE))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\ndt <- -log(runif(n))/h\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\nS <- Surv(dt,e)\n\n\nf <- psm(S ~ age*sex, x=TRUE, y=TRUE) # Weibull model\n# Validate full model fit\nvalidate(f, B=10) # usually B=150\n\n\n# Validate stepwise model with typical (not so good) stopping rule\n# bw=TRUE does not preserve hierarchy of terms at present\nvalidate(f, B=10, bw=TRUE, rule=\"p\", sls=.1, type=\"individual\")\n\n\n"} {"package":"rms","topic":"validate.Rq","snippet":"### Name: validate.Rq\n### Title: Validation of a Quantile Regression Model\n### Aliases: validate.Rq\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- runif(200)\nx2 <- sample(0:3, 200, TRUE)\nx3 <- rnorm(200)\ndistance <- (x1 + x2/3 + rnorm(200))^2\n\nf <- Rq(sqrt(distance) ~ rcs(x1,4) + scored(x2) + x3, x=TRUE, y=TRUE)\n\n#Validate full model fit (from all observations) but for x1 < .75\nvalidate(f, B=20, subset=x1 < .75) # normally B=300\n\n#Validate stepwise model with typical (not so good) stopping rule\nvalidate(f, B=20, bw=TRUE, rule=\"p\", sls=.1, type=\"individual\")\n\n\n"} {"package":"rms","topic":"validate.cph","snippet":"### Name: validate.cph\n### Title: Validation of a Fitted Cox or Parametric Survival Model's\n### Indexes of Fit\n### Aliases: validate.cph validate.psm dxy.cens\n### Keywords: models regression survival\n\n### ** Examples\n\nrequire(survival)\nn <- 1000\nset.seed(731)\nage <- 50 + 12*rnorm(n)\nlabel(age) <- \"Age\"\nsex <- factor(sample(c('Male','Female'), n, TRUE))\ncens <- 15*runif(n)\nh <- .02*exp(.04*(age-50)+.8*(sex=='Female'))\ndt <- -log(runif(n))/h\ne <- ifelse(dt <= cens,1,0)\ndt <- pmin(dt, cens)\nunits(dt) <- \"Year\"\nS <- Surv(dt,e)\n\nf <- cph(S ~ age*sex, x=TRUE, y=TRUE)\n# Validate full model fit\nvalidate(f, B=10) # normally B=150\n\n# Validate a model with stratification. 
Dxy is the only\n# discrimination measure for such models, but Dxy requires\n# one to choose a single time at which to predict S(t|X)\nf <- cph(S ~ rcs(age)*strat(sex), \n x=TRUE, y=TRUE, surv=TRUE, time.inc=2)\nvalidate(f, u=2, B=10) # normally B=150\n# Note u=time.inc\n\n\n"} {"package":"rms","topic":"validate.lrm","snippet":"### Name: validate.lrm\n### Title: Resampling Validation of a Logistic or Ordinal Regression Model\n### Aliases: validate.lrm validate.orm\n### Keywords: models regression\n\n### ** Examples\n\nn <- 1000 # define sample size\nage <- rnorm(n, 50, 10)\nblood.pressure <- rnorm(n, 120, 15)\ncholesterol <- rnorm(n, 200, 25)\nsex <- factor(sample(c('female','male'), n,TRUE))\n\n\n# Specify population model for log odds that Y=1\nL <- .4*(sex=='male') + .045*(age-50) +\n (log(cholesterol - 10)-5.2)*(-2*(sex=='female') + 2*(sex=='male'))\n# Simulate binary y to have Prob(y=1) = 1/[1+exp(-L)]\ny <- ifelse(runif(n) < plogis(L), 1, 0)\n\n\nf <- lrm(y ~ sex*rcs(cholesterol)+pol(age,2)+blood.pressure, x=TRUE, y=TRUE)\n#Validate full model fit\nvalidate(f, B=10) # normally B=300\nvalidate(f, B=10, group=y) \n# two-sample validation: make resamples have same numbers of\n# successes and failures as original sample\n\n\n#Validate stepwise model with typical (not so good) stopping rule\nvalidate(f, B=10, bw=TRUE, rule=\"p\", sls=.1, type=\"individual\")\n\n\n## Not run: \n##D #Fit a continuation ratio model and validate it for the predicted\n##D #probability that y=0\n##D u <- cr.setup(y)\n##D Y <- u$y\n##D cohort <- u$cohort\n##D attach(mydataframe[u$subs,])\n##D f <- lrm(Y ~ cohort+rcs(age,4)*sex, penalty=list(interaction=2))\n##D validate(f, cluster=u$subs, subset=cohort=='all') \n##D #see predab.resample for cluster and subset\n## End(Not run)\n\n\n"} {"package":"rms","topic":"validate.ols","snippet":"### Name: validate.ols\n### Title: Validation of an Ordinary Linear Model\n### Aliases: validate.ols\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- runif(200)\nx2 <- sample(0:3, 200, TRUE)\nx3 <- rnorm(200)\ndistance <- (x1 + x2/3 + rnorm(200))^2\n\nf <- ols(sqrt(distance) ~ rcs(x1,4) + scored(x2) + x3, x=TRUE, y=TRUE)\n\n#Validate full model fit (from all observations) but for x1 < .75\nvalidate(f, B=20, subset=x1 < .75) # normally B=300\n\n#Validate stepwise model with typical (not so good) stopping rule\nvalidate(f, B=20, bw=TRUE, rule=\"p\", sls=.1, type=\"individual\")\n\n\n"} {"package":"rms","topic":"validate.rpart","snippet":"### Name: validate.rpart\n### Title: Dxy and Mean Squared Error by Cross-validating a Tree Sequence\n### Aliases: validate.rpart print.validate.rpart plot.validate.rpart\n### Keywords: models tree category\n\n### ** Examples\n\n## Not run: \n##D n <- 100\n##D set.seed(1)\n##D x1 <- runif(n)\n##D x2 <- runif(n)\n##D x3 <- runif(n)\n##D y <- 1*(x1+x2+rnorm(n) > 1)\n##D table(y)\n##D require(rpart)\n##D f <- rpart(y ~ x1 + x2 + x3, model=TRUE)\n##D v <- validate(f)\n##D v # note the poor validation\n##D par(mfrow=c(1,2))\n##D plot(v, legendloc=c(.2,.5))\n##D par(mfrow=c(1,1))\n## End(Not run)\n\n\n"} {"package":"rms","topic":"vif","snippet":"### Name: vif\n### Title: Variance Inflation Factors\n### Aliases: vif\n### Keywords: models regression\n\n### ** Examples\n\nset.seed(1)\nx1 <- rnorm(100)\nx2 <- x1+.1*rnorm(100)\ny <- sample(0:1, 100, TRUE)\nf <- lrm(y ~ x1 + x2)\nvif(f)\n\n\n"}
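An offline aside (an addition, not from the rms docs): for an ordinary linear model the variance inflation factor of predictor j is 1/(1 - R^2_j), where R^2_j comes from regressing predictor j on the remaining predictors; vif() generalizes this idea to rms fits through the covariance matrix of the coefficient estimates. A minimal hand check of the identity on the same kind of collinear data used above:

set.seed(1)
x1 <- rnorm(100)
x2 <- x1 + .1*rnorm(100)               # x2 nearly collinear with x1
r2 <- summary(lm(x1 ~ x2))$r.squared   # R^2 of x1 on the remaining predictor
1/(1 - r2)                             # hand-computed VIF for x1; large, as expected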
{"package":"rms","topic":"which.influence","snippet":"### Name: which.influence\n### Title: Which Observations are Influential\n### Aliases: which.influence show.influence\n### Keywords: models regression survival\n\n### ** Examples\n\n#print observations in data frame that are influential,\n#separately for each factor in the model\nx1 <- 1:20\nx2 <- abs(x1-10)\nx3 <- factor(rep(0:2,length.out=20))\ny <- c(rep(0:1,8),1,1,1,1)\nf <- lrm(y ~ rcs(x1,3) + x2 + x3, x=TRUE,y=TRUE)\nw <- which.influence(f, .55)\nnam <- names(w)\nd <- data.frame(x1,x2,x3,y)\nfor(i in 1:length(nam)) {\n print(paste(\"Influential observations for effect of \",nam[i]),quote=FALSE)\n print(d[w[[i]],])\n}\n\nshow.influence(w, d) # better way to show results\n\n\n"} {"package":"rms","topic":"rmsOverview","snippet":"### Name: rmsOverview\n### Title: Overview of rms Package\n### Aliases: rmsOverview rms.Overview\n### Keywords: models\n\n### ** Examples\n\n## To run several comprehensive examples, run the following command\n## Not run: \n##D demo(all, 'rms')\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"getFolderPath","snippet":"### Name: getFolderPath\n### Title: Returns the folder path associated with a session\n### Aliases: getFolderPath\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D # library(Rlabkey)\n##D \n##D lks<- getSession(\"https://www.labkey.org\", \"/home\")\n##D getFolderPath(lks) #returns \"/home\"\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"getLookups","snippet":"### Name: getLookups\n### Title: Get related data fields that are available to include in a query\n### on a given query object\n### Aliases: getLookups\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## get fields from lookup tables and add to query\n##D # library(Rlabkey)\n##D \n##D s<- getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\")\n##D \n##D scobj <- getSchema(s, \"lists\")\n##D \n##D # can add fields from related queries\n##D lucols <- getLookups(s, scobj$AllTypes$Category)\n##D \n##D # keep going to other tables\n##D lucols2 <- getLookups(s, lucols[[\"Category/Group\"]])\n##D \n##D cols <- c(names(scobj$AllTypes)[2:6], names(lucols)[2:4])\n##D \n##D getRows(s, scobj$AllTypes, colSelect=paste(cols, sep=\",\"))\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"getRows","snippet":"### Name: getRows\n### Title: Retrieve data from LabKey Server\n### Aliases: getRows\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## simple example of getting data using schema objects\n##D # library(Rlabkey)\n##D \n##D s<-getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\")\n##D s # shows schemas\n##D \n##D scobj <- getSchema(s, \"lists\")\n##D scobj # shows available queries\n##D \n##D scobj$AllTypes ## this is the query object\n##D \n##D getRows(s, scobj$AllTypes)\n##D \t\n## End(Not run)\n\n\n"}
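A companion sketch (added; not in the Rlabkey docs): getRows() passes its remaining arguments through to labkey.selectRows(), so the session/schema objects above can be combined with the same row filters used later in this file. Treat the pass-through as an assumption to verify on your installation (e.g. with args(getRows)); the server URL and list names are the usual placeholders.

# not run -- requires a live LabKey server
# s     <- getSession(baseUrl="http://localhost:8080/labkey", folderPath="/apisamples")
# scobj <- getSchema(s, "lists")
# getRows(s, scobj$AllTypes, maxRows=10,
#         colFilter=makeFilter(c("Category", "EQUALS", "LOOKUP2")))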
{"package":"Rlabkey","topic":"getSchema","snippet":"### Name: getSchema\n### Title: Returns an object representing a LabKey schema\n### Aliases: getSchema\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## the basics of using session, schema, and query objects\n##D # library(Rlabkey)\n##D \n##D s<- getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\")\n##D \n##D sch<- getSchema(s, \"lists\")\n##D \n##D # can walk down the populated schema tree from schema node or query node\n##D sch$AllTypes$Category\n##D sch$AllTypes$Category$caption\n##D sch$AllTypes$Category$type\n##D \n##D # can add fields from related queries\n##D lucols <- getLookups(s, sch$AllTypes$Category)\n##D \n##D cols <- c(names(sch$AllTypes[2:6]), names(lucols)[2:4])\n##D \n##D getRows(s, sch$AllTypes, colSelect=cols)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"getSession","snippet":"### Name: getSession\n### Title: Creates and returns a LabKey Server session\n### Aliases: getSession\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D # library(Rlabkey)\n##D \n##D s <- getSession(\"https://www.labkey.org\", \"/home\")\n##D s #shows schemas\n##D \n##D ## using the curlOptions for generating debug traces of network traffic\n##D d<- debugGatherer()\n##D copt <- curlOptions(debugfunction=d$update, verbose=TRUE,\n##D cookiefile='/cooks.txt')\n##D sdbg<- getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", curlOptions=copt)\n##D getRows(sdbg, scobj$AllTypes)\n##D strwrap(d$value(), 100)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.deleteRows","snippet":"### Name: labkey.deleteRows\n### Title: Delete rows of data from a LabKey database\n### Aliases: labkey.deleteRows\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Insert, update and delete\n##D ## Note that users must have the necessary permissions in the LabKey Server\n##D ## to be able to modify data through the use of these functions\n##D # library(Rlabkey)\n##D \n##D newrow <- data.frame(\n##D \tDisplayFld=\"Inserted from R\"\n##D \t, TextFld=\"how its done\"\n##D \t, IntFld= 98 \n##D \t, DoubleFld = 12.345\n##D \t, DateTimeFld = \"03/01/2010\"\n##D \t, BooleanFld= FALSE\n##D \t, LongTextFld = \"Four score and seven years ago\"\n##D #\t, AttachmentFld = NA #attachment fields not supported \n##D \t, RequiredText = \"Veni, vidi, vici\"\n##D \t, RequiredInt = 0\n##D \t, Category = \"LOOKUP2\"\n##D \t, stringsAsFactors=FALSE)\n##D \n##D insertedRow <- labkey.insertRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\",\n##D queryName=\"AllTypes\", toInsert=newrow)\n##D newRowId <- insertedRow$rows[[1]]$RowId\n##D \n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D selectedRow\n##D \n##D updaterow=data.frame(\n##D \tRowId=newRowId\n##D \t, DisplayFld=\"Updated from R\"\n##D \t, TextFld=\"how to update\"\n##D \t, IntFld= 777 \n##D \t, stringsAsFactors=FALSE)\n##D \n##D updatedRow <- labkey.updateRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\",\n##D queryName=\"AllTypes\", toUpdate=updaterow)\n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D selectedRow\n##D \n##D deleterow <- data.frame(RowId=newRowId, stringsAsFactors=FALSE)\n##D result <- labkey.deleteRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\",\n##D queryName=\"AllTypes\", toDelete=deleterow)\n##D result\n##D \n## End(Not run)\n\n\n"}
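One detail worth calling out (an addition): makeFilter(), used throughout the round trip above, builds its filter clauses entirely client-side, so it can be run and inspected without any server connection; the exact encoding of the returned name/value pairs may vary by Rlabkey version.

library(Rlabkey)
# two filter clauses; the result is the set of URL parameters that
# selectRows-style calls send to the server
makeFilter(c("RowId", "EQUALS", "3"),
           c("Category", "NOT_EQUALS", "LOOKUP2"))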
{"package":"Rlabkey","topic":"labkey.domain.FILTER_TYPES","snippet":"### Name: labkey.domain.FILTER_TYPES\n### Title: Provide comparator access\n### Aliases: labkey.domain.FILTER_TYPES\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D library(Rlabkey)\n##D \n##D qf <- labkey.domain.FILTER_TYPES\n##D \n##D # Example of available comparators\n##D comparator1 <- qf$EQUAL\n##D comparator2 <- qf$GREATER_THAN\n##D comparator3 <- qf$DATE_LESS_THAN_OR_EQUAL\n##D comparator4 <- qf$STARTS_WITH\n##D comparator5 <- qf$CONTAINS_ONE_OF\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.create","snippet":"### Name: labkey.domain.create\n### Title: Create a new LabKey domain\n### Aliases: labkey.domain.create\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create a data frame and infer its fields, then create a domain design from it\n##D library(Rlabkey)\n##D \n##D df <- data.frame(ptid=c(1:3), age = c(10,20,30), sex = c(\"f\", \"m\", \"f\"))\n##D fields <- labkey.domain.inferFields(baseUrl=\"http://labkey/\", folderPath=\"home\", df=df)\n##D dd <- labkey.domain.createDesign(name=\"test list\", fields=fields)\n##D \n##D ## create a new list with an integer key field\n##D labkey.domain.create(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D domainKind=\"IntList\", domainDesign=dd, options=list(keyName = \"ptid\"))\n##D \n##D ## create a domain using a domain template\n##D labkey.domain.create(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D domainTemplate=\"Priority\", module=\"simpletest\", domainGroup=\"todolist\")\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.createAndLoad","snippet":"### Name: labkey.domain.createAndLoad\n### Title: Create a new LabKey domain and load data\n### Aliases: labkey.domain.createAndLoad\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## Prepare a data.frame\n##D participants = c(\"0001\",\"0001\",\"0002\",\"0002\",\"0007\",\"0008\")\n##D Visit = c(\"V1\", \"V2\", \"V2\", \"V1\", \"V2\", \"V1\")\n##D IntValue = c(256:261)\n##D dataset = data.frame(\"ParticipantID\" = participants, Visit,\n##D \"IntegerValue\" = IntValue, check.names = FALSE)\n##D \n##D ## Create the dataset and import\n##D labkey.domain.createAndLoad(baseUrl=\"http://labkey\", folderPath=\"home\",\n##D name=\"demo dataset\", df=dataset, domainKind=\"StudyDatasetVisit\")\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.createConditionalFormat","snippet":"### Name: labkey.domain.createConditionalFormat\n### Title: Create a conditional format data frame\n### Aliases: labkey.domain.createConditionalFormat\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D domain <- labkey.domain.get(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\")\n##D \n##D ## update the third field to use two conditional formats\n##D qf <- labkey.domain.FILTER_TYPES\n##D cf1 = labkey.domain.createConditionalFormat(labkey.domain.createConditionalFormatQueryFilter(qf$GT,\n##D 100), bold=TRUE, text_color=\"D33115\", background_color=\"333333\")\n##D cf2 = labkey.domain.createConditionalFormat(labkey.domain.createConditionalFormatQueryFilter(\n##D qf$LESS_THAN, 400), italic=TRUE, text_color=\"68BC00\")\n##D domain$fields$conditionalFormats[[3]] = rbind(cf1,cf2)\n##D \n##D labkey.domain.save(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\", domainDesign=domain)\n##D \n## End(Not run)\n\n\n"}
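A brief offline aside (added here): the two conditional-format helpers above are plain constructors, so their output can be examined without a LabKey server. A minimal sketch, assuming an Rlabkey version that exports both helpers and labkey.domain.FILTER_TYPES:

library(Rlabkey)
qf   <- labkey.domain.FILTER_TYPES
filt <- labkey.domain.createConditionalFormatQueryFilter(qf$GREATER_THAN, 100)
cf   <- labkey.domain.createConditionalFormat(filt, bold=TRUE, text_color="D33115")
str(cf)  # a one-row data frame, ready for domain$fields$conditionalFormats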
{"package":"Rlabkey","topic":"labkey.domain.createConditionalFormatQueryFilter","snippet":"### Name: labkey.domain.createConditionalFormatQueryFilter\n### Title: Create a conditional format query filter\n### Aliases: labkey.domain.createConditionalFormatQueryFilter\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D qf <- labkey.domain.FILTER_TYPES\n##D \n##D # Filters for values equal to 750\n##D qf1 <- labkey.domain.createConditionalFormatQueryFilter(qf$EQUAL, 750)\n##D # Filters for values greater than 500, but less than 1000\n##D qf2 <- labkey.domain.createConditionalFormatQueryFilter(qf$GREATER_THAN, 500, qf$LESS_THAN, 1000)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.createDesign","snippet":"### Name: labkey.domain.createDesign\n### Title: Helper function to create a domain design data structure\n### Aliases: labkey.domain.createDesign\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create a data frame and infer its fields, then create a domain design from it\n##D library(Rlabkey)\n##D \n##D df <- data.frame(ptid=c(1:3), age = c(10,20,30), sex = c(\"f\", \"m\", \"f\"))\n##D fields <- labkey.domain.inferFields(baseUrl=\"http://labkey/\", folderPath=\"home\", df=df)\n##D indices = labkey.domain.createIndices(list(\"ptid\", \"age\"), TRUE)\n##D indices = labkey.domain.createIndices(list(\"age\"), FALSE, indices)\n##D dd <- labkey.domain.createDesign(name=\"test list\", fields=fields, indices=indices)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.createIndices","snippet":"### Name: labkey.domain.createIndices\n### Title: Helper function to create a domain design indices list\n### Aliases: labkey.domain.createIndices\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create a list of indices definitions to use for a domain design\n##D library(Rlabkey)\n##D \n##D indices = labkey.domain.createIndices(list(\"intKey\", \"customInt\"), TRUE)\n##D indices = labkey.domain.createIndices(list(\"customInt\"), FALSE, indices)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.drop","snippet":"### Name: labkey.domain.drop\n### Title: Delete a LabKey domain\n### Aliases: labkey.domain.drop\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## delete an existing domain\n##D library(Rlabkey)\n##D \n##D labkey.domain.drop(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\")\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.get","snippet":"### Name: labkey.domain.get\n### Title: Returns the metadata for an existing LabKey domain\n### Aliases: labkey.domain.get\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## retrieve an existing domain\n##D library(Rlabkey)\n##D \n##D labkey.domain.get(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\")\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.domain.inferFields","snippet":"### Name: labkey.domain.inferFields\n### Title: Infer field metadata from a data frame\n### Aliases: labkey.domain.inferFields\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create a data frame and infer its fields\n##D library(Rlabkey)\n##D \n##D df <- data.frame(ptid=c(1:3), age = c(10,20,30), sex = c(\"f\", \"m\", \"f\"))\n##D fields <- labkey.domain.inferFields(baseUrl=\"http://labkey/\", folderPath=\"home\", df=df)\n##D \n## End(Not run)\n\n\n"}
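A defensive addition (not from the docs): before handing inferred metadata to labkey.domain.createDesign()/labkey.domain.create(), it can help to inspect what labkey.domain.inferFields() returned. The fields$fields$name line below assumes the returned list carries a fields data frame, which may differ across Rlabkey versions, so treat this as an illustrative sketch.

# not run -- labkey.domain.inferFields needs a live server
# df     <- data.frame(ptid=1:3, age=c(10,20,30), sex=c("f","m","f"))
# fields <- labkey.domain.inferFields(baseUrl="http://labkey/", folderPath="home", df=df)
# str(fields)        # check inferred names and types before creating the domain
# fields$fields$name # assumed layout of the inferred metadata
# dd <- labkey.domain.createDesign(name="test list", fields=fields)
# labkey.domain.create(baseUrl="http://labkey/", folderPath="home",
#                      domainKind="IntList", domainDesign=dd, options=list(keyName="ptid"))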
{"package":"Rlabkey","topic":"labkey.domain.save","snippet":"### Name: labkey.domain.save\n### Title: Updates an existing LabKey domain\n### Aliases: labkey.domain.save\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D ## change the type of one of the columns\n##D domain <- labkey.domain.get(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\")\n##D \n##D domain$fields[3,]$rangeURI = \"xsd:string\"\n##D domain$fields[3,]$name = \"changed to string\"\n##D \n##D labkey.domain.save(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", queryName=\"test list\", domainDesign=domain)\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.executeSql","snippet":"### Name: labkey.executeSql\n### Title: Retrieve data from a LabKey Server using SQL commands\n### Aliases: labkey.executeSql\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Example of an explicit join and use of group by and aggregates\n##D # library(Rlabkey)\n##D \n##D sql<- \"SELECT AllTypesCategories.Category AS Category, \n##D SUM(AllTypes.IntFld) AS SumOfIntFld,\n##D AVG(AllTypes.DoubleFld) AS AvgOfDoubleFld\n##D FROM AllTypes LEFT JOIN AllTypesCategories\n##D ON (AllTypes.Category = AllTypesCategories.TextKey)\n##D WHERE AllTypes.Category IS NOT NULL\n##D GROUP BY AllTypesCategories.Category\"\n##D \n##D sqlResults <- labkey.executeSql(\n##D baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\",\n##D schemaName=\"lists\",\n##D sql = sql)\n##D \n##D sqlResults\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.experiment.createData","snippet":"### Name: labkey.experiment.createData\n### Title: Create an experiment data object\n### Aliases: labkey.experiment.createData\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## create a non-assay backed run with data classes as data inputs and outputs\n##D \n##D d1 <- labkey.experiment.createData(\n##D list(name = \"dc-01\"), dataClassId = 400)\n##D d2 <- labkey.experiment.createData(\n##D list(name = \"dc-02\"), dataClassId = 402)\n##D run <- labkey.experiment.createRun(\n##D list(name=\"new run\"), dataInputs = d1, dataOutputs = d2)\n##D labkey.experiment.saveBatch(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D protocolName=labkey.experiment.SAMPLE_DERIVATION_PROTOCOL, runList=run)\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.experiment.createMaterial","snippet":"### Name: labkey.experiment.createMaterial\n### Title: Create an experiment material object\n### Aliases: labkey.experiment.createMaterial\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## create a non-assay backed run with samples as material inputs and outputs\n##D \n##D m1 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.626\"), sampleSetName = \"Study Specimens\")\n##D m2 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.625\"), sampleSetName = \"Study Specimens\")\n##D run <- labkey.experiment.createRun(\n##D list(name=\"new run\"), materialInputs = m1, materialOutputs = m2)\n##D labkey.experiment.saveBatch(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D protocolName=labkey.experiment.SAMPLE_DERIVATION_PROTOCOL, runList=run)\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.experiment.createRun","snippet":"### Name: labkey.experiment.createRun\n### Title: Create an experiment run object\n### Aliases: labkey.experiment.createRun\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## create a non-assay backed run with samples as material inputs and outputs\n##D \n##D m1 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.626\"), sampleSetName = \"Study Specimens\")\n##D m2 <- 
labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.625\"), sampleSetName = \"Study Specimens\")\n##D run <- labkey.experiment.createRun(\n##D list(name=\"new run\"), materialInputs = m1, materialOutputs = m2)\n##D labkey.experiment.saveBatch(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D protocolName=labkey.experiment.SAMPLE_DERIVATION_PROTOCOL, runList=run)\n##D \n##D ## import an assay run which includes plate metadata\n##D \n##D df <- data.frame(participantId=c(1:3), visitId = c(10,20,30), welllocation = c(\"A1\", \"D11\", \"F12\"))\n##D \n##D runConfig <- fromJSON(txt='{\"assayId\": 310,\n##D \"name\" : \"api imported run with plate metadata\",\n##D \"properties\" : {\n##D \"PlateTemplate\" : \"urn:lsid:labkey.com:PlateTemplate.Folder-6:d8bbec7d-34cd-1038-bd67-b3bd\"\n##D }\n##D }')\n##D \n##D plateMetadata <- fromJSON(txt='{\n##D \"control\" : {\n##D \"positive\" : {\"dilution\": 0.005},\n##D \"negative\" : {\"dilution\": 1.0}\n##D },\n##D \"sample\" : {\n##D \"SA01\" : {\"dilution\": 1.0, \"ID\" : 111, \"Barcode\" : \"BC_111\", \"Concentration\" : 0.0125},\n##D \"SA02\" : {\"dilution\": 2.0, \"ID\" : 222, \"Barcode\" : \"BC_222\"},\n##D \"SA03\" : {\"dilution\": 3.0, \"ID\" : 333, \"Barcode\" : \"BC_333\"},\n##D \"SA04\" : {\"dilution\": 4.0, \"ID\" : 444, \"Barcode\" : \"BC_444\"}\n##D }\n##D }')\n##D \n##D run <- labkey.experiment.createRun(runConfig, dataRows = df, plateMetadata = plateMetadata)\n##D labkey.experiment.saveBatch(\n##D baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D assayConfig=list(assayId = 310), runList=run\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.experiment.saveBatch","snippet":"### Name: labkey.experiment.saveBatch\n### Title: Saves a modified experiment batch\n### Aliases: labkey.experiment.saveBatch\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## uploads data to an existing assay\n##D \n##D df <- data.frame(participantId=c(1:3), visitId = c(10,20,30), sex = c(\"f\", \"m\", \"f\"))\n##D bprops <- list(LabNotes=\"this is a simple demo\")\n##D bpl <- list(name=paste(\"Batch \", as.character(date())),properties=bprops)\n##D run <- labkey.experiment.createRun(list(name=\"new assay run\"), dataRows = df)\n##D labkey.experiment.saveBatch(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D assayConfig=list(assayName=\"GPAT\", providerName=\"General\"),\n##D batchPropertyList=bpl, runList=run)\n##D \n##D ## create a non-assay backed run with samples as material inputs and outputs\n##D \n##D m1 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.626\"), sampleSetName = \"Study Specimens\")\n##D m2 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.625\"), sampleSetName = \"Study Specimens\")\n##D run <- labkey.experiment.createRun(\n##D list(name=\"new run\"), materialInputs = m1, materialOutputs = m2)\n##D labkey.experiment.saveBatch(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D protocolName=labkey.experiment.SAMPLE_DERIVATION_PROTOCOL, runList=run)\n##D \n##D ## import an assay run which includes plate metadata\n##D \n##D df <- data.frame(participantId=c(1:3), visitId = c(10,20,30), welllocation = c(\"A1\", \"D11\", \"F12\"))\n##D \n##D runConfig <- fromJSON(txt='{\"assayId\": 310,\n##D \"name\" : \"api imported run with plate metadata\",\n##D \"properties\" : {\n##D \"PlateTemplate\" : \"urn:lsid:labkey.com:PlateTemplate.Folder-6:d8bbec7d-34cd-1038-bd67-b3bd\"\n##D }\n##D }')\n##D \n##D plateMetadata <- 
fromJSON(txt='{\n##D \"control\" : {\n##D \"positive\" : {\"dilution\": 0.005},\n##D \"negative\" : {\"dilution\": 1.0}\n##D },\n##D \"sample\" : {\n##D \"SA01\" : {\"dilution\": 1.0, \"ID\" : 111, \"Barcode\" : \"BC_111\", \"Concentration\" : 0.0125},\n##D \"SA02\" : {\"dilution\": 2.0, \"ID\" : 222, \"Barcode\" : \"BC_222\"},\n##D \"SA03\" : {\"dilution\": 3.0, \"ID\" : 333, \"Barcode\" : \"BC_333\"},\n##D \"SA04\" : {\"dilution\": 4.0, \"ID\" : 444, \"Barcode\" : \"BC_444\"}\n##D }\n##D }')\n##D \n##D run <- labkey.experiment.createRun(runConfig, dataRows = df, plateMetadata = plateMetadata)\n##D labkey.experiment.saveBatch(\n##D baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D assayConfig=list(assayId = 310), runList=run\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.experiment.saveRuns","snippet":"### Name: labkey.experiment.saveRuns\n### Title: Saves Runs.\n### Aliases: labkey.experiment.saveRuns\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## example with materialInputs and materialOutputs\n##D \n##D m1 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.626\"), sampleSetName = \"Study Specimens\")\n##D m2 <- labkey.experiment.createMaterial(\n##D list(name = \"87444063.2604.625\"), sampleSetName = \"Study Specimens\")\n##D run <- labkey.experiment.createRun(\n##D list(name=\"new run\"), materialInputs = m1, materialOutputs = m2)\n##D labkey.experiment.saveRuns(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D protocolName=labkey.experiment.SAMPLE_DERIVATION_PROTOCOL, runList=run)\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getBaseUrl","snippet":"### Name: labkey.getBaseUrl\n### Title: Get the default baseUrl parameter used for all http or https\n### requests\n### Aliases: labkey.getBaseUrl\n\n### ** Examples\n\n## Not run: \n##D ## Example of getting previously set baseUrl\n##D library(Rlabkey)\n##D labkey.setDefaults(apiKey=\"abcdef0123456789abcdef0123456789\",\n##D baseUrl=\"http://labkey/\")\n##D labkey.getBaseUrl()\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getDefaultViewDetails","snippet":"### Name: labkey.getDefaultViewDetails\n### Title: Retrieve the fields of a LabKey query view\n### Aliases: labkey.getDefaultViewDetails\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Details of fields of a default query view\n##D # library(Rlabkey)\n##D \n##D queryDF <- labkey.getDefaultViewDetails(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\")\n##D \t\n##D queryDF\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getFolders","snippet":"### Name: labkey.getFolders\n### Title: Retrieve a list of folders accessible to the current user\n### Aliases: labkey.getFolders\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## List of folders \n##D # library(Rlabkey)\n##D \n##D folders <- labkey.getFolders(\"https://www.labkey.org\", \"/home\")\n##D folders\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getLookupDetails","snippet":"### Name: labkey.getLookupDetails\n### Title: Retrieve detailed information on a LabKey query\n### Aliases: labkey.getLookupDetails\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Details of fields of a query referenced by a lookup field\n##D # library(Rlabkey)\n##D \n##D lu1 <- labkey.getLookupDetails(\n##D 
\tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\",\n##D \tlookupKey=\"Category\"\n##D )\n##D lu1\n##D \n##D ## When a lookup field points to a query object that itself has a lookup\n##D ## field, use a compound fieldkey consisting of the lookup fields from\n##D ## the base query object to the target lookupDetails, separated by\n##D ## forward slashes\n##D lu2<- labkey.getLookupDetails(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\",\n##D \tlookupKey=\"Category/Group\"\n##D )\n##D lu2\n##D \n##D ## Now select a result set containing a field from the base query, a\n##D ## field from the 1st level of lookup, and one from the 2nd\n##D rows<- labkey.selectRows(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\",\n##D \tcolSelect=c(\"DisplayFld\",\"Category/Category\",\"Category/Group/GroupName\"), \n##D \tcolFilter = makeFilter(c(\"Category/Group/GroupName\",\n##D \t \"NOT_EQUALS\",\"TypeRange\")), maxRows=20\n##D )\n##D rows\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getModuleProperty","snippet":"### Name: labkey.getModuleProperty\n### Title: Get effective module property value\n### Aliases: labkey.getModuleProperty\n\n### ** Examples\n\n## Not run: \n##D library(Rlabkey)\n##D labkey.getModuleProperty(baseUrl=\"http://labkey/\", folderPath=\"flowProject\",\n##D moduleName=\"flow\", propName=\"ExportToScriptFormat\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getQueries","snippet":"### Name: labkey.getQueries\n### Title: Retrieve a list of available queries for a specified LabKey\n### schema\n### Aliases: labkey.getQueries\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## List of queries in a schema\n##D # library(Rlabkey)\n##D \n##D queriesDF <- labkey.getQueries(\n##D \tbaseUrl=\"https://www.labkey.org\",\n##D \tfolderPath=\"/home\",\n##D \tschemaName=\"lists\"\n##D )\n##D queriesDF\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getQueryDetails","snippet":"### Name: labkey.getQueryDetails\n### Title: Retrieve detailed information on a LabKey query\n### Aliases: labkey.getQueryDetails\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Details of fields of a query\n##D # library(Rlabkey)\n##D \n##D queryDF<-labkey.getQueryDetails(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getQueryViews","snippet":"### Name: labkey.getQueryViews\n### Title: Retrieve a list of available named views defined on a query in a\n### schema\n### Aliases: labkey.getQueryViews\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## List of views defined for a query in a schema\n##D # library(Rlabkey)\n##D \n##D viewsDF <- labkey.getQueryViews(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\",\n##D \tqueryName=\"AllTypes\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getRequestOptions","snippet":"### Name: labkey.getRequestOptions\n### Title: Helper function to get the HTTP request options for a specific\n### method type.\n### Aliases: labkey.getRequestOptions\n### Keywords: 
IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D labkey.getRequestOptions()\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.getSchemas","snippet":"### Name: labkey.getSchemas\n### Title: Retrieve a list of available schemas from a labkey database\n### Aliases: labkey.getSchemas\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## List of schemas\n##D # library(Rlabkey)\n##D \n##D schemasDF <- labkey.getSchemas(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.importRows","snippet":"### Name: labkey.importRows\n### Title: Import rows of data into a LabKey Server\n### Aliases: labkey.importRows\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Note that users must have the necessary permissions in the database\n##D ## to be able to modify data through the use of these functions\n##D # library(Rlabkey)\n##D \n##D newrows <- data.frame(\n##D \tDisplayFld=\"Imported from R\"\n##D \t, RequiredText=\"abc\"\n##D \t, RequiredInt=1\n##D \t, stringsAsFactors=FALSE)\n##D newrows = newrows[rep(1:nrow(newrows),each=5),]\n##D \n##D importedInfo <- labkey.importRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toImport=newrows)\n##D \n##D importedInfo$rowsAffected\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.insertRows","snippet":"### Name: labkey.insertRows\n### Title: Insert new rows of data into a LabKey Server\n### Aliases: labkey.insertRows\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Insert, update and delete\n##D ## Note that users must have the necessary permissions in the database\n##D ## to be able to modify data through the use of these functions\n##D # library(Rlabkey)\n##D \n##D newrow <- data.frame(\n##D \tDisplayFld=\"Inserted from R\"\n##D \t, TextFld=\"how its done\"\n##D \t, IntFld= 98 \n##D \t, DoubleFld = 12.345\n##D \t, DateTimeFld = \"03/01/2010\"\n##D \t, BooleanFld= FALSE\n##D \t, LongTextFld = \"Four score and seven years ago\"\n##D #\t, AttachmentFld = NA #attachment fields not supported \n##D \t, RequiredText = \"Veni, vidi, vici\"\n##D \t, RequiredInt = 0\n##D \t, Category = \"LOOKUP2\"\n##D \t, stringsAsFactors=FALSE)\n##D \n##D insertedRow <- labkey.insertRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toInsert=newrow, options=list(auditBehavior=\"DETAILED\",\n##D auditUserComment=\"testing audit comment for insert\"))\n##D newRowId <- insertedRow$rows[[1]]$RowId\n##D \n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D updaterow=data.frame(\n##D \tRowId=newRowId\n##D \t, DisplayFld=\"Updated from R\"\n##D \t, TextFld=\"how to update\"\n##D \t, IntFld= 777 \n##D \t, stringsAsFactors=FALSE)\n##D \n##D updatedRow <- labkey.updateRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toUpdate=updaterow, options=list(auditBehavior=\"DETAILED\",\n##D auditUserComment=\"testing audit comment for update\"))\n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D 
colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D \n##D deleterow <- data.frame(RowId=newRowId, stringsAsFactors=FALSE)\n##D result <- labkey.deleteRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toDelete=deleterow)\n##D \n##D ## Example of creating a provenance run with an initial step with material inputs, a second step\n##D ## with provenance mapping to link existing samples with newly inserted samples, and a final step\n##D ## with a data output\n##D ##\n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D materialInputs=mi)\n##D ra <- labkey.provenance.startRecording(baseUrl=\"https://labkey.org/labkey/\",\n##D folderPath = \"Provenance\", provenanceParams=p)\n##D \n##D rows <- fromJSON(txt='[{\n##D \"name\" : \"sample3\",\n##D \"protein\" : \"p3\",\n##D \"prov:objectInputs\" : [\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample21\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample22\"\n##D ]\n##D },{\n##D \"name\" : \"sample4\",\n##D \"protein\" : \"p4\",\n##D \"prov:objectInputs\" : [\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample21\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample22\"\n##D ]\n##D }\n##D ]')\n##D \n##D labkey.insertRows(baseUrl=\"https://labkey.org/labkey/\", folderPath = \"Provenance\",\n##D schemaName=\"samples\", queryName=\"MySamples\", toInsert=rows,\n##D provenanceParams=labkey.provenance.createProvenanceParams(name=\"query step\",\n##D recordingId=ra$recordingId))\n##D \n##D ## define the data output referenced in the final step below\n##D do <- data.frame(\n##D lsid=\"urn:lsid:labkey.com:AssayRunTSVData.Folder-251:12c70994-7ce5-1038-82f0-9c1487dbd334\")\n##D \n##D labkey.provenance.stopRecording(baseUrl=\"https://labkey.org/labkey/\", folderPath = \"Provenance\",\n##D provenanceParams=labkey.provenance.createProvenanceParams(name=\"final step\",\n##D recordingId=ra$recordingId, dataOutputs=do))\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.makeRemotePath","snippet":"### Name: labkey.makeRemotePath\n### Title: Build a file path to data on a remote machine\n### Aliases: labkey.makeRemotePath\n### Keywords: file\n\n### ** Examples\n\n\n# library(Rlabkey)\n\nlabkey.pipeline.root <- \"c:/data/fcs\"\nlabkey.remote.pipeline.root <- \"/volumes/fcs\"\nfcsFile <- \"c:/data/fcs/runA/aaa.fcs\"\n\n# returns \"/volumes/fcs/runA/aaa.fcs\"\nlabkey.makeRemotePath(\n\tlocalRoot=labkey.pipeline.root,\n\tremoteRoot=labkey.remote.pipeline.root,\n\tfullPath=fcsFile);\n\n\n\n"} {"package":"Rlabkey","topic":"labkey.pipeline.getFileStatus","snippet":"### Name: labkey.pipeline.getFileStatus\n### Title: Gets the protocol file status for a pipeline\n### Aliases: labkey.pipeline.getFileStatus\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D labkey.pipeline.getFileStatus(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D taskId = \"pipelinetest:pipeline:r-copy\",\n##D path = \"r-copy\",\n##D protocolName = \"Test protocol name\",\n##D files = list(\"sample.txt\", \"result.txt\")\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.pipeline.getPipelineContainer","snippet":"### Name: labkey.pipeline.getPipelineContainer\n### Title: Gets the container in which the pipeline is defined\n### Aliases: labkey.pipeline.getPipelineContainer\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D labkey.pipeline.getPipelineContainer(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\"\n##D )\n##D \n## End(Not 
run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.pipeline.getProtocols","snippet":"### Name: labkey.pipeline.getProtocols\n### Title: Gets the protocols that have been saved for a particular\n### pipeline\n### Aliases: labkey.pipeline.getProtocols\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D labkey.pipeline.getProtocols(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D taskId = \"pipelinetest:pipeline:r-copy\",\n##D path = \"r-copy\",\n##D includeWorkbooks = FALSE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.pipeline.startAnalysis","snippet":"### Name: labkey.pipeline.startAnalysis\n### Title: Start an analysis of a set of files using a pipeline\n### Aliases: labkey.pipeline.startAnalysis\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D labkey.pipeline.startAnalysis(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D taskId = \"pipelinetest:pipeline:r-copy\",\n##D protocolName = \"Test protocol name\",\n##D path=\"r-copy\",\n##D files = list(\"sample.txt\", \"result.txt\"),\n##D protocolDescription = \"Test protocol description\",\n##D pipelineDescription = \"test pipeline description\",\n##D jsonParameters = list(assay = \"Test assay name\", comment = \"Test assay comment\"),\n##D saveProtocol = TRUE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.provenance.addRecordingStep","snippet":"### Name: labkey.provenance.addRecordingStep\n### Title: Add a step to a provenance recording\n### Aliases: labkey.provenance.addRecordingStep\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## start a provenance recording and add a recording step\n##D library(Rlabkey)\n##D \n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D \n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D materialInputs=mi)\n##D r <- labkey.provenance.startRecording(baseUrl=\"https://labkey.org/labkey/\",\n##D folderPath = \"Provenance\", provenanceParams=p)\n##D do <- data.frame(\n##D lsid=\"urn:lsid:labkey.com:AssayRunTSVData.Folder-251:12c70994-7ce5-1038-82f0-9c1487dbd334\")\n##D \n##D labkey.provenance.addRecordingStep(baseUrl=\"https://labkey.org/labkey/\", folderPath = \"Provenance\",\n##D provenanceParams=labkey.provenance.createProvenanceParams(name=\"additional step\",\n##D recordingId=r$recordingId, dataOutputs=do))\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.provenance.createProvenanceParams","snippet":"### Name: labkey.provenance.createProvenanceParams\n### Title: Create provenance parameter object\n### Aliases: labkey.provenance.createProvenanceParams\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create provenance params with material inputs and data outputs\n##D library(Rlabkey)\n##D \n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D do <- data.frame(\n##D lsid=\"urn:lsid:labkey.com:AssayRunTSVData.Folder-251:12c70994-7ce5-1038-82f0-9c1487dbd334\")\n##D \n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D materialInputs=mi, dataOutputs=do)\n##D \n##D ## create provenance params with object inputs (from an assay run)\n##D oi <- labkey.selectRows(baseUrl=\"https://labkey.org/labkey/\", folderPath = \"Provenance\",\n##D schemaName=\"assay.General.titer\",\n##D 
queryName=\"Data\",\n##D colSelect= c(\"LSID\"),\n##D colFilter=makeFilter(c(\"Run/RowId\",\"EQUAL\",\"253\")))\n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D \n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D objectInputs=oi[[\"LSID\"]], materialInputs=mi)\n##D \n##D ## add run step properties and custom properties to the provenance params\n##D props <- data.frame(\n##D \"urn:lsid:labkey.com:Vocabulary.Folder-996:ProvenanceDomain#version\"=c(22.3),\n##D \"urn:lsid:labkey.com:Vocabulary.Folder-996:ProvenanceDomain#instrumentName\"=c(\"NAb reader\"),\n##D check.names=FALSE)\n##D params <- list()\n##D params$comments <- \"adding additional step properties\"\n##D params$activityDate <- \"2022-3-21\"\n##D params$startTime <- \"2022-3-21 12:35:00\"\n##D params$endTime <- \"2022-3-22 02:15:30\"\n##D params$recordCount <- 2\n##D p <- labkey.provenance.createProvenanceParams(recordingId=ra$recordingId, name=\"step2\",\n##D properties=props, params=params)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.provenance.startRecording","snippet":"### Name: labkey.provenance.startRecording\n### Title: Start a provenance recording\n### Aliases: labkey.provenance.startRecording\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create provenance params with material inputs and data outputs and start a recording\n##D library(Rlabkey)\n##D \n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D do <- data.frame(\n##D lsid=\"urn:lsid:labkey.com:AssayRunTSVData.Folder-251:12c70994-7ce5-1038-82f0-9c1487dbd334\")\n##D \n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D materialInputs=mi, dataOutputs=do)\n##D labkey.provenance.startRecording(baseUrl=\"https://labkey.org/labkey/\",\n##D folderPath = \"Provenance\", provenanceParams=p)\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.provenance.stopRecording","snippet":"### Name: labkey.provenance.stopRecording\n### Title: Stop a provenance recording\n### Aliases: labkey.provenance.stopRecording\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D ## object inputs (from an assay run) and material inputs\n##D ##\n##D oi <- labkey.selectRows(baseUrl=\"https://labkey.org/labkey/\", folderPath = \"Provenance\",\n##D schemaName=\"assay.General.titer\",\n##D queryName=\"Data\",\n##D colSelect= c(\"LSID\"),\n##D colFilter=makeFilter(c(\"Run/RowId\",\"EQUAL\",\"253\")))\n##D mi <- data.frame(lsid=c(\"urn:lsid:labkey.com:Sample.251.MySamples:sample1\",\n##D \"urn:lsid:labkey.com:Sample.251.MySamples:sample2\"))\n##D \n##D p <- labkey.provenance.createProvenanceParams(name=\"step1\", description=\"initial step\",\n##D objectInputs=oi[[\"LSID\"]], materialInputs=mi)\n##D r <- labkey.provenance.startRecording(baseUrl=\"https://labkey.org/labkey/\",\n##D folderPath = \"Provenance\", provenanceParams=p)\n##D run <- labkey.provenance.stopRecording(baseUrl=\"https://labkey.org/labkey/\",\n##D folderPath = \"Provenance\",\n##D provenanceParams=labkey.provenance.createProvenanceParams(name=\"final step\",\n##D recordingId=r$recordingId))\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.query.import","snippet":"### Name: labkey.query.import\n### Title: Bulk import an R data frame into a LabKey 
Server table using\n### file import.\n### Aliases: labkey.query.import\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Note that users must have the necessary permissions in the database\n##D ## to be able to modify data through the use of these functions\n##D # library(Rlabkey)\n##D \n##D df <- data.frame(\n##D name=c(\"test1\",\"test2\",\"test3\"),\n##D customInt=c(1:3),\n##D customString=c(\"aaa\", \"bbb\", \"ccc\")\n##D )\n##D \n##D importedInfo <- labkey.query.import(\n##D \"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"samples\", queryName=\"SampleType1\",\n##D toImport=df, options=list(insertOption = \"MERGE\", auditBehavior = \"DETAILED\")\n##D )\n##D \n##D importedInfo$rowCount\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.rstudio.initRStudio","snippet":"### Name: labkey.rstudio.initRStudio\n### Title: Initialize a RStudio session for LabKey integration\n### Aliases: labkey.rstudio.initRStudio\n\n### ** Examples\n\n## Not run: \n##D ## RStudio console only\n##D library(Rlabkey)\n##D labkey.rstudio.initRStudio(apiKey=\"abcdef0123456789abcdef0123456789\",\n##D baseUrl=\"http://labkey/\", folderPath=\"home\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.rstudio.initReport","snippet":"### Name: labkey.rstudio.initReport\n### Title: Initialize a RStudio session for LabKey R report source editing\n### Aliases: labkey.rstudio.initReport\n\n### ** Examples\n\n## Not run: \n##D ## RStudio console only\n##D library(Rlabkey)\n##D labkey.rstudio.initReport(apiKey=\"abcdef0123456789abcdef0123456789\",\n##D baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D reportEntityId=\"0123456a-789b-1000-abcd-01234567abcde\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.rstudio.initSession","snippet":"### Name: labkey.rstudio.initSession\n### Title: Initialize a RStudio session for LabKey integration using a\n### one-time request id\n### Aliases: labkey.rstudio.initSession\n\n### ** Examples\n\n## Not run: \n##D ## RStudio console only\n##D library(Rlabkey)\n##D labkey.rstudio.initSession(requestId=\"a60228c8-9448-1036-a7c5-ab541dc15ee9\",\n##D baseUrl=\"http://labkey/\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.rstudio.isInitialized","snippet":"### Name: labkey.rstudio.isInitialized\n### Title: Check valid rlabkey session\n### Aliases: labkey.rstudio.isInitialized\n\n### ** Examples\n\n## Not run: \n##D ## RStudio console only\n##D library(Rlabkey)\n##D labkey.rstudio.isInitialized()\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.rstudio.saveReport","snippet":"### Name: labkey.rstudio.saveReport\n### Title: Update RStudio report source back to LabKey\n### Aliases: labkey.rstudio.saveReport\n\n### ** Examples\n\n## Not run: \n##D ## RStudio console only\n##D library(Rlabkey)\n##D labkey.rstudio.saveReport(folderPath=\"home\",\n##D reportEntityId=\"0123456a-789b-1000-abcd-01234567abcde\",\n##D reportFilename=\"knitrReport.Rhtml\", useWarning=TRUE)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.saveBatch","snippet":"### Name: labkey.saveBatch\n### Title: Save an assay batch object to a labkey database\n### Aliases: labkey.saveBatch\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Very simple example of an analysis flow: query some data, calculate\n##D ## some stats, then save the calculations as an assay result set in\n##D ## LabKey Server\n##D ## Note this example expects to find an assay named 
\"SimpleMeans\" in\n##D ## the apisamples project\n##D # library(Rlabkey)\n##D \n##D simpledf <- labkey.selectRows(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\", \n##D \tqueryName=\"AllTypes\")\n##D \n##D ## some dummy calculations to produce and example analysis result\n##D testtable <- simpledf[,3:4]\n##D colnames(testtable) <- c(\"IntFld\", \"DoubleFld\")\n##D row <- c(list(\"Measure\"=\"colMeans\"), colMeans(testtable, na.rm=TRUE))\n##D results <- data.frame(row, row.names=NULL, stringsAsFactors=FALSE)\n##D row <- c(list(\"Measure\"=\"colSums\"), colSums(testtable, na.rm=TRUE))\n##D results <- rbind(results, as.vector(row))\n##D \n##D bprops <- list(LabNotes=\"this is a simple demo\")\n##D bpl <- list(name=paste(\"Batch \", as.character(date())),properties=bprops) \n##D rpl <- list(name=paste(\"Assay Run \", as.character(date())))\n##D \n##D assayInfo<- labkey.saveBatch(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \t\"SimpleMeans\", \n##D \tresults, \n##D \tbatchPropertyList=bpl,\n##D \trunPropertyList=rpl\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.createContainer","snippet":"### Name: labkey.security.createContainer\n### Title: Creates a new container, which may be a project, folder, or\n### workbook, on the server\n### Aliases: labkey.security.createContainer\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.createContainer(baseUrl=\"http://labkey/\", parentPath = \"/home\",\n##D name = \"NewFolder\", description = \"My new folder has this description\",\n##D folderType = \"Collaboration\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.deleteContainer","snippet":"### Name: labkey.security.deleteContainer\n### Title: Deletes an existing container, which may be a project, folder,\n### or workbook\n### Aliases: labkey.security.deleteContainer\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.deleteContainer(baseUrl=\"http://labkey/\", folderPath = \"/home/FolderToDelete\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.getContainers","snippet":"### Name: labkey.security.getContainers\n### Title: Returns information about the specified container\n### Aliases: labkey.security.getContainers\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.getContainers(\n##D baseUrl=\"http://labkey/\", folderPath = \"home\",\n##D includeEffectivePermissions = FALSE, includeSubfolders = TRUE, depth = 2,\n##D includeChildWorkbooks = FALSE, includeStandardProperties = FALSE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.impersonateUser","snippet":"### Name: labkey.security.impersonateUser\n### Title: Start impersonating a user\n### Aliases: labkey.security.impersonateUser\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.impersonateUser(baseUrl=\"http://labkey/\", folderPath = \"/home\",\n##D email = \"reader@localhost.test\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.moveContainer","snippet":"### Name: labkey.security.moveContainer\n### Title: Moves an existing container, which may be a folder or workbook\n### Aliases: labkey.security.moveContainer\n### Keywords: IO\n\n### ** 
Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.moveContainer(baseUrl=\"http://labkey/\", folderPath = \"/home/FolderToMove\",\n##D destinationParent = \"/OtherProject\", addAlias = TRUE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.renameContainer","snippet":"### Name: labkey.security.renameContainer\n### Title: Rename an existing container at the given container path\n### Aliases: labkey.security.renameContainer\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.renameContainer(baseUrl=\"http://labkey/\", folderPath = \"/home/OriginalFolder\",\n##D name = \"NewFolderName\", title = \"New Folder Title\", addAlias = TRUE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.security.stopImpersonating","snippet":"### Name: labkey.security.stopImpersonating\n### Title: Stop impersonating a user\n### Aliases: labkey.security.stopImpersonating\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.security.stopImpersonating(baseUrl=\"http://labkey/\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.selectRows","snippet":"### Name: labkey.selectRows\n### Title: Retrieve data from a labkey database\n### Aliases: labkey.selectRows\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## select from a list named AllTypes\n##D # library(Rlabkey)\n##D \n##D rows <- labkey.selectRows(\n##D \tbaseUrl=\"http://localhost:8080/labkey\",\n##D \tfolderPath=\"/apisamples\",\n##D \tschemaName=\"lists\", \n##D \tqueryName=\"AllTypes\")\n##D \t\n##D ## select from a view on that list\n##D viewrows <- labkey.selectRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"Lists\", queryName=\"AllTypes\",\n##D viewName=\"rowbyrow\")\n##D \n##D ## select a subset of columns\n##D colSelect=c(\"TextFld\", \"IntFld\")\n##D subsetcols <- labkey.selectRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colSelect=colSelect)\n##D \n##D ## including columns from a lookup (foreign key) field\n##D lookupcols <- labkey.selectRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colSelect=\"TextFld,IntFld,IntFld/LookupValue\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.setDebugMode","snippet":"### Name: labkey.setDebugMode\n### Title: Helper function to enable/disable debug mode.\n### Aliases: labkey.setDebugMode\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D labkey.setDebugMode(TRUE)\n##D labkey.executeSql(\n##D baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/home\",\n##D schemaName=\"core\",\n##D sql = \"select * from containers\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.setDefaults","snippet":"### Name: labkey.setDefaults\n### Title: Set the default parameters used for all http or https requests\n### Aliases: labkey.setDefaults\n\n### ** Examples\n\n\n## Example of setting and clearing email/password, API key, and Session key\n# library(Rlabkey)\n\nlabkey.setDefaults(email=\"testing@localhost.test\", password=\"password\")\n\n## Functions invoked at this point respect the role assignments and\n## other authorizations of the specified user\n\n## A user can create an API key via the LabKey UI and set it as 
follows:\n\nlabkey.setDefaults(apiKey=\"abcdef0123456789abcdef0123456789\")\n\n## Functions invoked at this point respect the role assignments and\n## other authorizations of the user who created the API key\n\n## A user can create a session key via the LabKey UI and set it as follows:\n\nlabkey.setDefaults(apiKey=\"0123456789abcdef0123456789abcdef\")\n\n## Functions invoked at this point share authorization\n## and session information with the user's browser session\n\nlabkey.setDefaults() # called without any parameters will reset/clear the environment variables\n\n\n\n"} {"package":"Rlabkey","topic":"labkey.setModuleProperty","snippet":"### Name: labkey.setModuleProperty\n### Title: Set module property value\n### Aliases: labkey.setModuleProperty\n\n### ** Examples\n\n## Not run: \n##D library(Rlabkey)\n##D labkey.setModuleProperty(baseUrl=\"http://labkey/\", folderPath=\"flowProject\",\n##D moduleName=\"flow\", propName=\"ExportToScriptFormat\", propValue=\"zip\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.setWafEncoding","snippet":"### Name: labkey.setWafEncoding\n### Title: Helper function to enable/disable wafEncoding mode.\n### Aliases: labkey.setWafEncoding\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D labkey.setWafEncoding(FALSE)\n##D labkey.executeSql(\n##D baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/home\",\n##D schemaName=\"core\",\n##D sql = \"select * from containers\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.storage.create","snippet":"### Name: labkey.storage.create\n### Title: Create a new LabKey Freezer Manager storage item\n### Aliases: labkey.storage.create\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D ## create a storage Freezer with a Shelf and 2 Plates on that Shelf\n##D \n##D freezer <- labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Freezer\",\n##D props=list(name=\"Test Freezer\", description=\"My example storage freezer\")\n##D )\n##D \n##D shelf = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Shelf\",\n##D props=list(name=\"Test Shelf\", locationId=freezer$data$rowId )\n##D )\n##D \n##D plateType = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Storage Unit Type\",\n##D props=list(name=\"Test 8X12 Well Plate\", unitType=\"Plate\", rows=8, cols=12 )\n##D )\n##D \n##D plate1 = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Terminal Storage Location\",\n##D props=list(name=\"Plate #1\", typeId=plateType$data$rowId, locationId=shelf$data$rowId )\n##D )\n##D \n##D plate2 = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Terminal Storage Location\",\n##D props=list(name=\"Plate #2\", typeId=plateType$data$rowId, locationId=shelf$data$rowId )\n##D )\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.storage.delete","snippet":"### Name: labkey.storage.delete\n### Title: Delete a LabKey Freezer Manager storage item\n### Aliases: labkey.storage.delete\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D ## delete a freezer and its child locations and terminal storage locations\n##D \n##D freezer <- labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Freezer\",\n##D props=list(name=\"Test Freezer\", 
description=\"My example storage freezer\")\n##D )\n##D \n##D shelf = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Shelf\",\n##D props=list(name=\"Test Shelf\", locationId=freezer$data$rowId )\n##D )\n##D \n##D plateType = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Storage Unit Type\",\n##D props=list(name=\"Test 8X12 Well Plate\", unitType=\"Plate\", rows=8, cols=12 )\n##D )\n##D \n##D plate1 = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Terminal Storage Location\",\n##D props=list(name=\"Plate #1\", typeId=plateType$data$rowId, locationId=shelf$data$rowId )\n##D )\n##D \n##D plate2 = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Terminal Storage Location\",\n##D props=list(name=\"Plate #2\", typeId=plateType$data$rowId, locationId=shelf$data$rowId )\n##D )\n##D \n##D # NOTE: this will delete freezer, shelf, plate1 and plate2 but it will not delete\n##D # the plateType as that is not a part of the freezer hierarchy\n##D freezer <- labkey.storage.delete(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Freezer\",\n##D rowId=freezer$data$rowId\n##D )\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.storage.update","snippet":"### Name: labkey.storage.update\n### Title: Update a LabKey Freezer Manager storage item\n### Aliases: labkey.storage.update\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D ## create a storage unit type and then update it to change some properties\n##D \n##D plateType = labkey.storage.create(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Storage Unit Type\",\n##D props=list(name=\"Test 8X12 Well Plate\", unitType=\"Plate\", rows=8, cols=12 )\n##D )\n##D \n##D plateType = labkey.storage.update(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D type=\"Storage Unit Type\",\n##D props=list(rowId=plateType$data$rowId, positionFormat=\"NumAlpha\", positionOrder=\"ColumnRow\" )\n##D )\n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.transform.getRunPropertyValue","snippet":"### Name: labkey.transform.getRunPropertyValue\n### Title: Assay transform script helper function to get a run property\n### value from a data.frame\n### Aliases: labkey.transform.getRunPropertyValue\n\n### ** Examples\n\n## Not run: \n##D \n##D # library(Rlabkey)\n##D \n##D run.props = labkey.transform.readRunPropertiesFile(\"${runInfo}\");\n##D run.data.file = labkey.transform.getRunPropertyValue(run.props, \"runDataFile\");\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.transform.readRunPropertiesFile","snippet":"### Name: labkey.transform.readRunPropertiesFile\n### Title: Assay transform script helper function to read a run properties\n### file\n### Aliases: labkey.transform.readRunPropertiesFile\n\n### ** Examples\n\n## Not run: \n##D \n##D # library(Rlabkey)\n##D \n##D labkey.transform.readRunPropertiesFile(\"${runInfo}\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.truncateTable","snippet":"### Name: labkey.truncateTable\n### Title: Delete all rows from a table\n### Aliases: labkey.truncateTable\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## create a data frame and infer it's fields\n##D library(Rlabkey)\n##D \n##D labkey.truncateTable(baseUrl=\"http://labkey/\", folderPath=\"home\",\n##D schemaName=\"lists\", 
queryName=\"people\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.updateRows","snippet":"### Name: labkey.updateRows\n### Title: Update existing rows of data in a labkey database\n### Aliases: labkey.updateRows\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Insert, update and delete\n##D ## Note that users must have the necessary permissions in the database\n##D ## to be able to modify data through the use of these functions\n##D # library(Rlabkey)\n##D \n##D newrow <- data.frame(\n##D \tDisplayFld=\"Inserted from R\"\n##D \t, TextFld=\"how its done\"\n##D \t, IntFld= 98 \n##D \t, DoubleFld = 12.345\n##D \t, DateTimeFld = \"03/01/2010\"\n##D \t, BooleanFld= FALSE\n##D \t, LongTextFld = \"Four score and seven years ago\"\n##D #\t, AttachmentFld = NA \t\t#attachment fields not supported \n##D \t, RequiredText = \"Veni, vidi, vici\"\n##D \t, RequiredInt = 0\n##D \t, Category = \"LOOKUP2\"\n##D \t, stringsAsFactors=FALSE)\n##D \n##D insertedRow <- labkey.insertRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toInsert=newrow, options=list(auditBehavior=\"DETAILED\",\n##D auditUserComment=\"testing audit comment for insert\"))\n##D newRowId <- insertedRow$rows[[1]]$RowId\n##D \n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D selectedRow\n##D \n##D updaterow=data.frame(\n##D \tRowId=newRowId\n##D \t, DisplayFld=\"Updated from R\"\n##D \t, TextFld=\"how to update\"\n##D \t, IntFld= 777 \n##D \t, stringsAsFactors=FALSE)\n##D \n##D updatedRow <- labkey.updateRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toUpdate=updaterow, options=list(auditBehavior=\"DETAILED\",\n##D auditUserComment=\"testing audit comment for update\"))\n##D selectedRow<-labkey.selectRows(\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D colFilter=makeFilter(c(\"RowId\", \"EQUALS\", newRowId)))\n##D selectedRow\n##D \n##D deleterow <- data.frame(RowId=newRowId, stringsAsFactors=FALSE)\n##D result <- labkey.deleteRows(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\", schemaName=\"lists\", queryName=\"AllTypes\",\n##D toDelete=deleterow)\n##D str(result)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.delete","snippet":"### Name: labkey.webdav.delete\n### Title: Deletes the provided file/folder on a LabKey Server via WebDAV\n### Aliases: labkey.webdav.delete\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D #delete an entire directory and contents\n##D labkey.webdav.delete(baseUrl=\"http://labkey/\", folderPath=\"home\", remoteFilePath=\"folder1\")\n##D \n##D #delete single file\n##D labkey.webdav.delete(baseUrl=\"http://labkey/\", folderPath=\"home\", remoteFilePath=\"folder/file.txt\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.downloadFolder","snippet":"### Name: labkey.webdav.downloadFolder\n### Title: Recursively download a folder via WebDAV\n### Aliases: labkey.webdav.downloadFolder\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D ## download folder from a LabKey Server\n##D library(Rlabkey)\n##D \n##D labkey.webdav.downloadFolder(baseUrl=\"http://labkey/\",\n##D 
folderPath=\"home\",\n##D remoteFilePath=\"folder1\",\n##D localBaseDir=\"destFolder\",\n##D overwrite=TRUE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.get","snippet":"### Name: labkey.webdav.get\n### Title: Download a file via WebDAV\n### Aliases: labkey.webdav.get\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D ## download a single file from a LabKey Server\n##D library(Rlabkey)\n##D \n##D labkey.webdav.get(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"folder/myFile.txt\",\n##D localFilePath=\"myDownloadedFile.txt\",\n##D overwrite=TRUE\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.listDir","snippet":"### Name: labkey.webdav.listDir\n### Title: List the contents of a LabKey Server folder via WebDAV\n### Aliases: labkey.webdav.listDir\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D json <- labkey.webdav.listDir(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"myFolder\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.mkDir","snippet":"### Name: labkey.webdav.mkDir\n### Title: Create a folder via WebDAV\n### Aliases: labkey.webdav.mkDir\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.webdav.mkDir(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"toCreate\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.mkDirs","snippet":"### Name: labkey.webdav.mkDirs\n### Title: Create a folder via WebDAV\n### Aliases: labkey.webdav.mkDirs\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.webdav.mkDirs(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"folder1/folder2/toCreate\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.pathExists","snippet":"### Name: labkey.webdav.pathExists\n### Title: Tests if a path exists on a LabKey Server via WebDAV\n### Aliases: labkey.webdav.pathExists\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D # Test folder\n##D labkey.webdav.pathExists(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"pathToTest\"\n##D )\n##D \n##D # Test file\n##D labkey.webdav.pathExists(\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"folder/fileToTest.txt\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.webdav.put","snippet":"### Name: labkey.webdav.put\n### Title: Upload a file via WebDAV\n### Aliases: labkey.webdav.put\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D ## upload a single file to a LabKey Server\n##D library(Rlabkey)\n##D \n##D labkey.webdav.put(\n##D localFile=\"myFileToUpload.txt\",\n##D baseUrl=\"http://labkey/\",\n##D folderPath=\"home\",\n##D remoteFilePath=\"myFileToUpload.txt\"\n##D )\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"labkey.whoAmI","snippet":"### Name: labkey.whoAmI\n### Title: Call the whoami API\n### Aliases: labkey.whoAmI\n### Keywords: IO\n\n### ** Examples\n\n## Not run: \n##D \n##D library(Rlabkey)\n##D \n##D labkey.whoAmI(baseUrl=\"http://labkey/\")\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"lsFolders","snippet":"### Name: lsFolders\n### Title: List the available folder paths\n### Aliases: lsFolders\n### Keywords: 
file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## get a list of projects and folders\n##D # library(Rlabkey)\n##D \n##D lks<- getSession(\"https://www.labkey.org\", \"/home\")\n##D \n##D #returns values \"/home\" , \"/home/_menus\" , ...\n##D lsFolders(lks)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"lsProjects","snippet":"### Name: lsProjects\n### Title: List the projects available at a given LabKey Server address\n### Aliases: lsProjects\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## get list of projects on server, connect a session in one project,\n##D ## then list the folders in that project\n##D # library(Rlabkey)\n##D \n##D lsProjects(\"https://www.labkey.org\")\n##D \n##D lkorg <- getSession(\"https://www.labkey.org\", \"/home\")\n##D lsFolders(lkorg)\n##D \n##D lkorg <- getSession(\"https://www.labkey.org\", \"/home/Study/ListDemo\")\n##D lsSchemas(lkorg)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"lsSchemas","snippet":"### Name: lsSchemas\n### Title: List the available schemas\n### Aliases: lsSchemas\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## get a list of schemas available in the current session context\n##D # library(Rlabkey)\n##D \n##D lks<- getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\")\n##D \n##D #returns several schema names, e.g. \"lists\", \"core\", \"MS1\", etc.\n##D lsSchemas(lks)\n##D \n## End(Not run)\n\n\n"} {"package":"Rlabkey","topic":"makeFilter","snippet":"### Name: makeFilter\n### Title: Builds filters to be used in labkey.selectRows and getRows\n### Aliases: makeFilter\n### Keywords: file\n\n### ** Examples\n\n\n# library(Rlabkey)\n\n## Two filters, ANDed together\nmakeFilter(c(\"TextFld\",\"CONTAINS\",\"h\"),\n c(\"BooleanFld\",\"EQUAL\",\"TRUE\"))\n\n## Using \"in\" operator:\nmakeFilter(c(\"RowId\",\"IN\",\"2;3;6\"))\n\n## Using \"missing\" operator:\nmakeFilter(c(\"IntFld\",\"MISSING\",\"\"))\n\n\n\n"} {"package":"Rlabkey","topic":"saveResults","snippet":"### Name: saveResults\n### Title: Returns an object representing a LabKey schema\n### Aliases: saveResults\n### Keywords: file\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Very simple example of an analysis flow: query some data,\n##D ## calculate some stats, then save the calculations as an assay\n##D ## result set in LabKey Server\n##D # library(Rlabkey)\n##D \n##D s<- getSession(baseUrl=\"http://localhost:8080/labkey\",\n##D folderPath=\"/apisamples\")\n##D scobj <- getSchema(s, \"lists\")\n##D simpledf <- getRows(s, scobj$AllTypes)\n##D \n##D ## some dummy calculations to produce an example analysis result\n##D testtable <- simpledf[,3:4]\n##D colnames(testtable) <- c(\"IntFld\", \"DoubleFld\")\n##D row <- c(list(\"Measure\"=\"colMeans\"), colMeans(testtable, na.rm=TRUE))\n##D results <- data.frame(row, row.names=NULL, stringsAsFactors=FALSE)\n##D row <- c(list(\"Measure\"=\"colSums\"), colSums(testtable, na.rm=TRUE))\n##D results <- rbind(results, as.vector(row))\n##D \n##D bprops <- list(LabNotes=\"this is a simple demo\")\n##D bpl<- list(name=paste(\"Batch \", as.character(date())),properties=bprops) \n##D rpl<- list(name=paste(\"Assay Run \", as.character(date())))\n##D \n##D assayInfo<- saveResults(s, \"SimpleMeans\", results,\n##D batchPropertyList=bpl, runPropertyList=rpl)\n##D \n## End(Not run)\n\n\n"} {"package":"gglm","topic":"gglm","snippet":"### Name: gglm\n### Title: gglm\n### Aliases: gglm\n\n### ** Examples\n\ndata(mtcars)\nm1 <- lm(mpg ~ cyl + disp + hp, data = 
mtcars)\ngglm(m1)\n\n\n"} {"package":"gglm","topic":"list_model_classes","snippet":"### Name: list_model_classes\n### Title: list_model_classes\n### Aliases: list_model_classes\n\n### ** Examples\n\nlist_model_classes()\n\n\n"} {"package":"gglm","topic":"stat_cooks_leverage","snippet":"### Name: stat_cooks_leverage\n### Title: stat_cooks_leverage\n### Aliases: stat_cooks_leverage\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_cooks_leverage()\n\n\n"} {"package":"gglm","topic":"stat_cooks_obs","snippet":"### Name: stat_cooks_obs\n### Title: stat_cooks_obs\n### Aliases: stat_cooks_obs\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_cooks_obs()\n\n\n"} {"package":"gglm","topic":"stat_fitted_resid","snippet":"### Name: stat_fitted_resid\n### Title: stat_fitted_resid\n### Aliases: stat_fitted_resid\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_fitted_resid()\n\n\n"} {"package":"gglm","topic":"stat_normal_qq","snippet":"### Name: stat_normal_qq\n### Title: stat_normal_qq\n### Aliases: stat_normal_qq\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_normal_qq()\n\n\n"} {"package":"gglm","topic":"stat_resid_hist","snippet":"### Name: stat_resid_hist\n### Title: stat_resid_hist\n### Aliases: stat_resid_hist\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_resid_hist()\n\n\n"} {"package":"gglm","topic":"stat_resid_leverage","snippet":"### Name: stat_resid_leverage\n### Title: stat_resid_leverage\n### Aliases: stat_resid_leverage\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_resid_leverage()\n\n\n"} {"package":"gglm","topic":"stat_scale_location","snippet":"### Name: stat_scale_location\n### Title: stat_scale_location\n### Aliases: stat_scale_location\n\n### ** Examples\n\ndata(mtcars)\nmodel <- lm(mpg ~ cyl + disp + hp, data = mtcars)\nggplot2::ggplot(data = model) + stat_scale_location()\n\n\n"} {"package":"stats4teaching","topic":"anova1way","snippet":"### Name: anova1way\n### Title: One-Way ANOVA\n### Aliases: anova1way\n\n### ** Examples\n\nanova1way(k=4,n=c(40,31,50),mean=c(55,52,48,59),coefvar=c(0.12,0.15,0.13),conf.level = 0.99)\n\nanova1way(k=3,n=15,mean=c(10,15,20),sigma =c(1,1.25,1.1),method =\"B\")\n\n\n\n\n"} {"package":"stats4teaching","topic":"anova2way","snippet":"### Name: anova2way\n### Title: Two-Way ANOVA\n### Aliases: anova2way\n\n### ** Examples\n\n\nanova2way(k=3, j=2, n=c(3,4,4,5,5,3), mean = c(1,4,2.5,5,6,3.75), sigma = c(1,1.5))\n\n\n\n"} {"package":"stats4teaching","topic":"cassay","snippet":"### Name: cassay\n### Title: Clinical Assay\n### Aliases: cassay\n\n### ** Examples\n\ncassay(c(10,12), mean = 115, sigma = 7.5, d.cohen= 1.5)\ncassay(24, mean = 100, sigma = 5.1)\n\n\n\n"} {"package":"stats4teaching","topic":"generator","snippet":"### Name: generator\n### Title: Generation of multivariate normal data.\n### Aliases: generator\n\n### ** Examples\n\ngenerator(4,0,2)\n\nsigma <- matrix(c(1,0.8,0.8,1),nrow = 2, byrow = 2)\nd <- generator(4,mean = c(1,2),sigma, sigmaSup = 1)\n\ngenerator(10,1,coefvar = c(0.3,0.5))\n\ngenerator(c(10,11,10),c(1,2),coefvar = c(0.3,0.5))\n\n\n\n\n"} 
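The generator() examples above request a target mean and correlation/covariance structure but do not show what comes back. A minimal sanity-check sketch, assuming generator() accepts the call forms shown above and returns the simulated observations as a numeric data frame or matrix coercible with as.matrix():

library(stats4teaching)

## same target correlation structure as in the generator() example above
sigma <- matrix(c(1, 0.8, 0.8, 1), nrow = 2, byrow = TRUE)
set.seed(123)
x <- generator(200, mean = c(1, 2), sigma)

## with a larger n the sample moments should approach the requested ones
round(colMeans(as.matrix(x)), 1)  # roughly c(1, 2)
round(cor(as.matrix(x)), 2)       # off-diagonal roughly 0.8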
{"package":"stats4teaching","topic":"is.corrmatrix","snippet":"### Name: is.corrmatrix\n### Title: Correlation matrix\n### Aliases: is.corrmatrix\n\n### ** Examples\n\n\nm1<-matrix(c(1,2,2,1),nrow = 2,byrow = TRUE)\nis.corrmatrix(m1)\n\nm2<-matrix(c(1,0.8,0.8,1),nrow = 2,byrow = TRUE)\nis.corrmatrix(m2)\n\nm3<-matrix(c(1,0.7,0.8,1),nrow = 2,byrow = TRUE)\nis.corrmatrix(m3)\n\n\n\n"} {"package":"stats4teaching","topic":"is.covmatrix","snippet":"### Name: is.covmatrix\n### Title: Covariance matrix\n### Aliases: is.covmatrix\n\n### ** Examples\n\n\nm1 <- matrix(c(2,1.5,1.5,1), nrow = 2, byrow = TRUE)\nis.covmatrix(m1)\n\nm2 <- matrix(c(1,0.8,0.8,1), nrow = 2, byrow = TRUE)\nis.covmatrix(m2)\n\nm3 <- matrix(c(1,0.7,0.8,1), nrow = 2, byrow = TRUE)\nis.covmatrix(m3)\n\n\n\n"} {"package":"stats4teaching","topic":"is.posDef","snippet":"### Name: is.posDef\n### Title: Positive definited matrices\n### Aliases: is.posDef\n\n### ** Examples\n\nA <- matrix(c(1,2,2,1), nrow = 2, byrow = TRUE)\nis.posDef(A)\n\nB <- matrix(c(1,2,3,3,1,2,1,2,1), nrow = 3, byrow = TRUE)\nis.posDef(B)\n\n\n\n"} {"package":"stats4teaching","topic":"is.semiposDef","snippet":"### Name: is.semiposDef\n### Title: Semi-Positive definited matrices\n### Aliases: is.semiposDef\n\n### ** Examples\n\nA<-matrix(c(2.2,1,1,3), nrow = 2, byrow = TRUE)\nis.semiposDef(A)\n\nB<-matrix(c(1,2,3,3,1,2,1,2,1), nrow = 3, byrow = TRUE)\nis.semiposDef(B)\n\n\n\n"} {"package":"stats4teaching","topic":"mCorrCov","snippet":"### Name: mCorrCov\n### Title: Correlation & Covariance matrices.\n### Aliases: mCorrCov\n\n### ** Examples\n\nA <- matrix(c(1,2,2,1), nrow = 2, byrow = TRUE)\nmCorrCov(A)\n\nB <- matrix(c(1,0.8,0.7,0.8,1,0.55,0.7,0.55,1), nrow = 3, byrow = TRUE)\nmCorrCov(B,mu = c(2,3.5,1), coefvar = c(0.3,0.5,0.7))\n\n\n\n"} {"package":"stats4teaching","topic":"pairedm","snippet":"### Name: pairedm\n### Title: Paired measures (T-Test & Regression)\n### Aliases: pairedm\n\n### ** Examples\n\n\npairedm(10, mean = c(10,2), sigma = c(1.2,0.7), rho = 0.5, alternative = \"g\")\npairedm(15, mean =c(1,2), coefvar = 0.1, random = TRUE)\n\n\n\n"} {"package":"stats4teaching","topic":"repeatedm","snippet":"### Name: repeatedm\n### Title: Repeated Measures (ANOVA & Multiple Regression)\n### Aliases: repeatedm\n\n### ** Examples\n\nrandm <- clusterGeneration::genPositiveDefMat(8, covMethod = \"unifcorrmat\")\nmcov <- randm$Sigma\nSigma <- cov2cor(mcov)\nis.corrmatrix(Sigma)\nrepeatedm(k = 8, n = 8, mean = c(20,5, 30, 15),sigma = Sigma, sigmaSup = 2, dec = 2)\n\nrepeatedm(k = 5, n = 5, mean = c(8,10,5,14,22.5), random = TRUE)\nrepeatedm(k = 3, n = 8, mean = c(10,5,22.5), sigma = c(3.3,1.5,5), dec = 2)\n\n\n\n"} {"package":"stats4teaching","topic":"sample2indp","snippet":"### Name: sample2indp\n### Title: Independent normal data\n### Aliases: sample2indp\n\n### ** Examples\n\nsample2indp(c(10,12),mean = c(2,3),coefvar = c(0.3,0.5), alternative = \"less\", delta = -1)\n\nsample2indp(8,sigma = c(1,1.5), dec = 3)\n\n\n\n"} {"package":"stats4teaching","topic":"sample2indp.pow","snippet":"### Name: sample2indp.pow\n### Title: Independent normal data\n### Aliases: sample2indp.pow\n\n### ** Examples\n\nsample2indp.pow(n1 = 30, mean = c(2,3), s1= 0.5, d.cohen = 0.8, power = 0.85, delta = 1)\nsample2indp.pow(n1 = 50, mean = c(15.5,16), s1=2 , d.cohen = 0.3, power = 0.33, delta = 0.5)\n\n\n\n"} {"package":"sdpt3r","topic":"control_theory","snippet":"### Name: control_theory\n### Title: Control Theory\n### Aliases: control_theory\n\n### ** Examples\n\nB <- 
matrix(list(),2,1)\nB[[1]] <- matrix(c(-.8,1.2,-.5,-1.1,-1,-2.5,2,.2,-1),nrow=3,byrow=TRUE)\nB[[2]] <- matrix(c(-1.5,.5,-2,1.1,-2,.2,-1.4,1.1,-1.5),nrow=3,byrow=TRUE)\n\nout <- control_theory(B)\n\n\n\n"} {"package":"sdpt3r","topic":"doptimal","snippet":"### Name: doptimal\n### Title: D-Optimal Experimental Design\n### Aliases: doptimal\n\n### ** Examples\n\ndata(DoptDesign)\n\nout <- doptimal(DoptDesign)\n\n\n\n"} {"package":"sdpt3r","topic":"dwd","snippet":"### Name: dwd\n### Title: Distance Weighted Discrimination\n### Aliases: dwd\n\n### ** Examples\n\ndata(Andwd)\ndata(Apdwd)\npenalty <- 0.5\n\n#Not Run\n#out <- dwd(Apdwd,Andwd,penalty)\n\n\n\n"} {"package":"sdpt3r","topic":"etp","snippet":"### Name: etp\n### Title: Educational Testing Problem\n### Aliases: etp\n\n### ** Examples\n\ndata(Betp)\n\nout <- etp(Betp)\n\n\n\n"} {"package":"sdpt3r","topic":"gpp","snippet":"### Name: gpp\n### Title: Graph Partitioning Problem\n### Aliases: gpp\n\n### ** Examples\n\ndata(Bgpp)\nalpha <- nrow(Bgpp)\n\nout <- gpp(Bgpp, alpha)\n\n\n\n"} {"package":"sdpt3r","topic":"lmi1","snippet":"### Name: lmi1\n### Title: Linear Matrix Inequality 1\n### Aliases: lmi1\n\n### ** Examples\n\nB <- matrix(c(-1,5,1,0,-2,1,0,0,-1), nrow=3)\n\n#Not Run\n#out <- lmi1(B)\n\n\n\n"} {"package":"sdpt3r","topic":"lmi2","snippet":"### Name: lmi2\n### Title: Linear Matrix Inequality 2\n### Aliases: lmi2\n\n### ** Examples\n\nA1 <- matrix(c(-1,0,1,0,-2,1,0,0,-1),3,3)\nA2 <- A1 + 0.1*t(A1)\nB <- matrix(c(1,3,5,2,4,6),3,2)\n\nout <- lmi2(A1,A2,B)\n\n\n\n"} {"package":"sdpt3r","topic":"lmi3","snippet":"### Name: lmi3\n### Title: Linear Matrix Inequality 3\n### Aliases: lmi3\n\n### ** Examples\n\nA <- matrix(c(-1,0,1,0,-2,1,0,0,-1),3,3)\nB <- matrix(c(1,2,3,4,5,6), 2, 3)\nG <- matrix(1,3,3)\n\nout <- lmi3(A,B,G)\n\n\n\n"} {"package":"sdpt3r","topic":"logcheby","snippet":"### Name: logcheby\n### Title: Log Chebyshev Approximation\n### Aliases: logcheby\n\n### ** Examples\n\ndata(Blogcheby)\ndata(flogcheby)\n\n#Not Run\n#out <- logcheby(Blogcheby, flogcheby)\n\n\n\n"} {"package":"sdpt3r","topic":"lovasz","snippet":"### Name: lovasz\n### Title: Lovasz Number of a Graph\n### Aliases: lovasz\n\n### ** Examples\n\ndata(Glovasz)\n\nout <- lovasz(Glovasz)\n\n\n\n"} {"package":"sdpt3r","topic":"maxcut","snippet":"### Name: maxcut\n### Title: Max-Cut Problem\n### Aliases: maxcut\n\n### ** Examples\n\ndata(Bmaxcut)\n\nout <- maxcut(Bmaxcut)\n\n\n\n"} {"package":"sdpt3r","topic":"maxkcut","snippet":"### Name: maxkcut\n### Title: Max-kCut Problem\n### Aliases: maxkcut\n\n### ** Examples\n\ndata(Bmaxkcut)\n\nout <- maxkcut(Bmaxkcut,2)\n\n\n\n"} {"package":"sdpt3r","topic":"minelips","snippet":"### Name: minelips\n### Title: The Minimum Ellipsoid Problem\n### Aliases: minelips\n\n### ** Examples\n\ndata(Vminelips)\n\n#Not Run\n#out <- minelips(Vminelips)\n\n\n\n"} {"package":"sdpt3r","topic":"nearcorr","snippet":"### Name: nearcorr\n### Title: Nearest Correlation Matrix Problem\n### Aliases: nearcorr\n\n### ** Examples\n\ndata(Hnearcorr)\n\nout <- nearcorr(Hnearcorr)\n\n\n\n"} {"package":"sdpt3r","topic":"smat","snippet":"### Name: smat\n### Title: Create a Symmetrix Matrix\n### Aliases: smat\n\n### ** Examples\n\n\ny <- c(1,0.00000279, 3.245, 2.140, 2.44, 2.321, 4.566)\n\nblk <- matrix(list(),1,2)\nblk[[1,1]] <- \"s\"\nblk[[1,2]] <- 3\n\nP <- smat(blk,1, y)\n\n\n\n"} {"package":"sdpt3r","topic":"sqlp","snippet":"### Name: sqlp\n### Title: Semidefinite Quadratic Linear Programming Solver\n### Aliases: sqlp\n\n### ** Examples\n\n\nblk = c(\"l\" 
= 2)\nC = matrix(c(1,1),nrow=1)\nA = matrix(c(1,3,4,-1), nrow=2)\nAt = t(A)\nb = c(12,10)\nout = sqlp(blk,list(At),list(C),b)\n\n\n\n"} {"package":"sdpt3r","topic":"svec","snippet":"### Name: svec\n### Title: Upper Triangular Vectorization\n### Aliases: svec\n\n### ** Examples\n\n\ndata(Hnearcorr)\nblk <- matrix(list(),1,2)\nblk[[1]] <- \"s\"\nblk[[2]] <- nrow(Hnearcorr)\n\nsvec(blk,Hnearcorr)\n\n\n\n"} {"package":"sdpt3r","topic":"toep","snippet":"### Name: toep\n### Title: Toeplitz Approximation Problem\n### Aliases: toep\n\n### ** Examples\n\ndata(Ftoep)\n\n#Not Run\n#out <- toep(Ftoep)\n\n\n\n"} {"package":"rbi","topic":"Equals.bi_model","snippet":"### Name: Equals.bi_model\n### Title: Check if two models are equal\n### Aliases: Equals.bi_model ==.bi_model `==.bi_model`\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ == PZ # TRUE\n\n\n"} {"package":"rbi","topic":"Extract.bi_model","snippet":"### Name: Extract.bi_model\n### Title: Subset model lines\n### Aliases: Extract.bi_model [.bi_model `[.bi_model`\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ[3:4]\n\n\n"} {"package":"rbi","topic":"Extract_assign.bi_model","snippet":"### Name: Extract_assign.bi_model\n### Title: Subset and replace model lines\n### Aliases: Extract_assign.bi_model [<-.bi_model `[<-.bi_model`\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ[3:4] <- c(\"const e = 0.4\", \"const m_l = 0.05\")\n\n\n"} {"package":"rbi","topic":"Unequals.bi_model","snippet":"### Name: Unequals.bi_model\n### Title: Check if two models are unequal\n### Aliases: Unequals.bi_model !=.bi_model `!=.bi_model`\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ != PZ # FALSE\n\n\n"} {"package":"rbi","topic":"attach_data","snippet":"### Name: attach_data\n### Title: Attach a new file or data set to a 'libbi' object\n### Aliases: attach_data attach_data.libbi\n\n### ** Examples\n\nbi <- libbi(model = system.file(package = \"rbi\", \"PZ.bi\"))\nexample_output <- bi_read(system.file(package = \"rbi\", \"example_output.nc\"))\nbi <- attach_data(bi, \"output\", example_output)\n\n\n"} {"package":"rbi","topic":"bi_contents","snippet":"### Name: bi_contents\n### Title: Bi contents\n### Aliases: bi_contents\n\n### ** Examples\n\nexample_output_file <- system.file(package = \"rbi\", \"example_output.nc\")\nbi_contents(example_output_file)\n\n\n"} {"package":"rbi","topic":"bi_file_summary","snippet":"### Name: bi_file_summary\n### Title: NetCDF File Summary\n### Aliases: bi_file_summary\n\n### ** Examples\n\nexample_output_file <- system.file(package = \"rbi\", \"example_output.nc\")\nbi_file_summary(example_output_file)\n\n\n"} {"package":"rbi","topic":"bi_model","snippet":"### Name: bi_model\n### Title: Bi Model\n### Aliases: bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\n\n\n"} {"package":"rbi","topic":"bi_read","snippet":"### Name: bi_read\n### Title: Bi Read\n### Aliases: bi_read\n\n### ** Examples\n\nexample_output_file <- system.file(package = \"rbi\", \"example_output.nc\")\nd <- bi_read(example_output_file)\n\n\n"} {"package":"rbi","topic":"bi_write","snippet":"### Name: bi_write\n### Title: Create (e.g., init or observation) files for 
LibBi\n### Aliases: bi_write\n\n### ** Examples\n\nfilename <- tempfile(pattern = \"dummy\", fileext = \".nc\")\na <- 3\nb <- data.frame(\n dim_a = rep(1:3, time = 2), dim_b = rep(1:2, each = 3), value = 1:6\n)\nvariables <- list(a = a, b = b)\nbi_write(filename, variables)\nbi_file_summary(filename)\n\n\n"} {"package":"rbi","topic":"enable_outputs","snippet":"### Name: enable_outputs\n### Title: Enable outputting variables in a 'bi_model'\n### Aliases: enable_outputs\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ[6] <- \"param mu (has_output=0)\"\nPZ <- enable_outputs(PZ)\n\n\n"} {"package":"rbi","topic":"fix","snippet":"### Name: fix\n### Title: Fix noise term, state or parameter of a libbi model\n### Aliases: fix fix.bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ <- fix(PZ, alpha = 0)\n\n\n"} {"package":"rbi","topic":"get_name","snippet":"### Name: get_name\n### Title: Get the name of a bi model\n### Aliases: get_name get_name.bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nget_name(PZ)\n\n\n"} {"package":"rbi","topic":"insert_lines","snippet":"### Name: insert_lines\n### Title: Insert lines in a LibBi model\n### Aliases: insert_lines insert_lines.bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ <- insert_lines(PZ, lines = \"noise beta\", after = 8)\n\n\n"} {"package":"rbi","topic":"libbi","snippet":"### Name: libbi\n### Title: LibBi Wrapper\n### Aliases: libbi\n\n### ** Examples\n\nbi_object <- libbi(model = system.file(package = \"rbi\", \"PZ.bi\"))\n\n\n"} {"package":"rbi","topic":"remove_lines","snippet":"### Name: remove_lines\n### Title: Remove line(s) and/or block(s) in a libbi model\n### Aliases: remove_lines remove_lines.bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ <- remove_lines(PZ, 2)\n\n\n"} {"package":"rbi","topic":"set_name","snippet":"### Name: set_name\n### Title: Set the name of a bi model\n### Aliases: set_name set_name.bi_model\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nPZ <- set_name(PZ, \"new_PZ\")\n\n\n"} {"package":"rbi","topic":"write_model","snippet":"### Name: write_model\n### Title: Writes a bi model to a file.\n### Aliases: write_model write_model.bi_model write_model.libbi\n\n### ** Examples\n\nmodel_file_name <- system.file(package = \"rbi\", \"PZ.bi\")\nPZ <- bi_model(filename = model_file_name)\nnew_file_name <- tempfile(\"PZ\", fileext = \".bi\")\nwrite_model(PZ, new_file_name)\n\n\n"} {"package":"fcfdr","topic":"binary_cfdr","snippet":"### Name: binary_cfdr\n### Title: Perform cFDR leveraging binary auxiliary covariates\n### Aliases: binary_cfdr\n\n### ** Examples\n\n\n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. 
representing functional genomic data).\n# We use the binary_cfdr() function to generate v-values.\n\n# generate p\nset.seed(2)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nq <- rbinom(n, 1, 0.1)\n\ngroup <- c(rep(\"A\", n/2), rep(\"B\", n/2)) \n\nbinary_cfdr(p, q, group)\n\n\n\n"} {"package":"fcfdr","topic":"corr_plot","snippet":"### Name: corr_plot\n### Title: Violin plot of p-values for quantiles of q\n### Aliases: corr_plot\n\n### ** Examples\n\n\n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. representing functional genomic data).\n# We use the corr_plot() function to visualise the relationship between p and q.\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nmixture_comp1 <- function(x) rnorm(x, mean = -0.5, sd = 0.5)\nmixture_comp2 <- function(x) rnorm(x, mean = 2, sd = 1)\nq <- c(mixture_comp1(n1p), mixture_comp2(n-n1p))\n\ncorr_plot(p, q)\n\n\n\n"} {"package":"fcfdr","topic":"flexible_cfdr","snippet":"### Name: flexible_cfdr\n### Title: Perform Flexible cFDR\n### Aliases: flexible_cfdr\n\n### ** Examples\n\n## No test: \n# this is a long running example\n \n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. representing functional genomic data).\n# We use the flexible_cfdr() function to generate v-values using default parameter values.\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nmixture_comp1 <- function(x) rnorm(x, mean = -0.5, sd = 0.5)\nmixture_comp2 <- function(x) rnorm(x, mean = 2, sd = 1)\nq <- c(mixture_comp1(n1p), mixture_comp2(n-n1p))\n\nn_indep <- n\n\nflexible_cfdr(p, q, indep_index = 1:n_indep)\n## End(No test)\n\n\n"} {"package":"fcfdr","topic":"log10pv_plot","snippet":"### Name: log10pv_plot\n### Title: Plot -log10(p) against -log10(v) and colour by q\n### Aliases: log10pv_plot\n\n### ** Examples\n\n## No test: \n# this is a long running example\n\n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. representing functional genomic data).\n# We use the flexible_cfdr() function to generate v-values and then the log10pv_plot() function\n# to visualise the results.\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nmixture_comp1 <- function(x) rnorm(x, mean = -0.5, sd = 0.5)\nmixture_comp2 <- function(x) rnorm(x, mean = 2, sd = 1)\nq <- c(mixture_comp1(n1p), mixture_comp2(n-n1p))\n\nn_indep <- n\n\nres <- flexible_cfdr(p, q, indep_index = 1:n_indep)\n\nlog10pv_plot(p = res[[1]]$p, q = res[[1]]$q, v = res[[1]]$v)\n## End(No test)\n\n\n"} {"package":"fcfdr","topic":"parameters_in_locfdr","snippet":"### Name: parameters_in_locfdr\n### Title: parameters_in_locfdr\n### Aliases: parameters_in_locfdr\n\n### ** Examples\n\n\n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. 
representing functional genomic data).\n# We use the parameters_in_locfdr() function to extract the parameters estimated by\n# the locfdr function.\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nmixture_comp1 <- function(x) rnorm(x, mean = -0.5, sd = 0.5)\nmixture_comp2 <- function(x) rnorm(x, mean = 2, sd = 1)\nq <- c(mixture_comp1(n1p), mixture_comp2(n-n1p))\n\nn_indep <- n\n\nparameters_in_locfdr(p, q, indep_index = 1:n_indep)\n\n\n\n\n"} {"package":"fcfdr","topic":"pv_plot","snippet":"### Name: pv_plot\n### Title: Plot p against v and colour by q\n### Aliases: pv_plot\n\n### ** Examples\n\n## No test: \n # this is a long running example\n \n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. representing functional genomic data).\n# We use the flexible_cfdr() function to generate v-values and then the pv_plot() function\n# to visualise the results.\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nmixture_comp1 <- function(x) rnorm(x, mean = -0.5, sd = 0.5)\nmixture_comp2 <- function(x) rnorm(x, mean = 2, sd = 1)\nq <- c(mixture_comp1(n1p), mixture_comp2(n-n1p))\n\nn_indep <- n\n\nres <- flexible_cfdr(p, q, indep_index = 1:n_indep)\n\npv_plot(p = res[[1]]$p, q = res[[1]]$q, v = res[[1]]$v)\n## End(No test)\n\n\n\n"} {"package":"fcfdr","topic":"stratified_qqplot","snippet":"### Name: stratified_qqplot\n### Title: Stratified Q-Q plot.\n### Aliases: stratified_qqplot\n\n### ** Examples\n\n\n# In this example, we generate some p-values (representing GWAS p-values)\n# and some arbitrary auxiliary data values (e.g. 
representing GWAS p-values for a related trait).\n# We use the stratified_qqplot() function to examine the relationship between p and q\n\n# generate p\nset.seed(1)\nn <- 1000\nn1p <- 50 \nzp <- c(rnorm(n1p, sd=5), rnorm(n-n1p, sd=1))\np <- 2*pnorm(-abs(zp))\n\n# generate q\nzq <- c(rnorm(n1p, sd=4), rnorm(n-n1p, sd=1.2))\nq <- 2*pnorm(-abs(zq))\n\ndf <- data.frame(p, q)\n\nstratified_qqplot(data_frame = df, prin_value_label = \"p\", cond_value_label = \"q\")\n\n\n\n"} {"package":"bnClustOmics","topic":"annotateEdges","snippet":"### Name: annotateEdges\n### Title: Annotating edges from discovered networks\n### Aliases: annotateEdges\n\n### ** Examples\n\nbnnames<-bnInfo(simdata,c(\"b\",\"c\"),c(\"M\",\"T\"))\nintlist<-annotateEdges(bnres3,bnnames,dblist=simint)\nlength(which(intlist$db))\n\n\n"} {"package":"bnClustOmics","topic":"bnInfo","snippet":"### Name: bnInfo\n### Title: Constructing object of class bnInfo\n### Aliases: bnInfo\n\n### ** Examples\n\n#with mappings\nbnnames<-bnInfo(toydata,c(\"b\",\"o\",\"c\",\"c\",\"c\"),c(\"M\",\"CN\",\"T\",\"P\",\"PP\"),mappings)\n#no mappings\nbnnames<-bnInfo(simdata,c(\"b\",\"c\"),c(\"M\",\"T\"))\n\n\n"} {"package":"bnClustOmics","topic":"bnclustNetworks","snippet":"### Name: bnclustNetworks\n### Title: Deriving consensus networks based on posterior probabilities of\n### mixture model\n### Aliases: bnclustNetworks\n\n### ** Examples\n\nbnnames<-bnInfo(simdata,c(\"b\",\"c\"),c(\"M\",\"T\"))\nintlist<-bnclustNetworks(bnres3,bnnames)\n\n\n"} {"package":"bnClustOmics","topic":"bnclustOmics","snippet":"### Name: bnclustOmics\n### Title: Bayesian network based clustering of multi-omics data\n### Aliases: bnclustOmics\n\n### ** Examples\n\nbnnames<-bnInfo(simdata,c(\"b\",\"c\"),c(\"M\",\"T\"))\n## No test: \nfit<-bnclustOmics(simdata,bnnames,maxEM=4, kclust=2, startpoint = \"mclustPCA\")\nclusters(fit)\ncheckmembership(clusters(fit),simclusters)\n## End(No test)\n\n\n"} {"package":"bnClustOmics","topic":"chooseK","snippet":"### Name: chooseK\n### Title: Choosing the number of clusters\n### Aliases: chooseK\n\n### ** Examples\n\nbnlist<-list()\n\n#bnlist[[k]]<-bnclustOmics(simdata,bnnames,maxEM=4, kclust=k,startpoint = \"mclustPCA\")\nbnlist[[2]]<-bnres2\nbnlist[[3]]<-bnres3\nbnlist[[4]]<-bnres4\n\nchooseK(bnlist,fun=\"BIC\")\nchooseK(bnlist,fun=\"AIC\")\n\n\n"} {"package":"bnClustOmics","topic":"clusters","snippet":"### Name: clusters\n### Title: Extracting cluster memberships\n### Aliases: clusters\n\n### ** Examples\n\nclusters(bnres3)\n\n\n"} {"package":"bnClustOmics","topic":"dags","snippet":"### Name: dags\n### Title: Extracting edge posterior probabilities\n### Aliases: dags\n\n### ** Examples\n\nDAGs<-dags(bnres3)\n\n\n"} {"package":"bnClustOmics","topic":"getModels","snippet":"### Name: getModels\n### Title: Deriving consensus graphs\n### Aliases: getModels\n\n### ** Examples\n\nMAPmod<-dags(bnres3)\nCONSmod1<-getModels(bnres3,p=0.5)\nCONSmod2<-getModels(bnres3,p=0.9)\nlibrary(BiDAG)\ncompareDAGs(MAPmod[[1]],simdags[[1]])\ncompareDAGs(CONSmod1[[1]],simdags[[1]])\ncompareDAGs(CONSmod2[[1]],simdags[[1]])\n\n\n"} {"package":"bnClustOmics","topic":"plotNode","snippet":"### Name: plotNode\n### Title: Plotting all connections of one node\n### Aliases: plotNode\n\n### ** Examples\n\nbnnames<-bnInfo(simdata,c(\"b\",\"c\"),c(\"M\",\"T\"))\nallInteractions<-annotateEdges(bnres3,bnnames,sump=1.2,minp=0.5,minkp=0.9,dblist=simint)\nplotNode(allInteractions,\"T43\",p=0.5)\nplotNode(allInteractions,\"T43\",p=0.5,dbcheck=FALSE)\n\n\n"} 
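The bnClustOmics accessors shown above compose naturally into a quick model summary. A minimal sketch, assuming the packaged bnres3 example object and that dags() and getModels() each return one binary adjacency matrix per cluster (as their use with BiDAG::compareDAGs() above suggests); raising the consensus threshold p should only prune edges relative to the MAP structures:

library(bnClustOmics)

memb <- clusters(bnres3)               # cluster membership per sample
table(memb)                            # cluster sizes

MAPmod <- dags(bnres3)                 # MAP structure per cluster
CONSmod <- getModels(bnres3, p = 0.9)  # keep only edges with posterior > 0.9

## edge counts for the first cluster; the high-threshold consensus graph is sparser
c(MAP = sum(MAPmod[[1]]), consensus = sum(CONSmod[[1]]))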
{"package":"bnClustOmics","topic":"posteriors","snippet":"### Name: posteriors\n### Title: Extracting edge posterior probabilities\n### Aliases: posteriors\n\n### ** Examples\n\npost<-posteriors(bnres4)\n\n\n"} {"package":"rSAFE","topic":"safe_extraction","snippet":"### Name: safe_extraction\n### Title: Creating SAFE Extractor - an Object Used for Surrogate-Assisted\n### Feature Extraction\n### Aliases: safe_extraction\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1], verbose = FALSE)\nsafe_extractor <- safe_extraction(explainer_rf, grid_points = 30, N = 100, verbose = FALSE)\nprint(safe_extractor)\nplot(safe_extractor, variable = \"construction.year\")\n\n\n\n"} {"package":"rSAFE","topic":"safely_detect_changepoints","snippet":"### Name: safely_detect_changepoints\n### Title: Identifying Changes in a Series Using PELT Algorithm\n### Aliases: safely_detect_changepoints\n\n### ** Examples\n\n\nlibrary(rSAFE)\n\ndata <- rep(c(2,7), each=4)\nsafely_detect_changepoints(data)\n\nset.seed(123)\ndata <- c(rnorm(15, 0), rnorm(20, 2), rnorm(30, 8))\nsafely_detect_changepoints(data)\nsafely_detect_changepoints(data, penalty = 25)\n\n\n"} {"package":"rSAFE","topic":"safely_detect_interactions","snippet":"### Name: safely_detect_interactions\n### Title: Detecting Interactions via Permutation Approach\n### Aliases: safely_detect_interactions\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1])\nsafely_detect_interactions(explainer_rf, inter_param = 0.25,\n inter_threshold = 0.2, verbose = TRUE)\n\n\n\n"} {"package":"rSAFE","topic":"safely_select_variables","snippet":"### Name: safely_select_variables\n### Title: Performing Feature Selection on the Dataset with Transformed\n### Variables\n### Aliases: safely_select_variables\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1])\nsafe_extractor <- safe_extraction(explainer_rf, verbose = FALSE)\nsafely_select_variables(safe_extractor, data, which_y = \"m2.price\", verbose = FALSE)\n\n\n\n"} {"package":"rSAFE","topic":"safely_transform_categorical","snippet":"### Name: safely_transform_categorical\n### Title: Calculating a Transformation of Categorical Feature Using\n### Hierarchical Clustering\n### Aliases: safely_transform_categorical\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1])\nsafely_transform_categorical(explainer_rf, \"district\")\n\n\n\n"} {"package":"rSAFE","topic":"safely_transform_continuous","snippet":"### Name: safely_transform_continuous\n### Title: Calculating a Transformation of a Continuous Feature Using\n### PDP/ALE Plot\n### Aliases: 
safely_transform_continuous\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1])\nsafely_transform_continuous(explainer_rf, \"construction.year\")\n\n\n\n"} {"package":"rSAFE","topic":"safely_transform_data","snippet":"### Name: safely_transform_data\n### Title: Performing Transformations on All Features in the Dataset\n### Aliases: safely_transform_data\n\n### ** Examples\n\n\nlibrary(DALEX)\nlibrary(randomForest)\nlibrary(rSAFE)\n\ndata <- apartments[1:500,]\nset.seed(111)\nmodel_rf <- randomForest(m2.price ~ construction.year + surface + floor +\n no.rooms + district, data = data)\nexplainer_rf <- explain(model_rf, data = data[,2:6], y = data[,1])\nsafe_extractor <- safe_extraction(explainer_rf, verbose = FALSE)\nsafely_transform_data(safe_extractor, data, verbose = FALSE)\n\n\n\n"} {"package":"rerddapXtracto","topic":"plotBBox","snippet":"### Name: plotBBox\n### Title: plot result of 'rxtracto_3D'\n### Aliases: plotBBox\n\n### ** Examples\n\n## example code to download data for plotBBox\n## dataInfo <- rerddap::info('erdMBsstd1day')\n## parameter <- 'sst'\n## xcoord <- c(230, 230.1)\n## ycoord <- c(33, 33.1)\n## tcoord <- c('2006-01-15', '2006-01-15')\n## zcoord <- c(0., 0.)\n## MBsst <- rxtracto_3D(dataInfo, parameter, xcoord = xcoord, ycoord = ycoord,\n## tcoord = tcoord, zcoord = zcoord)\n##\n## low resolution selected to keep time to render down\n# suppressWarnings(p <- plotBBox(MBsst, maxpixels = 50))\n\n\n"} {"package":"rerddapXtracto","topic":"plotTrack","snippet":"### Name: plotTrack\n### Title: plot result of 'rxtracto'\n### Aliases: plotTrack\n\n### ** Examples\n\n## example data download for plotTrack\n## tagData <- Marlintag38606\n## xpos <- tagData$lon[1:20]\n## ypos <- tagData$lat[1:20]\n## tpos <- tagData$date[1:20]\n## zpos <- rep(0., length(xpos))\n\n## example data extraction for plotTrack\n## swchlInfo <- rerddap::info('erdSWchla8day')\n## swchl <- rxtracto(swchlInfo, parameter = 'chlorophyll', xcoord = xpos,\n## ycoord = ypos, tcoord = tpos, zcoord = zpos, xlen = .2, ylen = .2)\n##\n# suppressWarnings(p <- plotTrack(swchl, xpos, ypos, tpos, plotColor = 'algae'))\n\n\n"} {"package":"rerddapXtracto","topic":"rxtracto","snippet":"### Name: rxtracto\n### Title: Extract environmental data along a trajectory from an 'ERDDAP'\n### server using 'rerddap'.\n### Aliases: rxtracto\n\n### ** Examples\n\n## toy example to show use\n## but keep execution time down\n##\n# dataInfo <- rerddap::info('erdHadISST')\n##\nparameter <- 'sst'\nxcoord <- c(-130.5)\nycoord <- c(40.5)\ntcoord <- c('2006-01-16')\n# extract <- rxtracto(dataInfo, parameter = parameter, xcoord = xcoord,\n# ycoord = ycoord, tcoord= tcoord\n# )\n##\n## bathymetry example\n## 2-D example getting bathymetry\ndataInfo <- rerddap::info('etopo360')\nparameter <- 'altitude'\n# extract <- rxtracto(dataInfo, parameter, xcoord = xcoord, ycoord = ycoord)\n\n\n\n"} {"package":"rerddapXtracto","topic":"rxtracto_3D","snippet":"### Name: rxtracto_3D\n### Title: Extract environmental data in a 3-dimensional box from an\n### 'ERDDAP' server using 'rerddap'.\n### Aliases: rxtracto_3D\n\n### ** Examples\n\n## toy example to show use\n## and keep execution time low\n##\n# dataInfo <- rerddap::info('erdHadISST')\nparameter <- 'sst'\nxcoord <- c(-130.5, -130.5)\nycoord <- c(40.5, 
40.5)\ntcoord <- c('2006-01-16', '2006-01-16')\n# extract <- rxtracto_3D(dataInfo, parameter, xcoord = xcoord, ycoord = ycoord,\n# tcoord = tcoord)\n\n## bathymetry example\n## 2-D example getting bathymetry\ndataInfo <- rerddap::info('etopo360')\nparameter <- 'altitude'\n# extract <- rxtracto_3D(dataInfo, parameter, xcoord = xcoord, ycoord = ycoord)\n\n\n"} {"package":"rerddapXtracto","topic":"rxtractogon","snippet":"### Name: rxtractogon\n### Title: Extract environmental data in a polygon using 'ERDDAP' and\n### 'rerddap'.\n### Aliases: rxtractogon\n\n### ** Examples\n\n## toy example to show use\n## and keep execution time low\n# dataInfo <- rerddap::info('erdHadISST')\nparameter <- 'sst'\ntcoord <- c(\"2016-06-15\")\nxcoord <- mbnms$Longitude[1:3]\nycoord <- mbnms$Latitude[1:3]\n# sanctSST <- rxtractogon (dataInfo, parameter=parameter, xcoord = xcoord,\n# ycoord = ycoord, tcoord= tcoord)\n#\n## MBNMS bathymetry example\nxcoord <- mbnms$Longitude\nycoord <- mbnms$Latitude\ndataInfo <- rerddap::info('etopo180')\nparameter = 'altitude'\nxName <- 'longitude'\nyName <- 'latitude'\n# bathy <- rxtractogon (dataInfo, parameter = parameter, xcoord = xcoord, ycoord = ycoord)\n\n\n"} {"package":"rerddapXtracto","topic":"tidy_grid","snippet":"### Name: tidy_grid\n### Title: convert result of 'rxtracto_3D' or 'rxtractogon' to tidy\n### long-format\n### Aliases: tidy_grid\n\n### ** Examples\n\nMBsst_tidy <-tidy_grid(MBsst)\n\n\n"} {"package":"StockDistFit","topic":"AAPL","snippet":"### Name: AAPL\n### Title: Apple Inc. stock prices dataset\n### Aliases: AAPL\n### Keywords: datasets\n\n### ** Examples\n\ndata(AAPL)\nstr(AAPL) ; plot(AAPL)\n\n\n"} {"package":"StockDistFit","topic":"AMZN","snippet":"### Name: AMZN\n### Title: Amazon.com Inc. Stock Prices Dataset\n### Aliases: AMZN\n### Keywords: datasets\n\n### ** Examples\n\ndata(AMZN)\nstr(AMZN) ; plot(AMZN)\n\n\n"} {"package":"StockDistFit","topic":"GOOG","snippet":"### Name: GOOG\n### Title: Alphabet Inc. Stock Prices Dataset\n### Aliases: GOOG\n### Keywords: datasets\n\n### ** Examples\n\ndata(GOOG)\nstr(GOOG) ; plot(GOOG)\n\n\n"} {"package":"StockDistFit","topic":"TSLA","snippet":"### Name: TSLA\n### Title: Tesla Inc. 
Stock Prices Dataset\n### Aliases: TSLA\n### Keywords: datasets\n\n### ** Examples\n\ndata(TSLA)\nstr(TSLA) ; plot(TSLA)\n\n\n"} {"package":"StockDistFit","topic":"annual_return","snippet":"### Name: annual_return\n### Title: Compute Annual Returns of a Vector.\n### Aliases: annual_return\n\n### ** Examples\n\n\n# Compute annual returns of an asset vector\nrequire(xts)\nasset_returns_xts <- xts(c(29.2, 30.0, 36.2, 30.4, 38.5, -35.6, 34.5),\norder.by = as.Date(c(\"2017-05-07\", \"2018-05-07\", \"2019-05-07\",\n\"2020-05-07\", \"2021-05-07\", \"2022-05-07\",\n\"2023-05-07\")))\nannual_return(asset_returns_xts)\n\n\n\n\n"} {"package":"StockDistFit","topic":"asset_loader","snippet":"### Name: asset_loader\n### Title: Load Asset Data.\n### Aliases: asset_loader\n\n### ** Examples\n\n\nasset_loader(system.file(\"extdata\", package = \"StockDistFit\"), c(\"AAPL\", \"TSLA\"), \"Close\")\n\n\n\n\n"} {"package":"StockDistFit","topic":"best_dist","snippet":"### Name: best_dist\n### Title: Find the best distribution based on AIC values\n### Aliases: best_dist\n\n### ** Examples\n\n\ndata <- asset_loader(system.file(\"extdata\", package = \"StockDistFit\"), c(\"AAPL\", \"TSLA\"), \"Close\")\ndf = fit_multiple_dist(c(\"norm_fit\", \"cauchy_fit\"), data)\nbest_dist(df, c(\"norm_fit\", \"cauchy_fit\"))\n\n\n\n\n"} {"package":"StockDistFit","topic":"cauchy_fit","snippet":"### Name: cauchy_fit\n### Title: Fit Cauchy Distribution to a vector of returns/stock prices.\n### Aliases: cauchy_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\ncauchy_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"data.cumret","snippet":"### Name: data.cumret\n### Title: Compute Cumulative Returns of a Vector.\n### Aliases: data.cumret\n\n### ** Examples\n\n\n# Compute cumulative returns of an asset vector\nlibrary(quantmod)\nasset_returns_xts <- xts(c(29.2, 30.0, 36.2, 30.4, 38.5, -35.6, 34.5),\n order.by = as.Date(c(\"2023-05-01\", \"2023-05-02\", \"2023-05-03\",\n \"2023-05-04\", \"2023-05-05\", \"2023-05-06\",\n \"2023-05-07\")))\ndata.cumret(asset_returns_xts, initial_eq = 100)\n\n\n\n\n"} {"package":"StockDistFit","topic":"fit_multiple_dist","snippet":"### Name: fit_multiple_dist\n### Title: Fits Multiple Probability Distributions to several assets/stock\n### prices.\n### Aliases: fit_multiple_dist\n\n### ** Examples\n\n\ndata <- asset_loader(system.file(\"extdata\", package = \"StockDistFit\"), c(\"AAPL\", \"TSLA\"), \"Close\")\nfit_multiple_dist(c(\"norm_fit\", \"cauchy_fit\"), data)\n\n\n\n\n"} {"package":"StockDistFit","topic":"ged_fit","snippet":"### Name: ged_fit\n### Title: Fit Generalized Error Distribution to a vector of returns/stock\n### prices.\n### Aliases: ged_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nged_fit(returns)\n\n\n\n"} {"package":"StockDistFit","topic":"ghd_fit","snippet":"### Name: ghd_fit\n### Title: Fit Generalized Hyperbolic Distribution to a vector of\n### returns/stock prices.\n### Aliases: ghd_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 16, 24)\nreturns <- diff(log(stock_prices))\nghd_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"hd_fit","snippet":"### Name: hd_fit\n### Title: Fit Hyperbolic distribution to return/stock prices.\n### Aliases: hd_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 15, 16)\nreturns <- diff(log(stock_prices))\nhd_fit(returns)\n\n\n\n\n"} 
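All of the *_fit helpers in this group follow one convention: they are applied to log returns, diff(log(prices)), never to raw prices. A minimal sketch fitting several candidate distributions to the same toy series (the price values are invented); an AIC-based comparison across many assets is what fit_multiple_dist() and best_dist() above automate:

stock_prices <- c(100, 102, 101, 105, 107, 104, 110)
returns <- diff(log(stock_prices))   # log returns, as in every example above
norm_fit(returns)     # Gaussian baseline
t_fit(returns)        # heavier-tailed alternative
cauchy_fit(returns)   # heaviest tails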
{"package":"StockDistFit","topic":"monthly_return","snippet":"### Name: monthly_return\n### Title: Compute Monthly Returns of a Vector.\n### Aliases: monthly_return\n\n### ** Examples\n\n\n# Compute monthly returns of an asset vector\nrequire(xts)\nasset_returns_xts <- xts(c(29.2, 30.0, 36.2, 30.4, 38.5, -35.6, 34.5),\n order.by = as.Date(c(\"2022-05-02\", \"2022-06-02\", \"2022-07-02\",\n \"2022-08-02\", \"2022-09-02\", \"2022-10-02\",\n \"2022-11-02\")))\nmonthly_return(asset_returns_xts)\n\n\n\n\n"} {"package":"StockDistFit","topic":"nig_fit","snippet":"### Name: nig_fit\n### Title: Fit Normal Inverse Gaussian (NIG) Distribution to a vector of\n### returns/stock prices.\n### Aliases: nig_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nnig_fit(returns)\n\n\n\n"} {"package":"StockDistFit","topic":"norm_fit","snippet":"### Name: norm_fit\n### Title: Fit Normal Distribution to a Vector/stock prices.\n### Aliases: norm_fit\n\n### ** Examples\n\n\n# Fit a normal distribution to a vector of returns\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nnorm_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"skew.ged_fit","snippet":"### Name: skew.ged_fit\n### Title: Fit Skewed Generalized Error Distribution to a vector of\n### returns/stock prices.\n### Aliases: skew.ged_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nskew.ged_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"skew.normal_fit","snippet":"### Name: skew.normal_fit\n### Title: Fit Skew Normal Distribution to a vector of returns/stock\n### prices.\n### Aliases: skew.normal_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nskew.normal_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"skew.t_fit","snippet":"### Name: skew.t_fit\n### Title: Fit Skewed Student-t Distribution to a vector of returns/stock\n### prices.\n### Aliases: skew.t_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nskew.t_fit(returns)\n\n\n\n"} {"package":"StockDistFit","topic":"sym.ghd_fit","snippet":"### Name: sym.ghd_fit\n### Title: Fit Symmetric Generalized Hyperbolic Distribution to\n### returns/stock prices.\n### Aliases: sym.ghd_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 16, 15)\nreturns <- diff(log(stock_prices))\nsym.ghd_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"sym.hd_fit","snippet":"### Name: sym.hd_fit\n### Title: Fit a Symmetric Hyperbolic Distribution to a vector of\n### return/stock prices.\n### Aliases: sym.hd_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 20, 21)\nreturns <- diff(log(stock_prices))\nsym.hd_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"sym.vg_fit","snippet":"### Name: sym.vg_fit\n### Title: Fit Symmetric Variance Gamma Distribution to a vector of\n### returns/stock prices.\n### Aliases: sym.vg_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nsym.vg_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"t_fit","snippet":"### Name: t_fit\n### Title: Fit Student's t Distribution to a vector of returns/stock\n### prices.\n### Aliases: t_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 17, 18)\nreturns <- diff(log(stock_prices))\nt_fit(returns)\n\n\n\n\n"} 
{"package":"StockDistFit","topic":"vg_fit","snippet":"### Name: vg_fit\n### Title: Fit Variance Gamma Distribution to a vector of return/stock\n### prices.\n### Aliases: vg_fit\n\n### ** Examples\n\n\nstock_prices <- c(10, 11, 12, 13, 14, 15, 17)\nreturns <- diff(log(stock_prices))\nvg_fit(returns)\n\n\n\n\n"} {"package":"StockDistFit","topic":"weekly_return","snippet":"### Name: weekly_return\n### Title: Compute Weekly Returns of a Vector.\n### Aliases: weekly_return\n\n### ** Examples\n\n\n# Compute weekly returns of an asset vector\nrequire(xts)\nasset_returns_xts <- xts(c(29.2, 30.0, 36.2, 30.4, 38.5, -35.6, 34.5),\n order.by = as.Date(c(\"2022-05-01\", \"2022-05-08\", \"2022-05-15\",\n \"2022-05-22\", \"2022-05-29\", \"2022-06-05\",\n \"2022-06-12\")))\nweekly_return(asset_returns_xts)\n\n\n\n\n"} {"package":"bdrc","topic":"autoplot.plm0","snippet":"### Name: autoplot.plm0\n### Title: Autoplot method for discharge rating curves\n### Aliases: autoplot.plm0 autoplot.plm autoplot.gplm0 autoplot.gplm\n\n### ** Examples\n\n## No test: \nlibrary(ggplot2)\ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(Q~W,krokfors,num_cores=2)\nautoplot(plm0.fit)\nautoplot(plm0.fit,transformed=TRUE)\nautoplot(plm0.fit,type='histogram',param='c')\nautoplot(plm0.fit,type='histogram',param='c',transformed=TRUE)\nautoplot(plm0.fit,type='histogram',param='hyperparameters')\nautoplot(plm0.fit,type='histogram',param='latent_parameters')\nautoplot(plm0.fit,type='residuals')\nautoplot(plm0.fit,type='f')\nautoplot(plm0.fit,type='sigma_eps')\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"autoplot.tournament","snippet":"### Name: autoplot.tournament\n### Title: Autoplot method for discharge rating curve tournament\n### Aliases: autoplot.tournament\n\n### ** Examples\n\n## No test: \nlibrary(ggplot2)\ndata(krokfors)\nset.seed(1)\nt_obj <- tournament(formula=Q~W,data=krokfors,num_cores=2)\nautoplot(t_obj)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"gather_draws","snippet":"### Name: gather_draws\n### Title: Gather MCMC chain draws to data.frame on a long format\n### Aliases: gather_draws\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,num_cores=2)\nhyp_samples <- gather_draws(plm0.fit,'hyperparameters')\nhead(hyp_samples)\nrating_curve_samples <- gather_draws(plm0.fit,'rating_curve','rating_curve_mean')\nhead(rating_curve_samples)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"get_report","snippet":"### Name: get_report\n### Title: Report for a discharge rating curve or tournament\n### Aliases: get_report get_report.plm0 get_report.plm get_report.gplm0\n### get_report.gplm get_report.tournament\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,num_cores=2)\n## End(No test)\n## Not run: \n##D get_report(plm0.fit)\n## End(Not run)\n\n\n"} {"package":"bdrc","topic":"get_report_pages","snippet":"### Name: get_report_pages\n### Title: Report pages for a discharge rating curve or tournament\n### Aliases: get_report_pages get_report_pages.plm0 get_report_pages.plm\n### get_report_pages.gplm0 get_report_pages.gplm\n### get_report_pages.tournament\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,num_cores=2)\nplm0_pages <- get_report_pages(plm0.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"gplm","snippet":"### Name: gplm\n### Title: Generalized power-law model with variance that varies with\n### stage.\n### Aliases: gplm\n\n### 
** Examples\n\n## No test: \ndata(norn)\nset.seed(1)\ngplm.fit <- gplm(formula=Q~W,data=norn,num_cores=2)\nsummary(gplm.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"gplm0","snippet":"### Name: gplm0\n### Title: Generalized power-law model with a constant variance\n### Aliases: gplm0\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\ngplm0.fit <- gplm0(formula=Q~W,data=krokfors,num_cores=2)\nsummary(gplm0.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"plm","snippet":"### Name: plm\n### Title: Power-law model with variance that varies with stage.\n### Aliases: plm\n\n### ** Examples\n\n## No test: \ndata(spanga)\nset.seed(1)\nplm.fit <- plm(formula=Q~W,data=spanga,num_cores=2)\nsummary(plm.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"plm0","snippet":"### Name: plm0\n### Title: Power-law model with a constant variance\n### Aliases: plm0\n\n### ** Examples\n\n## No test: \ndata(skogsliden)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=skogsliden,num_cores=2)\nsummary(plm0.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"plot.plm0","snippet":"### Name: plot.plm0\n### Title: Plot method for discharge rating curves\n### Aliases: plot.plm0 plot.plm plot.gplm0 plot.gplm\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,num_cores=2)\n\nplot(plm0.fit)\nplot(plm0.fit,transformed=TRUE)\nplot(plm0.fit,type='histogram',param='c')\nplot(plm0.fit,type='histogram',param='c',transformed=TRUE)\nplot(plm0.fit,type='histogram',param='hyperparameters')\nplot(plm0.fit,type='histogram',param='latent_parameters')\nplot(plm0.fit,type='residuals')\nplot(plm0.fit,type='f')\nplot(plm0.fit,type='sigma_eps')\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"plot.tournament","snippet":"### Name: plot.tournament\n### Title: Plot method for discharge rating curve tournament\n### Aliases: plot.tournament\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nt_obj <- tournament(formula=Q~W,data=krokfors,num_cores=2)\nplot(t_obj)\nplot(t_obj,transformed=TRUE)\nplot(t_obj,type='deviance')\nplot(t_obj,type='f')\nplot(t_obj,type='sigma_eps')\nplot(t_obj,type='residuals')\nplot(t_obj,type='tournament_results')\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"predict.plm0","snippet":"### Name: predict.plm0\n### Title: Predict method for discharge rating curves\n### Aliases: predict.plm0 predict.plm predict.gplm0 predict.gplm\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,h_max=10,num_cores=2)\n#predict rating curve on an equally spaced 10 cm grid from 9 to 10 meters\npredict(plm0.fit,newdata=seq(9,10,by=0.1))\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"spread_draws","snippet":"### Name: spread_draws\n### Title: Spread MCMC chain draws to data.frame on a wide format\n### Aliases: spread_draws\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- plm0(formula=Q~W,data=krokfors,num_cores=2)\nhyp_samples <- spread_draws(plm0.fit,'hyperparameters')\nhead(hyp_samples)\nrating_curve_samples <- spread_draws(plm0.fit,'rating_curve','rating_curve_mean')\nhead(rating_curve_samples)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"summary.plm0","snippet":"### Name: summary.plm0\n### Title: Summary method for discharge rating curves\n### Aliases: summary.plm0 summary.plm summary.gplm0 summary.gplm\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nplm0.fit <- 
plm0(formula=Q~W,data=krokfors,num_cores=2)\nsummary(plm0.fit)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"summary.tournament","snippet":"### Name: summary.tournament\n### Title: Summary method for a discharge rating curve tournament\n### Aliases: summary.tournament\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nt_obj <- tournament(Q~W,krokfors,num_cores=2)\nsummary(t_obj)\n## End(No test)\n\n\n"} {"package":"bdrc","topic":"tournament","snippet":"### Name: tournament\n### Title: Tournament - Model comparison\n### Aliases: tournament\n\n### ** Examples\n\n## No test: \ndata(krokfors)\nset.seed(1)\nt_obj <- tournament(formula=Q~W,data=krokfors,num_cores=2)\nt_obj\nsummary(t_obj)\n## End(No test)\n\n\n"} {"package":"SIRE","topic":"causal_decompose","snippet":"### Name: causal_decompose\n### Title: Estimation and decomposition of simultaneous equation model\n### Aliases: causal_decompose\n\n### ** Examples\n\ndata(\"macroIT\")\neq.system = list(\n eq1 = C ~ CP + I + CP_1,\n eq2 = I ~ K + CP_1,\n eq3 = WP ~ I + GDP + GDP_1,\n eq4 = GDP ~ C + I + GDP_1,\n eq5 = CP ~ WP + T,\n eq6 = K ~ I + K_1)\n\ninstruments = ~ T + CP_1 + GDP_1 + K_1\n\ncausal_decompose(data = macroIT,\n eq.system = eq.system,\n resid.est = \"noDfCor\",\n instruments = instruments,\n sigma.in = NULL)\n\n\n"} {"package":"SIRE","topic":"feedback_ml","snippet":"### Name: feedback_ml\n### Title: Testing for Feedback Effects in a Simultaneous Equation Model\n### Aliases: feedback_ml\n\n### ** Examples\n\n## No test: \ndata(\"macroIT\")\neq.system = list(\n eq1 = C ~ CP + I + CP_1,\n eq2 = I ~ K + CP_1,\n eq3 = WP ~ I + GDP + GDP_1,\n eq4 = GDP ~ C + I + GDP_1,\n eq5 = CP ~ WP + T,\n eq6 = K ~ I + K_1)\n\ninstruments = ~ T + CP_1 + GDP_1 + K_1\n\nc.dec = causal_decompose(data = macroIT,\n eq.system = eq.system,\n resid.est = \"noDfCor\",\n instruments = instruments)\n\nfeedback_ml(data = macroIT,\n out.decompose = c.dec,\n eq.id = 5,\n lb = -200,\n ub = 200,\n nrestarts = 10,\n nsim = 20000,\n seed.in = 1)\n## End(No test)\n\n\n"} {"package":"SIRE","topic":"macroIT","snippet":"### Name: macroIT\n### Title: Italian Macroeconomic Data\n### Aliases: macroIT\n### Keywords: datasets\n\n### ** Examples\n\ndata(macroIT)\n\n\n"} {"package":"rotationForest","topic":"predict.rotationForest","snippet":"### Name: predict.rotationForest\n### Title: Predict method for rotationForest objects\n### Aliases: predict.rotationForest\n### Keywords: classification\n\n### ** Examples\n\ndata(iris)\ny <- as.factor(ifelse(iris$Species[1:100]==\"setosa\",0,1))\nx <- iris[1:100,-5]\nrF <- rotationForest(x,y)\npredict(object=rF,newdata=x)\n\n\n"} {"package":"rotationForest","topic":"rotationForest","snippet":"### Name: rotationForest\n### Title: Binary classification with Rotation Forest (Rodriguez en\n### Kuncheva, 2006)\n### Aliases: rotationForest\n### Keywords: classification\n\n### ** Examples\n\ndata(iris)\ny <- as.factor(ifelse(iris$Species[1:100]==\"setosa\",0,1))\nx <- iris[1:100,-5]\nrF <- rotationForest(x,y)\npredict(object=rF,newdata=x)\n\n\n"} {"package":"rotationForest","topic":"rotationForestNews","snippet":"### Name: rotationForestNews\n### Title: Display the NEWS file\n### Aliases: rotationForestNews\n\n### ** Examples\n\nrotationForestNews()\n\n\n"} {"package":"naive","topic":"naive","snippet":"### Name: naive\n### Title: naive\n### Aliases: naive naive-package\n\n### ** Examples\n\n{\nnaive(time_features[, 2:3, drop = FALSE], seq_len = 30, n_samp = 1, n_windows = 5)\n}\n\n\n"} 
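The rotationForest examples above score the training rows themselves. A sketch with a held-out split, assuming (as the examples' usage suggests) that predict() returns class-1 scores; the 70/30 split and the 0.5 cutoff are arbitrary illustration choices:

data(iris)
y <- as.factor(ifelse(iris$Species[1:100] == "setosa", 0, 1))
x <- iris[1:100, -5]
set.seed(42)
train <- sample(100, 70)                    # arbitrary 70/30 split
rF <- rotationForest(x[train, ], y[train])  # fit on the training rows only
pred <- predict(object = rF, newdata = x[-train, ])
table(pred > 0.5, y[-train])                # rough confusion table at cutoff 0.5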
{"package":"landpred","topic":"AUC.landmark","snippet":"### Name: AUC.landmark\n### Title: Estimates the area under the ROC curve (AUC).\n### Aliases: AUC.landmark\n### Keywords: prediction accuracy survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\nProb.Null(t0=t0,tau=tau,data=data_example_landpred)\n\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nAUC.landmark(t0=t0,tau=tau, data = out$data)\n\n\n\n"} {"package":"landpred","topic":"BS.landmark","snippet":"### Name: BS.landmark\n### Title: Estimates the Brier score.\n### Aliases: BS.landmark\n### Keywords: prediction accuracy survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\nProb.Null(t0=t0,tau=tau,data=data_example_landpred)\n\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nBS.landmark(t0=t0,tau=tau, data = out$data)\n\n\n\n"} {"package":"landpred","topic":"Prob.Covariate","snippet":"### Name: Prob.Covariate\n### Title: Estimates P(TL t0, Z), i.e. given discrete\n### covariate.\n### Aliases: Prob.Covariate\n### Keywords: prediction survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\nProb.Covariate(t0=t0,tau=tau,data=data_example_landpred)\n\nout = Prob.Covariate(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nnewdata = matrix(c(1,1,1, 3,0,1, 4,1,1, 10,1,0, 11,0,1), ncol = 3, byrow=TRUE)\nout = Prob.Covariate(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\nout$Prob\nout$newdata\n\n\n\n"} {"package":"landpred","topic":"Prob.Covariate.ShortEvent","snippet":"### Name: Prob.Covariate.ShortEvent\n### Title: Estimates P(TL t0, Z, min(TS, t0), I(TS<=t0)),\n### i.e. given discrete covariate and TS information.\n### Aliases: Prob.Covariate.ShortEvent\n### Keywords: prediction survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\n#note: computationally intensive command below\n#Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred)\n\n#out = Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred)\n#out$data\n#data.plot = out$data\n#plot(data.plot$XS[data.plot$Z ==1], data.plot$Probability[data.plot$Z ==1], \n#pch = 20, xlim = c(0,t0))\n#points(data.plot$XS[data.plot$Z ==0], data.plot$Probability[data.plot$Z ==0], \n#pch = 20, col = 2)\n\nnewdata = matrix(c(1,1,0.5,1,0,\n3,0,1,1,1,\n4,1,1.5,1,0,\n10,1,5,1,0,\n11,0,11,0,1), ncol = 5, byrow=TRUE)\n#note: computationally intensive command below\n#out = Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\n#out$newdata\n\n\n\n"} {"package":"landpred","topic":"Prob.Null","snippet":"### Name: Prob.Null\n### Title: Estimates P(TL t0).\n### Aliases: Prob.Null\n### Keywords: prediction survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\nProb.Null(t0=t0,tau=tau,data=data_example_landpred)\n\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nnewdata = matrix(c(1,1,3,0,4,1,10,1,11,0), ncol = 2, byrow=TRUE)\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\nout$Prob\nout$newdata\n\n\n\n"} {"package":"landpred","topic":"Wi.FUN","snippet":"### Name: Wi.FUN\n### Title: Computes the inverse probability of censoring weights for a\n### specific t0 and tau\n### Aliases: Wi.FUN\n### Keywords: IPCW survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\n\nW2i <- Wi.FUN(data_example_landpred[,1],data = data_example_landpred[,c(1:2)],t0=t0,tau=tau)\n\n\n\n\n"} 
{"package":"landpred","topic":"data_example_landpred","snippet":"### Name: data_example_landpred\n### Title: Hypothetical data to be used in examples.\n### Aliases: data_example_landpred\n### Keywords: datasets\n\n### ** Examples\n\ndata(data_example_landpred)\n\n\n"} {"package":"landpred","topic":"landpred-package","snippet":"### Name: landpred-package\n### Title: Landmark Prediction of a Survival Outcome\n### Aliases: landpred-package landpred\n### Keywords: survival\n\n### ** Examples\n\ndata(data_example_landpred)\nt0=2\ntau = 8\n\n####Landmark prediction with no covariate or short term information\nProb.Null(t0=t0,tau=tau,data=data_example_landpred)\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nnewdata = matrix(c(1,1,3,0,4,1,10,1,11,0), ncol = 2, byrow=TRUE)\nout = Prob.Null(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\nout$Prob\nout$newdata\n\n#Landmark prediction with covariate information only\nProb.Covariate(t0=t0,tau=tau,data=data_example_landpred)\nout = Prob.Covariate(t0=t0,tau=tau,data=data_example_landpred)\nout$Prob\nout$data\n\nnewdata = matrix(c(1,1,1, 3,0,1, 4,1,1, 10,1,0, 11,0,1), ncol = 3, byrow=TRUE)\nout = Prob.Covariate(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\nout$Prob\nout$newdata\n\n#Landmark prediction with covariate information and short term event information\n#note: computationally intensive commands below\n#Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred)\n#out = Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred)\n#out$data\n#data.plot = out$data\n#plot(data.plot$XS[data.plot$Z ==1], data.plot$Probability[data.plot$Z ==1], \n#pch = 20, xlim = c(0,t0))\n#points(data.plot$XS[data.plot$Z ==0], data.plot$Probability[data.plot$Z ==0], \n#pch = 20, col = 2)\n\nnewdata = matrix(c(1,1,0.5,1,0,\n3,0,1,1,1,\n4,1,1.5,1,0,\n10,1,5,1,0,\n11,0,11,0,1), ncol = 5, byrow=TRUE)\n#note: computationally intensive command below\n#out=Prob.Covariate.ShortEvent(t0=t0,tau=tau,data=data_example_landpred,newdata=newdata)\n#out$newdata\n\n\n\n"} {"package":"shortIRT","topic":"bp","snippet":"### Name: bp\n### Title: Benchmark Procedure\n### Aliases: bp\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf <- bp(data, starting_theta = true_theta, item_par = parameters, num_item = 5)\n# check the obtained short test form\nstf$item_stf\n# check the comparison between the short test form and the full-length test\nstf$summary\n\n\n"} {"package":"shortIRT","topic":"change_names","snippet":"### Name: change_names\n### Title: Change column names\n### Aliases: change_names\n\n### ** Examples\n\n# original data frame with 5 columns\ndata <- data.frame(matrix(1:20, nrow = 4, ncol = 5))\nchange_names(data)\n\n\n"} {"package":"shortIRT","topic":"cut_borders","snippet":"### Name: cut_borders\n### Title: Cut borders\n### Aliases: cut_borders\n\n### ** Examples\n\nx <- seq(-3, 3, length = 5)\ngroups <- cut(x, 5, include.lowest = TRUE)\nboundaries <- cut_borders(groups)\n\n\n"} {"package":"shortIRT","topic":"diff_theta","snippet":"### Name: diff_theta\n### Title: Difference between thetas\n### Aliases: diff_theta\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- 
runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf <- uip(data, starting_theta = true_theta, item_par = parameters, num_item = 5)\n# without starting theta\nmy_diff <- diff_theta(stf)\nhead(my_diff)\n\n\n"} {"package":"shortIRT","topic":"eip","snippet":"### Name: eip\n### Title: Equal Interval Procedure\n### Aliases: eip\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf <- eip(data, starting_theta = true_theta, item_par = parameters, num_item = 5)\n# check the obtained short test form\nstf$item_stf\n# check the comparison between the short test form and the full-length test\nstf$summary\n\n# Short test form with cut off values\nstf_cutoff <- eip(data, starting_theta = true_theta,\nitem_par = parameters, theta_targets = rep(2, 5))\nstf_cutoff$item_stf\n\n\n"} {"package":"shortIRT","topic":"plot_difference","snippet":"### Name: plot_difference\n### Title: Plot the difference between thetas\n### Aliases: plot_difference\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf <- uip(data, starting_theta = true_theta, item_par = parameters, num_item = 5)\n# compute the difference between starting theta and that estimated with the stf\nmy_diff <- diff_theta(stf)\n# plot the difference with default number of levels\nplot_difference(my_diff, type = \"diff\")\n# plot the absolute difference with 10 levels\nplot_difference(my_diff, type = \"absolute_diff\", levels = 10)\n\n\n"} {"package":"shortIRT","topic":"plot_tif","snippet":"### Name: plot_tif\n### Title: Plot Test Information Functions\n### Aliases: plot_tif\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf <- uip(data, starting_theta = true_theta, item_par = parameters, num_item = 5)\n# plot the test information function of the full-length test\nplot_tif(stf, tif = \"full\")\n# plot the test information of the full-length test and of the short test form\nplot_tif(stf, tif = \"both\")\n\n\n"} {"package":"shortIRT","topic":"uip","snippet":"### Name: uip\n### Title: Unequal interval procedure\n### Aliases: uip\n\n### ** Examples\n\n# set a seed to replicate the results\nset.seed(999)\n# Simulate person and item parameters\ntrue_theta <- rnorm(1000)\nb <- runif(30, -3, 3)\na <- runif(30, 0.6, 2)\nparameters <- data.frame(b, a)\n# simulate data\ndata <- sirt::sim.raschtype(true_theta, b = b, fixed.a = a)\nstf_uip = uip(data, starting_theta = true_theta, item_par = parameters, num_item = 10)\n# check the obtained short test form\nstf_uip$item_stf\n# check the comparison between the short test form and the full-length test\nstf_uip$summary\n\n\n"} {"package":"tm.plugin.lexisnexis","topic":"LexisNexisSource","snippet":"### Name: LexisNexisSource\n### Title: LexisNexis Source\n### Aliases: LexisNexisSource 
getElem.LexisNexisSource eoi.LexisNexisSource\n\n### ** Examples\n\n library(tm)\n file <- system.file(\"texts\", \"lexisnexis_test_en.html\",\n package = \"tm.plugin.lexisnexis\")\n corpus <- Corpus(LexisNexisSource(file))\n\n # See the contents of the documents\n inspect(corpus)\n\n # See meta-data associated with first article\n meta(corpus[[1]])\n\n\n"} {"package":"mapfit","topic":"as.gph","snippet":"### Name: as.gph\n### Title: Convert from HErlang to GPH\n### Aliases: as.gph\n\n### ** Examples\n\n## create a hyper Erlang with specific parameters\n(param <- herlang(shape=c(2,3), mixrate=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## convert to a general PH\nas.gph(param)\n\n\n\n"} {"package":"mapfit","topic":"as.map","snippet":"### Name: as.map\n### Title: Convert from ERHMM to MAP\n### Aliases: as.map\n\n### ** Examples\n\n## create an ER-HMM with specific parameters\n(param <- erhmm(shape=c(2,3), alpha=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## convert to a general MAP\nas.map(param)\n\n\n\n"} {"package":"mapfit","topic":"cf1","snippet":"### Name: cf1\n### Title: Create CF1\n### Aliases: cf1\n\n### ** Examples\n\n## create a CF1 with 5 phases\n(param1 <- cf1(5))\n\n## create a CF1 with 5 phases\n(param1 <- cf1(size=5))\n\n## create a CF1 with specific parameters\n(param2 <- cf1(alpha=c(1,0,0), rate=c(1.0,2.0,3.0)))\n\n\n\n"} {"package":"mapfit","topic":"cf1.param","snippet":"### Name: cf1.param\n### Title: Create CF1 with data information\n### Aliases: cf1.param\n\n### ** Examples\n\n## Generate group data\ndat <- data.frame.phase.group(c(1,2,0,4), seq(0,10,length.out=5))\n\n## Create an instance of CF1\np <- cf1.param(data=dat, size=5)\n\n\n\n"} {"package":"mapfit","topic":"data.frame.map.group","snippet":"### Name: data.frame.map.group\n### Title: Create group data for map\n### Aliases: data.frame.map.group\n\n### ** Examples\n\nt <- c(1,1,1,1,1)\nn <- c(1,3,0,0,1)\n\ndat <- data.frame.map.group(counts=n, intervals=t)\nmean(dat)\nprint(dat)\n\n\n\n"} {"package":"mapfit","topic":"data.frame.map.time","snippet":"### Name: data.frame.map.time\n### Title: Create data for map\n### Aliases: data.frame.map.time\n\n### ** Examples\n\nx <- runif(10)\n\ndat <- data.frame.map.time(time=x)\nmean(dat)\nprint(dat)\n\n\n\n"} {"package":"mapfit","topic":"data.frame.phase.group","snippet":"### Name: data.frame.phase.group\n### Title: Create group data for phase\n### Aliases: data.frame.phase.group\n\n### ** Examples\n\ndat <- data.frame.phase.group(counts=c(1,2,1,1,0,0,1,4))\nprint(dat)\nmean(dat)\n\n\n\n"} {"package":"mapfit","topic":"data.frame.phase.time","snippet":"### Name: data.frame.phase.time\n### Title: Create data for phase with weighted sample\n### Aliases: data.frame.phase.time\n\n### ** Examples\n\nx <- runif(10)\nw <- runif(10)\n\ndat <- data.frame.phase.time(x=x, weights=w)\nprint(dat)\nmean(dat)\n\n\n\n"} {"package":"mapfit","topic":"dphase","snippet":"### Name: dphase\n### Title: Probability density function of PH distribution\n### Aliases: dphase\n\n### ** Examples\n\n## create a PH with specific parameters\n(phdist <- ph(alpha=c(1,0,0),\n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n xi=c(2,2,0)))\n\n## p.d.f. 
for 0, 0.1, ..., 1\ndphase(x=seq(0, 1, 0.1), ph=phdist)\n\n\n\n"} {"package":"mapfit","topic":"gmmpp","snippet":"### Name: gmmpp\n### Title: Create GMMPP\n### Aliases: gmmpp\n\n### ** Examples\n\n## create a map (full matrix) with 5 phases\n(param1 <- gmmpp(5))\n\n## create a map with specific parameters\n(param2 <- gmmpp(alpha=c(1,0,0),\n D0=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n D1=rbind(c(2,0,0),c(0,2,0),c(0,0,0))))\n\n\n\n"} {"package":"mapfit","topic":"gph.param","snippet":"### Name: gph.param\n### Title: Generate GPH using the information on data\n### Aliases: gph.param\n\n### ** Examples\n\n## Create data\nwsample <- rweibull(10, shape=2)\n(dat <- data.frame.phase.time(x=wsample))\n\n## Generate PH that is fitted to dat\n(model <- gph.param(data=dat, skel=ph(5)))\n\n\n\n"} {"package":"mapfit","topic":"herlang","snippet":"### Name: herlang\n### Title: Create HErlang distribution\n### Aliases: herlang\n\n### ** Examples\n\n## create a hyper Erlang consisting of two Erlang\n## with shape parameters 2 and 3.\n(param1 <- herlang(shape=c(2,3)))\n\n## create a hyper Erlang with specific parameters\n(param2 <- herlang(shape=c(2,3), mixrate=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## convert to a general PH\nas.gph(param2)\n\n## p.d.f. for 0, 0.1, ..., 1\n(dphase(x=seq(0, 1, 0.1), ph=param2))\n\n## c.d.f. for 0, 0.1, ..., 1\n(pphase(q=seq(0, 1, 0.1), ph=param2))\n\n## generate 10 samples\n(rphase(n=10, ph=param2))\n\n\n\n"} {"package":"mapfit","topic":"herlang.param","snippet":"### Name: herlang.param\n### Title: Determine hyper-Erlang parameters\n### Aliases: herlang.param\n\n### ** Examples\n\n## Create data\nwsample <- rweibull(10, shape=2)\n(dat <- data.frame.phase.time(x=wsample))\n\n## Generate PH that is fitted to dat\n(model <- herlang.param(data=dat, shape=c(1,2,3)))\n\n\n\n"} {"package":"mapfit","topic":"map","snippet":"### Name: map\n### Title: Create MAP\n### Aliases: map\n\n### ** Examples\n\n## create a map (full matrix) with 5 phases\n(param1 <- map(5))\n\n## create a map with specific parameters\n(param2 <- map(alpha=c(1,0,0),\n D0=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n D1=rbind(c(2,0,0),c(0,2,0),c(0,0,0))))\n\n\n\n"} {"package":"mapfit","topic":"map.acf","snippet":"### Name: map.acf\n### Title: k-lag correlation of MAP\n### Aliases: map.acf\n\n### ** Examples\n\n## create an MAP with specific parameters\n(param1 <- map(alpha=c(1,0,0),\n D0=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-4)),\n D1=rbind(c(1,1,0),c(1,0,1),c(2,0,1))))\n\n## create an ER-HMM with specific parameters\n(param2 <- erhmm(shape=c(2,3), alpha=c(0.3,0.7),\n rate=c(1.0,10.0),\n P=rbind(c(0.3, 0.7), c(0.1, 0.9))))\n\nmap.acf(map=param1)\nmap.acf(map=param2)\n\n\n\n"} {"package":"mapfit","topic":"map.jmoment","snippet":"### Name: map.jmoment\n### Title: Joint moments of MAP\n### Aliases: map.jmoment\n\n### ** Examples\n\n## create an MAP with specific parameters\n(param1 <- map(alpha=c(1,0,0),\n D0=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-4)),\n D1=rbind(c(1,1,0),c(1,0,1),c(2,0,1))))\n\n## create an ER-HMM with specific parameters\n(param2 <- erhmm(shape=c(2,3), alpha=c(0.3,0.7),\n rate=c(1.0,10.0),\n P=rbind(c(0.3, 0.7), c(0.1, 0.9))))\n\nmap.jmoment(lag=1, map=param1)\nmap.jmoment(lag=1, map=param2)\n\n\n\n"} {"package":"mapfit","topic":"map.mmoment","snippet":"### Name: map.mmoment\n### Title: Marginal moments of MAP\n### Aliases: map.mmoment\n\n### ** Examples\n\n## create an MAP with specific parameters\n(param1 <- map(alpha=c(1,0,0),\n D0=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-4)),\n D1=rbind(c(1,1,0),c(1,0,1),c(2,0,1))))\n\n## 
create an ER-HMM with specific parameters\n(param2 <- erhmm(shape=c(2,3), alpha=c(0.3,0.7),\n rate=c(1.0,10.0),\n P=rbind(c(0.3, 0.7), c(0.1, 0.9))))\n\nmap.mmoment(k=3, map=param1)\nmap.mmoment(k=3, map=param2)\n\n\n\n"} {"package":"mapfit","topic":"mapfit.group","snippet":"### Name: mapfit.group\n### Title: MAP fitting with grouped data\n### Aliases: mapfit.group\n\n### ** Examples\n\n## load trace data\ndata(BCpAug89)\nBCpAug89s <- head(BCpAug89, 50)\n\n## make grouped data\nBCpAug89.group <- hist(cumsum(BCpAug89s),\n breaks=seq(0, 0.15, 0.005),\n plot=FALSE)\n \n## MAP fitting for general MAP\n(result1 <- mapfit.group(map=map(2),\n counts=BCpAug89.group$counts,\n breaks=BCpAug89.group$breaks))\n## MAP fitting for MMPP\n(result2 <- mapfit.group(map=mmpp(2),\n counts=BCpAug89.group$counts,\n breaks=BCpAug89.group$breaks))\n \n## MAP fitting with approximate MMPP\n(result3 <- mapfit.group(map=gmmpp(2),\n counts=BCpAug89.group$counts,\n breaks=BCpAug89.group$breaks))\n\n## marginal moments for estimated MAP\nmap.mmoment(k=3, map=result1$model)\nmap.mmoment(k=3, map=result2$model)\nmap.mmoment(k=3, map=result3$model)\n\n## joint moments for estimated MAP\nmap.jmoment(lag=1, map=result1$model)\nmap.jmoment(lag=1, map=result2$model)\nmap.jmoment(lag=1, map=result3$model)\n\n## lag-k correlation\nmap.acf(map=result1$model)\nmap.acf(map=result2$model)\nmap.acf(map=result3$model)\n\n\n\n"} {"package":"mapfit","topic":"mapfit.point","snippet":"### Name: mapfit.point\n### Title: MAP fitting with point data\n### Aliases: mapfit.point\n\n### ** Examples\n\n## load trace data\ndata(BCpAug89)\nBCpAug89s <- head(BCpAug89, 50)\n\n## MAP fitting for general MAP\n(result1 <- mapfit.point(map=map(2), x=cumsum(BCpAug89s)))\n\n## MAP fitting for MMPP\n(result2 <- mapfit.point(map=mmpp(2), x=cumsum(BCpAug89s)))\n\n## MAP fitting for ER-HMM\n(result3 <- mapfit.point(map=erhmm(3), x=cumsum(BCpAug89s)))\n\n## marginal moments for estimated MAP\nmap.mmoment(k=3, map=result1$model)\nmap.mmoment(k=3, map=result2$model)\nmap.mmoment(k=3, map=result3$model)\n\n## joint moments for estimated MAP\nmap.jmoment(lag=1, map=result1$model)\nmap.jmoment(lag=1, map=result2$model)\nmap.jmoment(lag=1, map=result3$model)\n\n## lag-k correlation\nmap.acf(map=result1$model)\nmap.acf(map=result2$model)\nmap.acf(map=result3$model)\n\n\n\n"} {"package":"mapfit","topic":"mmpp","snippet":"### Name: mmpp\n### Title: Create an MMPP\n### Aliases: mmpp\n\n### ** Examples\n\n## create an MMPP with 5 phases\n(param1 <- mmpp(5))\n\n\n\n"} {"package":"mapfit","topic":"ph","snippet":"### Name: ph\n### Title: Create GPH distribution\n### Aliases: ph\n\n### ** Examples\n\n## create a PH (full matrix) with 5 phases\n(param1 <- ph(5))\n\n## create a PH (full matrix) with 5 phases\n(param1 <- ph(size=5))\n\n## create a PH with specific parameters\n(param2 <- ph(alpha=c(1,0,0),\n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n xi=c(2,2,0)))\n\n\n\n"} {"package":"mapfit","topic":"ph.bidiag","snippet":"### Name: ph.bidiag\n### Title: Create a bi-diagonal PH distribution\n### Aliases: ph.bidiag\n\n### ** Examples\n\n## create a bidiagonal PH with 5 phases\n(param1 <- ph.bidiag(5))\n\n\n\n"} {"package":"mapfit","topic":"ph.coxian","snippet":"### Name: ph.coxian\n### Title: Create a Coxian PH distribution\n### Aliases: ph.coxian\n\n### ** Examples\n\n## create a Coxian PH with 5 phases\n(param1 <- ph.coxian(5))\n\n\n\n"} {"package":"mapfit","topic":"ph.mean","snippet":"### Name: ph.mean\n### Title: Mean of PH distribution\n### Aliases: ph.mean\n\n### ** 
Examples\n\n## create a PH with specific parameters\n(param1 <- ph(alpha=c(1,0,0), \n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)), \n xi=c(2,2,0)))\n\n## create a CF1 with specific parameters\n(param2 <- cf1(alpha=c(1,0,0), rate=c(1.0,2.0,3.0)))\n\n## create a hyper Erlang with specific parameters\n(param3 <- herlang(shape=c(2,3), mixrate=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## mean\nph.mean(param1)\nph.mean(param2)\nph.mean(param3)\n\n\n\n"} {"package":"mapfit","topic":"ph.moment","snippet":"### Name: ph.moment\n### Title: Moments of PH distribution\n### Aliases: ph.moment\n\n### ** Examples\n\n## create a PH with specific parameters\n(param1 <- ph(alpha=c(1,0,0), \n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)), \n xi=c(2,2,0)))\n\n## create a CF1 with specific parameters\n(param2 <- cf1(alpha=c(1,0,0), rate=c(1.0,2.0,3.0)))\n\n## create a hyper Erlang with specific parameters\n(param3 <- herlang(shape=c(2,3), mixrate=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## up to 5 moments \nph.moment(5, param1)\nph.moment(5, param2)\nph.moment(5, param3)\n\n\n\n"} {"package":"mapfit","topic":"ph.tridiag","snippet":"### Name: ph.tridiag\n### Title: Create a tri-diagonal PH distribution\n### Aliases: ph.tridiag\n\n### ** Examples\n\n## create a tridiagonal PH with 5 phases\n(param1 <- ph.tridiag(5))\n\n\n\n"} {"package":"mapfit","topic":"ph.var","snippet":"### Name: ph.var\n### Title: Variance of PH distribution\n### Aliases: ph.var\n\n### ** Examples\n\n## create a PH with specific parameters\n(param1 <- ph(alpha=c(1,0,0), \n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)), \n xi=c(2,2,0)))\n\n## create a CF1 with specific parameters\n(param2 <- cf1(alpha=c(1,0,0), rate=c(1.0,2.0,3.0)))\n\n## create a hyper Erlang with specific parameters\n(param3 <- herlang(shape=c(2,3), mixrate=c(0.3,0.7), rate=c(1.0,10.0)))\n\n## variance\nph.var(param1)\nph.var(param2)\nph.var(param3)\n\n\n\n"} {"package":"mapfit","topic":"phfit.3mom","snippet":"### Name: phfit.3mom\n### Title: PH fitting with three moments\n### Aliases: phfit.3mom\n\n### ** Examples\n\n## Three moment matching\n## Moments of Weibull(shape=2, scale=1); (0.886227, 1.0, 1.32934)\n(result1 <- phfit.3mom(0.886227, 1.0, 1.32934))\n\n## Three moment matching\n## Moments of Weibull(shape=2, scale=1); (0.886227, 1.0, 1.32934)\n(result2 <- phfit.3mom(0.886227, 1.0, 1.32934, method=\"Bobbio05\"))\n\n## mean\nph.mean(result1)\nph.mean(result2)\n\n## variance\nph.var(result1)\nph.var(result2)\n\n## up to 5 moments \nph.moment(5, result1)\nph.moment(5, result2)\n\n\n\n"} {"package":"mapfit","topic":"phfit.density","snippet":"### Name: phfit.density\n### Title: PH fitting with density function\n### Aliases: phfit.density\n\n### ** Examples\n\n####################\n##### truncated density\n####################\n\n## PH fitting for general PH\n(result1 <- phfit.density(ph=ph(2), f=dnorm, mean=3, sd=1))\n\n## PH fitting for CF1\n(result2 <- phfit.density(ph=cf1(2), f=dnorm, mean=3, sd=1))\n\n## PH fitting for hyper Erlang\n(result3 <- phfit.density(ph=herlang(3), f=dnorm, mean=3, sd=1))\n\n## mean\nph.mean(result1$model)\nph.mean(result2$model)\nph.mean(result3$model)\n\n## variance\nph.var(result1$model)\nph.var(result2$model)\nph.var(result3$model)\n\n## up to 5 moments \nph.moment(5, result1$model)\nph.moment(5, result2$model)\nph.moment(5, result3$model)\n\n\n\n"} {"package":"mapfit","topic":"phfit.group","snippet":"### Name: phfit.group\n### Title: PH fitting with grouped data\n### Aliases: phfit.group\n\n### ** Examples\n\n## make sample\nwsample <- rweibull(n=100, shape=2, 
scale=1)\nwgroup <- hist(x=wsample, breaks=\"fd\", plot=FALSE)\n\n## PH fitting for general PH\n(result1 <- phfit.group(ph=ph(2), counts=wgroup$counts, breaks=wgroup$breaks))\n\n## PH fitting for CF1\n(result2 <- phfit.group(ph=cf1(2), counts=wgroup$counts, breaks=wgroup$breaks))\n\n## PH fitting for hyper Erlang\n(result3 <- phfit.group(ph=herlang(3), counts=wgroup$counts, breaks=wgroup$breaks))\n\n## mean\nph.mean(result1$model)\nph.mean(result2$model)\nph.mean(result3$model)\n\n## variance\nph.var(result1$model)\nph.var(result2$model)\nph.var(result3$model)\n\n## up to 5 moments \nph.moment(5, result1$model)\nph.moment(5, result2$model)\nph.moment(5, result3$model)\n\n\n\n"} {"package":"mapfit","topic":"phfit.point","snippet":"### Name: phfit.point\n### Title: PH fitting with point data\n### Aliases: phfit.point\n\n### ** Examples\n\n## make sample\nwsample <- rweibull(n=100, shape=2, scale=1)\n\n## PH fitting for general PH\n(result1 <- phfit.point(ph=ph(2), x=wsample))\n\n## PH fitting for CF1\n(result2 <- phfit.point(ph=cf1(2), x=wsample))\n\n## PH fitting for hyper Erlang\n(result3 <- phfit.point(ph=herlang(3), x=wsample))\n\n## mean\nph.mean(result1$model)\nph.mean(result2$model)\nph.mean(result3$model)\n\n## variance\nph.var(result1$model)\nph.var(result2$model)\nph.var(result3$model)\n\n## up to 5 moments \nph.moment(5, result1$model)\nph.moment(5, result2$model)\nph.moment(5, result3$model)\n\n\n\n"} {"package":"mapfit","topic":"pphase","snippet":"### Name: pphase\n### Title: Distribution function of PH distribution\n### Aliases: pphase\n\n### ** Examples\n\n## create a PH with specific parameters\n(phdist <- ph(alpha=c(1,0,0),\n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n xi=c(2,2,0)))\n\n## c.d.f. for 0, 0.1, ..., 1\npphase(q=seq(0, 1, 0.1), ph=phdist)\n\n\n\n"} {"package":"mapfit","topic":"rphase","snippet":"### Name: rphase\n### Title: Sampling of PH distributions\n### Aliases: rphase\n\n### ** Examples\n\n## create a PH with specific parameters\n(phdist <- ph(alpha=c(1,0,0),\n Q=rbind(c(-4,2,0),c(2,-5,1),c(1,0,-1)),\n xi=c(2,2,0)))\n\n## generate 10 samples\nrphase(n=10, ph=phdist)\n\n\n\n"} {"package":"bipartiteD3","topic":"Array2DF","snippet":"### Name: Array2DF\n### Title: Convert bipartite-style arrays to dataframe\n### Aliases: Array2DF\n\n### ** Examples\n\n\n## Not run: \n##D data(Safariland, vazquenc, package='bipartite')\n##D allin1 <- bipartite::webs2array(Safariland, vazquenc)\n##D Array2DF(allin1)\n## End(Not run)\n\n\n"} {"package":"bipartiteD3","topic":"BP_JS_Writer","snippet":"### Name: BP_JS_Writer\n### Title: Generate JavaScript file for a bipartite network\n### Aliases: BP_JS_Writer\n\n### ** Examples\n\n\n## Simple Data Set\ntestdata <- data.frame(higher = c(\"bee1\",\"bee1\",\"bee1\",\"bee2\",\"bee1\",\"bee3\"),\nlower = c(\"plant1\",\"plant2\",\"plant1\",\"plant2\",\"plant3\",\"plant4\"),\n Meadow=c(5,9,1,2,3,7))\n\nBP_JS_Writer(testdata,PRINT=TRUE)\n\n## tidy up (to keep CRAN happy, not needed in real life use)\nfile.remove('vizjs.js')\nfile.remove('JSBP.js')\nfile.remove('JSBP.css')\n\n\n\n"} {"package":"bipartiteD3","topic":"List2DF","snippet":"### Name: List2DF\n### Title: Convert bipartite-style list of matrices to dataframe\n### Aliases: List2DF\n\n### ** Examples\n\n\n## Not run: \n##D testdata <- data.frame(higher = c(\"bee1\",\"bee1\",\"bee1\",\"bee2\",\"bee1\",\"bee3\"),\n##D lower = c(\"plant1\",\"plant2\",\"plant1\",\"plant2\",\"plant3\",\"plant4\"),\n##D webID = c(\"meadow\",\"meadow\",\"meadow\",\"meadow\",\"bog\",\"bog\"), 
freq=c(5,9,1,2,3,7))\n##D bipartite::frame2webs(testdata, type.out = 'list')-> SmallTestWeb\n##D \n##D List2DF(SmallTestWeb)\n## End(Not run)\n\n\n"} {"package":"bipartiteD3","topic":"Matrix2DF","snippet":"### Name: Matrix2DF\n### Title: Convert a bipartite-style matrix to dataframe\n### Aliases: Matrix2DF\n\n### ** Examples\n\n\ndata(Safariland, package='bipartite')\nMatrix2DF(Safariland)\n\n\n\n"} {"package":"bipartiteD3","topic":"OrderByCrossover","snippet":"### Name: OrderByCrossover\n### Title: Find Species Order That Minimises Crossover\n### Aliases: OrderByCrossover\n\n### ** Examples\n\n\n## Not run: \n##D \n##D data(Safariland, package='bipartite')\n##D \n##D \n##D S_orders <- OrderByCrossover(Safariland)\n##D \n##D bipartite_D3(Safariland,\n##D filename = 'SF_sorted',\n##D SortPrimary = S_orders[[1]],\n##D SortSecondary = S_orders[[2]])\n## End(Not run)\n\n\n\n"} {"package":"bipartiteD3","topic":"bipartite_D3","snippet":"### Name: bipartite_D3\n### Title: Generate interactive bipartite networks\n### Aliases: bipartite_D3\n\n### ** Examples\n\n## Simple Bipartite Style Data Set:\n## Not run: \n##D testdata <- data.frame(higher = c(\"bee1\",\"bee1\",\"bee1\",\"bee2\",\"bee1\",\"bee3\"),\n##D lower = c(\"plant1\",\"plant2\",\"plant1\",\"plant2\",\"plant3\",\"plant4\"),\n##D webID = c(\"meadow\",\"meadow\",\"meadow\",\"meadow\",\"meadow\",\"meadow\"), freq=c(5,9,1,2,3,7))\n##D SmallTestWeb <- bipartite::frame2webs(testdata,type.out=\"array\")\n##D \n##D \n##D bipartite_D3(SmallTestWeb, filename = 'demo1')\n## End(Not run)\n## For more examples see vignette\n\n\n"} {"package":"gromovlab","topic":"gromovdist","snippet":"### Name: gromovdist\n### Title: Gromov-Hausdorff-type distances of labelled metric spaces\n### Aliases: gromovdist gromovdist,list-method gromovdist,phylo-method\n### gromovdist,multiPhylo-method gromovdist,dist-method\n### gromovdist,dissimilarity-method gromovdist,matrix-method\n### gromovdist,igraph-method gromovdist.list gromovdist.phylo\n### gromovdist.multiPhylo gromovdist.dist gromovdist.dissimilarity\n### gromovdist.matrix gromovdist.igraph gromovdist.default\n\n### ** Examples\nlibrary(\"ape\")\ntr1<-rtree(n=10)\ntr2<-rtree(n=10)\ngromovdist(tr1,tr2,\"l1\")\ngromovdist(tr1,tr2,\"l2\")\n#thesame, but slower\ngromovdist(d1=tr1,d2=tr2,type=\"lp\",p=2)\ngromovdist(tr1,tr2,\"linf\")\n\n\n"} {"package":"logistf","topic":"CLIP.confint","snippet":"### Name: CLIP.confint\n### Title: Confidence Intervals after Multiple Imputation: Combination of\n### Likelihood Profiles\n### Aliases: CLIP.confint\n\n### ** Examples\n\n#generate data set with NAs \nfreq=c(5,2,2,7,5,4)\ny<-c(rep(1,freq[1]+freq[2]), rep(0,freq[3]+freq[4]), rep(1,freq[5]), rep(0,freq[6]))\nx<-c(rep(1,freq[1]), rep(0,freq[2]), rep(1,freq[3]), rep(0,freq[4]), \nrep(NA,freq[5]),rep(NA,freq[6]))\ntoy<-data.frame(x=x,y=y)\n\n# impute data set 5 times\nset.seed(169)\ntoymi<-list(0)\nfor(i in 1:5){\n toymi[[i]]<-toy\n y1<-toymi[[i]]$y==1 & is.na(toymi[[i]]$x)\n y0<-toymi[[i]]$y==0 & is.na(toymi[[i]]$x) \n xnew1<-rbinom(sum(y1),1,freq[1]/(freq[1]+freq[2]))\n xnew0<-rbinom(sum(y0),1,freq[3]/(freq[3]+freq[4]))\n toymi[[i]]$x[y1==TRUE]<-xnew1\n toymi[[i]]$x[y0==TRUE]<-xnew0\n }\n \n # logistf analyses of each imputed data set\n fit.list<-lapply(1:5, function(X) logistf(data=toymi[[X]], y~x, pl=TRUE))\n \n # CLIP confidence limits\n CLIP.confint(obj=fit.list, data = toymi)\n \n\n\n"} {"package":"logistf","topic":"CLIP.profile","snippet":"### Name: CLIP.profile\n### Title: Combine Profile Likelihoods from Imputed-Data 
Model Fits\n### Aliases: CLIP.profile\n\n### ** Examples\n\n\n#generate data set with NAs \nfreq=c(5,2,2,7,5,4)\ny<-c(rep(1,freq[1]+freq[2]), rep(0,freq[3]+freq[4]), rep(1,freq[5]), rep(0,freq[6]))\nx<-c(rep(1,freq[1]), rep(0,freq[2]), rep(1,freq[3]), rep(0,freq[4]), rep(NA,freq[5]),\nrep(NA,freq[6]))\ntoy<-data.frame(x=x,y=y)\n\n# impute data set 5 times\nset.seed(169)\ntoymi<-list(0)\nfor(i in 1:5){\n toymi[[i]]<-toy\n y1<-toymi[[i]]$y==1 & is.na(toymi[[i]]$x)\n y0<-toymi[[i]]$y==0 & is.na(toymi[[i]]$x)\n xnew1<-rbinom(sum(y1),1,freq[1]/(freq[1]+freq[2]))\n xnew0<-rbinom(sum(y0),1,freq[3]/(freq[3]+freq[4]))\n toymi[[i]]$x[y1==TRUE]<-xnew1\n toymi[[i]]$x[y0==TRUE]<-xnew0\n}\n\n# logistf analyses of each imputed data set\nfit.list<-lapply(1:5, function(X) logistf(data=toymi[[X]], y~x, pl=TRUE))\n\n# CLIP profile\nxprof<-CLIP.profile(obj=fit.list, variable=\"x\",data =toymi, keep=TRUE)\nplot(xprof)\n\n#plot as CDF\nplot(xprof, \"cdf\")\n\n#plot as density\nplot(xprof, \"density\")\n\n\n\n"} {"package":"logistf","topic":"PVR.confint","snippet":"### Name: PVR.confint\n### Title: Pseudo Variance Modification of Rubin's Rule\n### Aliases: PVR.confint\n\n### ** Examples\n\n#generate data set with NAs\nfreq=c(5,2,2,7,5,4)\ny<-c(rep(1,freq[1]+freq[2]), rep(0,freq[3]+freq[4]), rep(1,freq[5]), rep(0,freq[6]))\nx<-c(rep(1,freq[1]), rep(0,freq[2]), rep(1,freq[3]), rep(0,freq[4]), rep(NA,freq[5]),\n rep(NA,freq[6]))\ntoy<-data.frame(x=x,y=y)\n\n# impute data set 5 times \nset.seed(169)\ntoymi<-list(0)\nfor(i in 1:5){\n toymi[[i]]<-toy\n y1<-toymi[[i]]$y==1 & is.na(toymi[[i]]$x)\n y0<-toymi[[i]]$y==0 & is.na(toymi[[i]]$x)\n xnew1<-rbinom(sum(y1),1,freq[1]/(freq[1]+freq[2]))\n xnew0<-rbinom(sum(y0),1,freq[3]/(freq[3]+freq[4]))\n toymi[[i]]$x[y1==TRUE]<-xnew1\n toymi[[i]]$x[y0==TRUE]<-xnew0\n }\n \n# logistf analyses of each imputed data set\nfit.list<-lapply(1:5, function(X) logistf(data=toymi[[X]], y~x, pl=TRUE))\n\n# PVR confidence limits\nPVR.confint(obj=fit.list)\n\n\n\n"} {"package":"logistf","topic":"add1.logistf","snippet":"### Name: add1.logistf\n### Title: Add or Drop All Possible Single Terms to/from a 'logistf' Model\n### Aliases: add1.logistf\n\n### ** Examples\n\ndata(sex2) \nfit<-logistf(data=sex2, case~1, pl=FALSE) \nadd1(fit, scope=c(\"dia\", \"age\"), data=sex2)\n \nfit2<-logistf(data=sex2, case~age+oc+dia+vic+vicl+vis) \ndrop1(fit2, data=sex2)\n\n\n\n"} {"package":"logistf","topic":"anova.logistf","snippet":"### Name: anova.logistf\n### Title: Analysis of Penalized Deviance for 'logistf' Models\n### Aliases: anova.logistf\n\n### ** Examples\n\ndata(sex2) \nfit<-logistf(data=sex2, case~age+oc+dia+vic+vicl+vis)\n\n#simultaneous test of variables vic, vicl, vis:\nanova(fit, formula=~vic+vicl+vis)\n\n#test versus a simpler model\nfit2<-logistf(data=sex2, case~age+oc+dia)\n# or: fit2<-update(fit, case~age+oc+dia)\nanova(fit,fit2)\n\n# comparison of non-nested models (with different df):\nfit3<-logistf(data=sex2, case~age+vic+vicl+vis)\nanova(fit2,fit3, method=\"PLR\")\n\n\n\n\n"} {"package":"logistf","topic":"backward","snippet":"### Name: backward\n### Title: Backward Elimination/Forward Selection of Model Terms in logistf\n### Models\n### Aliases: backward backward.logistf backward.flic forward\n### forward.logistf\n\n### ** Examples\n\ndata(sex2) \nfit<-logistf(data=sex2, case~1, pl=FALSE) \nfitf<-forward(fit, scope=c(\"dia\", \"age\"), data=sex2) \n\nfit2<-logistf(data=sex2, case~age+oc+vic+vicl+vis+dia) \nfitb<-backward(fit2, data=sex2)\n\n\n\n"} 
{"package":"logistf","topic":"emmeans-logistf","snippet":"### Name: emmeans-logistf\n### Title: Emmeans support for logistf\n### Aliases: emmeans-logistf\n\n### ** Examples\n\n\ndata(sex2)\nfit<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sex2)\n\nemmeans::emmeans(fit, ~age+dia)\n\n\n\n"} {"package":"logistf","topic":"flac","snippet":"### Name: flac\n### Title: FLAC - Firth's logistic regression with added covariate\n### Aliases: flac flac.default flac.logistf\n\n### ** Examples\n\n#With formula and data:\ndata(sex2)\nflac(case ~ age + oc + vic + vicl + vis + dia, sex2)\n\n#With a logistf object:\nlf <- logistf(formula = case ~ age + oc + vic + vicl + vis + dia, data = sex2)\nflac(lf, data=sex2)\n\n\n\n"} {"package":"logistf","topic":"flic","snippet":"### Name: flic\n### Title: FLIC - Firth's logistic regression with intercept correction\n### Aliases: flic flic.default flic.logistf\n\n### ** Examples\n\n#With formula and data:\ndata(sex2)\nflic(case ~ age + oc + vic + vicl + vis + dia, sex2)\n\n#With a logistf object:\nlf <- logistf(formula = case ~ age + oc + vic + vicl + vis + dia, data = sex2)\nflic(lf)\n\n\n\n"} {"package":"logistf","topic":"logistf","snippet":"### Name: logistf\n### Title: Firth's Bias-Reduced Logistic Regression\n### Aliases: logistf\n\n### ** Examples\n\ndata(sex2)\nfit<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sex2)\nsummary(fit)\nnobs(fit)\ndrop1(fit)\nplot(profile(fit,variable=\"dia\"))\nextractAIC(fit)\n\nfit1<-update(fit, case ~ age+oc+vic+vicl+vis)\nextractAIC(fit1)\nanova(fit,fit1)\n\ndata(sexagg)\nfit2<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sexagg, weights=COUNT)\nsummary(fit2)\n\n# simulated SNP example\nset.seed(72341)\nsnpdata<-rbind(\n matrix(rbinom(2000,2,runif(2000)*0.3),100,20),\n matrix(rbinom(2000,2,runif(2000)*0.5),100,20))\ncolnames(snpdata)<-paste(\"SNP\",1:20,\"_\",sep=\"\")\nsnpdata<-as.data.frame(snpdata)\nsnpdata$case<-c(rep(0,100),rep(1,100))\n\nfitsnp<-logistf(data=snpdata, formula=case~1, pl=FALSE)\nadd1(fitsnp, scope=paste(\"SNP\",1:20,\"_\",sep=\"\"), data=snpdata)\nfitf<-forward(fitsnp, scope = paste(\"SNP\",1:20,\"_\",sep=\"\"), data=snpdata)\nfitf\n\n\n\n"} {"package":"logistf","topic":"logistf.control","snippet":"### Name: logistf.control\n### Title: Control Parameters for 'logistf'\n### Aliases: logistf.control\n\n### ** Examples\n\ndata(sexagg)\nfit2<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sexagg, weights=COUNT, \ncontrol=logistf.control(maxstep=1))\nsummary(fit2)\n\n\n\n"} {"package":"logistf","topic":"logistf.mod.control","snippet":"### Name: logistf.mod.control\n### Title: Controls additional parameters for 'logistf'\n### Aliases: logistf.mod.control\n\n### ** Examples\n\ndata(sexagg)\nfit2<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sexagg, weights=COUNT, \nmodcontrol=logistf.mod.control(terms.fit=c(1,2)))\nsummary(fit2)\n\n\n\n"} {"package":"logistf","topic":"logistftest","snippet":"### Name: logistftest\n### Title: Penalized likelihood ratio test\n### Aliases: logistftest\n\n### ** Examples\n\ndata(sex2) \nfit<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sex2)\nlogistftest(fit, test = ~ vic + vicl - 1, values = c(2, 0))\n\n\n\n\n"} {"package":"logistf","topic":"logistpl.control","snippet":"### Name: logistpl.control\n### Title: Control Parameters for logistf Profile Likelihood Confidence\n### Interval Estimation\n### Aliases: logistpl.control\n\n### ** Examples\n\ndata(sexagg)\nfit2<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sexagg, weights=COUNT, \n 
plcontrol=logistpl.control(maxstep=1))\nsummary(fit2)\n\n\n\n"} {"package":"logistf","topic":"plot.logistf.profile","snippet":"### Name: plot.logistf.profile\n### Title: 'plot' Method for 'logistf' Likelihood Profiles\n### Aliases: plot.logistf.profile\n\n### ** Examples\n\n\ndata(sex2) \nfit<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sex2)\nplot(profile(fit,variable=\"dia\"))\nplot(profile(fit,variable=\"dia\"), \"cdf\")\nplot(profile(fit,variable=\"dia\"), \"density\")\n\n#generate data set with NAs\nfreq=c(5,2,2,7,5,4)\ny<-c(rep(1,freq[1]+freq[2]), rep(0,freq[3]+freq[4]), rep(1,freq[5]), rep(0,freq[6]))\nx<-c(rep(1,freq[1]), rep(0,freq[2]), rep(1,freq[3]), rep(0,freq[4]), rep(NA,freq[5]),\n rep(NA,freq[6]))\ntoy<-data.frame(x=x,y=y)\n\n# impute data set 5 times\nset.seed(169)\ntoymi<-list(0)\nfor(i in 1:5){\n toymi[[i]]<-toy\n y1<-toymi[[i]]$y==1 & is.na(toymi[[i]]$x)\n y0<-toymi[[i]]$y==0 & is.na(toymi[[i]]$x)\n xnew1<-rbinom(sum(y1),1,freq[1]/(freq[1]+freq[2]))\n xnew0<-rbinom(sum(y0),1,freq[3]/(freq[3]+freq[4]))\n toymi[[i]]$x[y1==TRUE]<-xnew1\n toymi[[i]]$x[y0==TRUE]<-xnew0\n }\n \n# logistf analyses of each imputed data set\nfit.list<-lapply(1:5, function(X) logistf(data=toymi[[X]], y~x, pl=TRUE))\n\n# CLIP profile \nxprof<-CLIP.profile(obj=fit.list, variable=\"x\", data=toymi, keep=TRUE)\nplot(xprof)\n\n#plot as CDF\nplot(xprof, \"cdf\")\n\n#plot as density\nplot(xprof, \"density\")\n\n\n\n"} {"package":"logistf","topic":"profile.logistf","snippet":"### Name: profile.logistf\n### Title: Compute Profile Penalized Likelihood\n### Aliases: profile.logistf\n\n### ** Examples\n\ndata(sex2)\nfit<-logistf(case ~ age+oc+vic+vicl+vis+dia, data=sex2)\nplot(profile(fit,variable=\"dia\"))\nplot(profile(fit,variable=\"dia\"), \"cdf\")\nplot(profile(fit,variable=\"dia\"), \"density\")\n\n\n\n"} {"package":"terminaldigits","topic":"td_independence","snippet":"### Name: td_independence\n### Title: Test of independence of terminal digits\n### Aliases: td_independence\n\n### ** Examples\n\n\ntd_independence(decoy$weight, decimals = 2, reps = 2000)\n\n\n\n"} {"package":"terminaldigits","topic":"td_simulate","snippet":"### Name: td_simulate\n### Title: Monte Carlo simulations for independence of terminal digits\n### Aliases: td_simulate\n\n### ** Examples\n\n\ntd_simulate(distribution = \"normal\",\nn = 50,\nparameter_1 = 100,\nparameter_2 = 1,\ndecimals = 1,\nreps = 100,\nsimulations = 100)\n\n\n\n"} {"package":"terminaldigits","topic":"td_tests","snippet":"### Name: td_tests\n### Title: Tests of independence and uniformity for terminal digits in a\n### data frame\n### Aliases: td_tests\n\n### ** Examples\n\n\ntd_tests(decoy, weight, decimals = 2, group = subject, reps = 1000)\n\n\n\n"} {"package":"terminaldigits","topic":"td_uniformity","snippet":"### Name: td_uniformity\n### Title: Test of uniformity of terminal digits\n### Aliases: td_uniformity\n\n### ** Examples\n\n\ntd_uniformity(decoy$weight, decimals = 2, reps = 2000)\n\n\n\n\n"} {"package":"jsonStrings","topic":"jsonString","snippet":"### Name: jsonString\n### Title: R6 class to represent a JSON string\n### Aliases: jsonString\n\n### ** Examples\n\n\n## ------------------------------------------------\n## Method `jsonString$new`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 42}]\"\n)\njstring$prettyPrint\njstring\njstring$prettyPrint <- FALSE\njstring\njstring <- \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 
42}]\"\njsonString$new(jstring)\n\n## ------------------------------------------------\n## Method `jsonString$print`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 42}]\"\n)\njstring\njstring$prettyPrint <- FALSE\njstring\n\n## ------------------------------------------------\n## Method `jsonString$asString`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 42}]\"\n)\ncat(jstring$asString())\ncat(jstring$asString(pretty = TRUE))\n\n## ------------------------------------------------\n## Method `jsonString$at`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 42}]\"\n)\njstring$at(1)\njstring$at(2, \"x\")\n\n## ------------------------------------------------\n## Method `jsonString$hasKey`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"[1, [\\\"a\\\", 99], {\\\"x\\\": [2,3,4], \\\"y\\\": 42}]\"\n)\njstring$hasKey(\"x\")\njstring <- jsonString$new(\n \"{\\\"x\\\": [2,3,4], \\\"y\\\": 42}\"\n)\njstring$hasKey(\"x\")\n\n## ------------------------------------------------\n## Method `jsonString$keys`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"x\\\": [2,3,4], \\\"y\\\": 42}\"\n)\njstring$keys()\n\n## ------------------------------------------------\n## Method `jsonString$addProperty`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\nppty <- jsonString$new(\"[9, 99]\")\njstring$addProperty(\"c\", ppty)\njstring\njstring$addProperty(\"d\", \"null\")\njstring\n\n## ------------------------------------------------\n## Method `jsonString$erase`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring$erase(\"b\")\njstring\njstring <- jsonString$new(\"[1, 2, 3, 4, 5]\")\njstring$erase(2)\njstring\n\n## ------------------------------------------------\n## Method `jsonString$size`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring$size()\n\n## ------------------------------------------------\n## Method `jsonString$update`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring2 <- \"{\\\"a\\\":[4,5,6],\\\"c\\\":\\\"goodbye\\\"}\"\njstring$update(jstring2)\njstring\n\n## ------------------------------------------------\n## Method `jsonString$merge`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring2 <- \"{\\\"a\\\":[4,5,6],\\\"c\\\":\\\"goodbye\\\"}\"\njstring$merge(jstring2)\njstring\n\n## ------------------------------------------------\n## Method `jsonString$patch`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njspatch <- \"[\n {\\\"op\\\": \\\"remove\\\", \\\"path\\\": \\\"/a\\\"},\n {\\\"op\\\": \\\"replace\\\", \\\"path\\\": \\\"/b\\\", \\\"value\\\": null}\n]\"\njstring$patch(jspatch)\n\n## ------------------------------------------------\n## Method `jsonString$push`\n## 
------------------------------------------------\n\njstring <- jsonString$new(\"[1, 2, 3, 4, 5]\")\njstring2 <- jsonString$new(\n \"{\\\"a\\\":[4,5,6],\\\"c\\\":\\\"goodbye\\\"}\"\n )\njstring$push(jstring2)\njstring\n\n## ------------------------------------------------\n## Method `jsonString$is`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring$is(\"object\")\njstring$is(\"array\")\njstring <- jsonString$new(\"999\")\njstring$is(\"integer\")\njstring$is(\"number\")\njstring$is(\"float\")\n\n## ------------------------------------------------\n## Method `jsonString$type`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njstring$type()\njstring <- jsonString$new(\"999\")\njstring$type()\n\n## ------------------------------------------------\n## Method `jsonString$flatten`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":{\\\"x\\\":\\\"hello\\\",\\\"y\\\":\\\"hi\\\"}}\"\n)\njstring$flatten()\n\n## ------------------------------------------------\n## Method `jsonString$unflatten`\n## ------------------------------------------------\n\nfolder <- system.file(package = \"jsonStrings\")\nfiles <- list.files(folder, recursive = TRUE)\nsizes <- file.size(file.path(folder, files))\nfiles <- sprintf('\"%s\"', paste0(\"/\", files))\nstring <- sprintf(\"{%s}\", paste0(files, \":\", sizes, collapse = \",\"))\njstring <- jsonString$new(string)\njstring$unflatten()\n\n## ------------------------------------------------\n## Method `jsonString$writeFile`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\njsonfile <- tempfile(fileext = \".json\")\njstring$writeFile(jsonfile)\ncat(readLines(jsonfile), sep = \"\\n\")\njsonString$new(jsonfile)\n\n## ------------------------------------------------\n## Method `jsonString$copy`\n## ------------------------------------------------\n\njstring <- jsonString$new(\n \"{\\\"a\\\":[1,2,3],\\\"b\\\":\\\"hello\\\"}\"\n)\ncopy <- jstring$copy()\ncopy$erase(\"b\")\njstring\nnaive_copy <- jstring\nnaive_copy$erase(\"b\")\njstring\n\n\n"} {"package":"CTShiny2","topic":"CTShiny2","snippet":"### Name: CTShiny2\n### Title: Launch 'CTShiny2' Interface\n### Aliases: CTShiny2\n### Keywords: CTShiny2\n\n### ** Examples\n\nif(interactive()){\nlibrary(rmarkdown)\nCTShiny2()\n}\n\n\n"} {"package":"rmda","topic":"cv_decision_curve","snippet":"### Name: cv_decision_curve\n### Title: Calculate cross-validated decision curves\n### Aliases: cv_decision_curve\n\n### ** Examples\n\n\nfull.model_cv <- cv_decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData,\n folds = 5,\n thresholds = seq(0, .4, by = .01))\n\nfull.model_apparent <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData,\n thresholds = seq(0, .4, by = .01),\n confidence.intervals = 'none')\n\nplot_decision_curve( list(full.model_apparent, full.model_cv),\n curve.names = c('Apparent curve', 'Cross-validated curve'),\n col = c('red', 'blue'),\n lty = c(2,1),\n lwd = c(3,2, 2, 1),\n legend.position = 'bottomright')\n\n\n\n"} {"package":"rmda","topic":"decision_curve","snippet":"### Name: decision_curve\n### Title: Calculate net benefit/decision curves\n### Aliases: decision_curve\n\n### ** Examples\n\n#helper function\nexpit <- function(xx) 
exp(xx)/ (1+exp(xx))\n\n#load simulated cohort data\ndata(dcaData)\nbaseline.model <- decision_curve(Cancer~Age + Female + Smokes,\n data = dcaData,\n thresholds = seq(0, .4, by = .01),\n study.design = 'cohort',\n bootstraps = 10) #number of bootstraps should be higher\n\nfull.model <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData,\n thresholds = seq(0, .4, by = .01),\n bootstraps = 10)\n\n#simulated case-control data with same variables as above\ndata(dcaData_cc)\n\ntable(dcaData_cc$Cancer)\n\n#estimated from the population where the\n#case-control sample comes from.\npopulation.rho = 0.11\n\nfull.model_cc <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData_cc,\n thresholds = seq(0, .4, by = .01),\n bootstraps = 10,\n study.design = 'case-control',\n population.prevalence = population.rho)\n\n#estimate the net benefit for an 'opt-out' policy.\nnb.opt.out <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData,\n policy = 'opt-out',\n thresholds = seq(0, .4, by = .01),\n bootstraps = 10)\n\n\n\n\n"} {"package":"rmda","topic":"plot_clinical_impact","snippet":"### Name: plot_clinical_impact\n### Title: Plot the clinical impact curve from a DecisionCurve object.\n### Aliases: plot_clinical_impact\n\n### ** Examples\n\ndata(dcaData)\nset.seed(123)\nbaseline.model <- decision_curve(Cancer~Age + Female + Smokes,\n data = dcaData,\n thresholds = seq(0, .4, by = .001),\n bootstraps = 25) #should use more bootstrap replicates in practice!\n\n#plot the clinical impact\nplot_clinical_impact(baseline.model, xlim = c(0, .4),\n col = c(\"black\", \"blue\"))\n\n\n\n"} {"package":"rmda","topic":"plot_decision_curve","snippet":"### Name: plot_decision_curve\n### Title: Plot the net benefit curves from a decision_curve object or many\n### decision_curve objects\n### Aliases: plot_decision_curve\n\n### ** Examples\n\ndata(dcaData)\nset.seed(123)\nbaseline.model <- decision_curve(Cancer~Age + Female + Smokes,\n data = dcaData,\n thresholds = seq(0, .4, by = .005),\n bootstraps = 10)\n\n#plot using the defaults\nplot_decision_curve(baseline.model, curve.names = \"baseline model\")\n\nset.seed(123)\nfull.model <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\n data = dcaData,\n thresholds = seq(0, .4, by = .005),\n bootstraps = 10)\n\n# for lwd, the first two positions correspond to the decision curves, then 'all' and 'none'\nplot_decision_curve( list(baseline.model, full.model),\n curve.names = c(\"Baseline model\", \"Full model\"),\n col = c(\"blue\", \"red\"),\n lty = c(1,2),\n lwd = c(3,2, 2, 1),\n legend.position = \"bottomright\")\n\n\nplot_decision_curve( list(baseline.model, full.model),\n curve.names = c(\"Baseline model\", \"Full model\"),\n col = c(\"blue\", \"red\"),\n confidence.intervals = FALSE, #remove confidence intervals\n cost.benefit.axis = FALSE, #remove cost benefit axis\n legend.position = \"none\") #remove the legend\n\n#Set specific cost:benefit ratios.\n\nplot_decision_curve( list(baseline.model, full.model),\n curve.names = c(\"Baseline model\", \"Full model\"),\n col = c(\"blue\", \"red\"),\n cost.benefits = c(\"1:1000\", \"1:4\", \"1:9\", \"2:3\", \"1:3\"),\n legend.position = \"bottomright\")\n\n#Plot net benefit instead of standardized net benefit.\n\nplot_decision_curve( list(baseline.model, full.model),\n curve.names = c(\"Baseline model\", \"Full model\"),\n col = c(\"blue\", \"red\"),\n ylim = c(-0.05, 0.15), #set ylim\n lty = c(2,1),\n standardize = FALSE, #plot 
Net benefit instead of standardized net benefit\n legend.position = \"topright\")\n\n\n\n\n"} {"package":"rmda","topic":"plot_roc_components","snippet":"### Name: plot_roc_components\n### Title: Plot the components of a ROC curve by the high risk thresholds.\n### Aliases: plot_roc_components\n\n### ** Examples\n\ndata(dcaData)\nset.seed(123)\nbaseline.model <- decision_curve(Cancer~Age + Female + Smokes,\n data = dcaData,\n thresholds = seq(0, .4, by = .001),\n bootstraps = 25) #should use more bootstrap replicates in practice!\n\n#plot using the defaults\nplot_roc_components(baseline.model, xlim = c(0, 0.4), col = c(\"black\", \"red\"))\n\n\n\n\n"} {"package":"rmda","topic":"summary.decision_curve","snippet":"### Name: summary.decision_curve\n### Title: Displays a useful description of a decision_curve object\n### Aliases: summary.decision_curve\n\n### ** Examples\n\n#load simulated data\ndata(dcaData)\n\nfull.model <- decision_curve(Cancer~Age + Female + Smokes + Marker1 + Marker2,\ndata = dcaData,\nthresholds = seq(0, .4, by = .05),\nbootstraps = 25)\n\nsummary(full.model) #outputs standardized net benefit by default\n\nsummary(full.model, nround = 2, measure = \"TPR\")\n\n\n\n"} {"package":"gsbDesign","topic":"gsb","snippet":"### Name: gsb\n### Title: Group Sequential Bayesian Design\n### Aliases: gsb gsbDesign gsbSimulation\n### Keywords: main function operating characteristics\n\n### ** Examples\n\n## E X A M P L E 1: Update on treatment effect, flat prior\n##\n## A. Trial Design:\n## ----------------\n## A.1 2 stages (interim + final):\n## --> nr.stages = 2\n## A.2 10 patients per arm and stage. (total 2*2*10 = 40 patients)\n## --> patients = 10\n## A.3 Sigma in both arms = 10\n## --> sigma = 10\n## A.3 Criteria:\n## stop for success, if P( delta > 0 | data ) >= 0.8\n## AND P( delta > 7 | data ) >= 0.5 \n## --> criteria.success = c(0,0.8,7,0.5)\n## stop for futility, if P( delta < 2 | data ) >= 0.8\n## --> criteria.futility = c(2,0.8)\n## A.4 Prior:\n## --> prior = \"non-informative\"\n\ndesign1 <- gsbDesign(nr.stages = 2,\n patients = 10,\n sigma = 10,\n criteria.success = c(0,0.8, 7, 0.5),\n criteria.futility = c(2,0.8),\n prior.difference = \"non-informative\")\ndesign1\n\n## B. Simulation Settings\n## ----------------------\n## B.1 True treatment effects to be evaluated = c(-10,20,60)\n## --> truth = c(-10,20,60)\n## B.2 Bayesian update on treatment effect delta (= treatment - control)\n## --> type.update = \"treatment effect\"\n\nsimulation1 <- gsbSimulation(truth=c(-10,20,60),\n type.update=\"treatment effect\")\nsimulation1\n\n## C.1 Calculate the operating characteristics\nx1 <- gsb(design=design1, simulation=simulation1)\nx1\n\n## D.1 Table the probabilities of success\nt1.1 <- tab(x1, \"success\", digits=2)\nt1.1\n\n## D.2 Table the cumulative probabilities of futility at delta = c(-5,0,5.57)\n## (for 5.57 a linear interpolation is used.)\nt1.2 <- tab(x1, \"cumulative futility\", atDelta = c(-5,0,5.57), digits=5)\nt1.2\n\n## D.3 Table the expected sample size (digits == 0 --> ceiling)\nt1.3 <- tab(x1, \"sample size\", atDelta= c(-5,0,5,16), digits=0)\nt1.3\n\n## E.1 Plot the operating characteristics\nplot(x1)\n\n## E.2 Plot the cumulative operating characteristics\nplot(x1,\"cumulative all\")\n\n## E.3 Plot the expected sample size\nplot(x1, what=\"sample size\")\n\n## F.1 Boundaries / criteria \nx1$boundary\nplot(x1, what=\"boundary\")\nplot(x1, what=\"std.boundary\")\n\n\n## E X A M P L E 2: Update on treatment effect, informative prior\n##\n## A. 
Trial design:\n## ----------------\n## A.1 3 stages (interims + final):\n## --> nr.stages = 3\n## A.2 10 patients per stage in control arm\n## 15 patients per stage in treatment arm\n## (i.e. total 3 * ( 10 + 15 ) = 75 patients)\n## --> patients = c(10,15)\n## A.3 Sigma in control arm = 9, sigma in treatment arm = 12\n## --> sigma = c(9,12)\n## A.3 Criteria:\n## stop for success, if P( delta > 0 | data ) >= 0.8\n## AND P( delta > 7 | data ) >= 0.5 \n## --> criteria.success = c(0,0.8,7,0.5)\n## not stop for futility, i.e. no futility criteria\n## --> criteria.futility = NA\n## A.4 Prior on difference:\n## prior difference = 3\n## informative prior equivalent to:\n## 5 patients in control arm; 2 patients in treatment arm\n## --> prior.diff = c(3,5,2)\n\ndesign2a <- gsbDesign(nr.stages = 3,\n patients = c(10,15),\n sigma=c(9,12),\n criteria.success = c(0,0.8,7,0.5),\n criteria.futility = NA,\n prior.diff = c(3,5,2))\ndesign2a\n\n## A similar design with 3 success criteria can be specified as follows\n## A.3 criteria:\n## Stage 1: stop for success, if P( delta > 0 | data ) >= 0.8\n## AND if P( delta > 10 | data ) >= 0.5\n## AND if P( delta > 14 | data ) >= 0.4\n## Stage 2: stop for success, if P( delta > 0 | data ) >= 0.8\n## AND if P( delta > 9 | data ) >= 0.5\n## AND if P( delta > 13 | data ) >= 0.4\n## Stage 3: stop for success, if P( delta > 0 | data ) >= 0.8\n## AND if P( delta > 7 | data ) >= 0.5\n## AND if P( delta > 12 | data ) >= 0.4\n## --> criteria.success = rbind(c(0,0.8, 10,0.5, 14,0.4),\n## c(0,0.8, 9,0.5, 13,0.4),\n## c(0,0.8, 7,0.5, 12,0.4))\n\ndesign2b <- gsbDesign(nr.stages = 3,\n patients = c(10,15),\n sigma = c(9,12),\n criteria.success = rbind(c(0,0.8, 10,0.5, 14,0.4),\n c(0,0.8, 9,0.5, 13,0.4),\n c(0,0.8, 7,0.5, 12,0.4)),\n criteria.futility = NA,\n prior.diff = c(3,5,2))\ndesign2b\n\n## B. Simulation Settings\n## ----------------------\n## B.1 True treatment effects to be evaluated from -5 to 30\n## --> truth = -5:30\n## B.2 To enter the values in this format set grid.type = \"manually\"\n## --> grid.type = \"manually\"\n\n## B.3 Bayesian update on treatment effect delta (treatment - control)\n## --> type.update = \"treatment effect\"\n\nsimulation2 <- gsbSimulation(truth = -5:30,\n grid.type =\"manually\",\n type.update = \"treatment effect\")\nsimulation2\n\n## C. Calculate the operating characteristics\nx2a <- gsb(design = design2a, simulation = simulation2)\nx2b <- gsb(design = design2b, simulation = simulation2)\nx2a\nx2b\n\n## D. Table the cumulative probabilities of success of 'design2b'\n## at delta = c(-5,0,5.57). For 5.57 a linear interpolation is used.\nt2b <- tab(x2b, \"cumulative success\", atDelta = c(-5,0,5.57), digits=5)\nt2b\n\n## E. Plot the operating characteristics of 'design2a' and 'design2b'\nplot(x2a)\nplot(x2b)\nplot(x2a,\"cumulative all\")\n\n## F.1 Boundaries / criteria of 'design2b' \nx2b$boundary\nplot(x2b, what=\"boundary\")\nplot(x2b, what=\"std.boundary\")\n\n\n\n\n## No test: \n## E X A M P L E 3: Update on treatment effect, informative prior\n##\n## A. 
Trial Design\n## ---------------\n## A.1 3 stages (interims + final):\n## --> nr.stages = 3\n## A.2 Patients:\n## Stage 1: 10 patients in control arm; 15 patients in treatment arm\n## Stage 2: 20 patients in control arm; 30 patients in treatment arm\n## Stage 3: 30 patients in control arm; 45 patients in treatment arm\n## --> patients = rbind(c(10,15),c(20,30),c(30,45))\n## A.3 Sigma in control arm = 9 ; in treatment arm = 12\n## --> sigma = c(9,12)\n## A.4 Success criteria for all stages:\n## stop for success, if P( delta > 0 | data ) >= 0.8\n## AND P( delta > 7 | data ) >= 0.5\n## --> criteria.success = c(0,0.8,7,0.5)\n## A.5 Futility criteria:\n## Stage 1: no futility criteria\n## Stage 2: stop for futility, if P( delta < 2 | data ) >= 0.8\n## Stage 3: stop for futility, if P( delta < 2 | data ) >= 0.8\n## --> criteria.futility = rbind(c(NA,NA),c(2,0.8),c(2,0.8))\n## A.6 Prior on treatment effect:\n## difference = 3;\n## informative prior equivalent to:\n## 2 placebo patients; 1 treatment patient\n## --> prior.difference = c(3,2,1)\n\ndesign3 <- gsbDesign(nr.stages = 3,\n patients = rbind(c(10,15),c(20,30),c(30,45)),\n sigma=c(9,12),\n criteria.success = c(0,0.8,7,0.5),\n criteria.futility = rbind(c(NA,NA),c(2,0.8),c(2,0.8)),\n prior.difference = c(3,2,1))\ndesign3\n\n## B. Simulation Settings\n## ----------------------\n## B.1 True treatment effects to be evaluated at c(-5,20,15)\n## --> truth = c(-5,20,15)\n## B.2 Bayesian update on treatment effect delta (= treatment - control)\n## --> type.update = \"treatment effect\"\n## B.3 Operating characteristics are evaluated by simulation and\n## numerical integration to double check the results\n## --> method = \"both\"\n## B.4 Number of simulations = 5000\n## --> nr.sim = 5000\n## B.5 If the number of simulated trials is smaller than 300\n## during the simulation print a warning.\n## --> warnings.sensitivity = 300\n## B.6 A seed value is set to 13\n## --> seed = 13 \n\nsimulation3 <- gsbSimulation(truth = c(-5,20,15),\n type.update = \"treatment effect\",\n method = \"both\",\n nr.sim = 5000,\n warnings.sensitivity = 300,\n seed = 13)\nsimulation3\n\n## C. Calculate the operating characteristics\nx3 <- gsb(design = design3, simulation = simulation3)\nx3\n\n## D. The summary(x3) is almost the same as print(x3) but its entries\n## can be saved as list.\ns3 <- summary(x3)\nnames(s3)\n\n## E.1 Plot the operating characteristics\nplot(x3)\n\n## E.2 Plot the operating characteristics obtained by simulation and\n## numerical integration in one plot. The lines should be identical (then\n## only one line is visible)\nplot(x3, \"both\")\nplot(x3, \"cumulative both\")\n\n\n\n## E X A M P L E 4 - Boundaries / Criteria\n## See how the Bayesian boundaries change within 10 stages.\n##\n## A. Trial Design:\n## ----------------\n## A.1 10 stages (interims + final):\n## --> nr.stages = 10\n## A.2 10 patients per arm and stage. (total 2*10*10 = 200 patients)\n## --> patients = 10\n## A.3 sigma in both arms = 10\n## --> sigma = 10\n## A.3 Criteria:\n## stop for success, if P( delta > 0 | data ) >= 0.8\n## AND P( delta > 7 | data ) >= 0.5 \n## --> criteria.success = c(0,0.8,7,0.5)\n## stop for futility, if P( delta < 2 | data ) >= 0.8\n## --> criteria.futility = c(2,0.8)\n## A.4 Prior:\n## --> prior = \"non-informative\"\n\ndesign4 <- gsbDesign(nr.stages=10,\n patients=10,\n sigma=10,\n criteria.success=c(0,0.8, 7, 0.5),\n criteria.futility=c(2,0.8),\n prior.difference=\"non-informative\")\ndesign4\n\n\n## B. 
Simulation Settings\n## --------------------------------------------\n## B.1 True treatment effects to be evaluated = c(-10,20,60)\n## --> truth = c(-10,20,60)\n## B.2 Bayesian update on treatment effect delta (= treatment - control)\n## --> type.update = \"treatment effect\"\n\nsimulation4 <- gsbSimulation(truth=c(-10,20,60),\n type.update=\"treatment effect\")\nsimulation4\n\n## C. Calculate the operating characteristics\nx4 <- gsb(design = design4, simulation = simulation4)\nx4\n\n## D. Boundaries / criteria \nx4$boundary\nplot(x4, what=\"boundary\")\nplot(x4, what=\"std.boundary\")\n\n\n## E X A M P L E 5 - Bayesian update \"per arm\", \n## \n## A. Trial Design:\n## ----------------\n## A.1 3 stages (interims + final):\n## --> nr.stages = 3\n## A.2 12 patients per stage in control arm\n## 20 patients per stage in treatment arm\n## (i.e. total 3 * ( 12 + 20 ) = 96 patients)\n## --> patients = c(12,20)\n## A.3 sigma in both arms = 10\n## --> sigma = 10\n## A.3 Criteria:\n## stop for success, if P( delta > 0 | data ) >= 0.8\n## AND P( delta > 7 | data ) >= 0.5 \n## --> criteria.success = c(0,0.8,7,0.5)\n## stop for futility, if P( delta < 2 | data ) >= 0.8\n## --> criteria.futility = c(2,0.8)\n## A.4 Prior:\n## informative prior equivalent to:\n## 2 patients in control arm with mean = 0\n## --> prior.control = c(0,2)\n## 1 patient in treatment arm with mean = 7 \n## --> prior.treatment = c(7,1)\n\ndesign5 <- gsbDesign(nr.stages=3,\n patients=c(12,20),\n sigma=10,\n criteria.success=c(0,0.8,7,0.5),\n criteria.futility=c(2,0.8),\n prior.control=c(0,2),\n prior.treatment=c(7,1)) \ndesign5\n\n## B. Simulation Settings: - with table grid\n## --------------------------------------------\n## B.1 True control/treatment values:\n## control = seq(1,5,0.5)\n## treatment = seq(1,7,1)\n## --> truth = list(seq(1,5,0.5),seq(1,7,1))\n## B.2 Output optimized to create table\n## --> grid.type = \"table\"\n## B.3 Bayesian update per arm\n## --> type.update = \"per arm\"\n## B.4 Number of simulations = 5000 (which is low)\n## --> nr.sim = 5000\n## B.5 If the number of simulations is smaller than 2000\n## print a warning.\n## --> warnings.sensitivity = 2000\n## B.6 A seed value is set to 13\n## --> seed = 13\n\nsimulation5.table <- gsbSimulation(truth = list(seq(1,5,0.5), seq(1,7,1)),\n grid.type = \"table\",\n type.update = \"per arm\",\n nr.sim = 5000,\n warnings.sensitivity = 2000,\n seed = 13)\nsimulation5.table\n\n## The same grid can be specified manually by\nsimulation5.manually <- gsbSimulation(truth = as.matrix(expand.grid(seq(1,5,0.5),seq(1,7,1))),\n grid.type = \"manually\",\n type.update = \"per arm\",\n nr.sim = 5000,\n warnings.sensitivity = 2000,\n seed = 13)\nsimulation5.manually\n\n## To specify a grid optimized for sliced plotting with\n## control values from -10 to 0 and treatment values from -10 to 25\n\nsimulation5.sliced <- gsbSimulation(truth = list(control=seq(-10,0,2), delta=seq(-10,25,4)),\n grid.type = \"sliced\",\n type.update = \"per arm\",\n nr.sim = 5000,\n warnings.sensitivity = 2000,\n seed = 13)\nsimulation5.sliced\n\n## To specify a grid optimized for plotting with\n## control values from 1 to 5 and treatment values from 1 to 7\n## with approximately 20 values enter: \nsimulation5.plot <- gsbSimulation(truth = c(1,5,1,7,20),\n grid.type = \"plot\",\n type.update = \"per arm\",\n nr.sim = 5000,\n warnings.sensitivity = 2000,\n seed = 13)\nsimulation5.plot\n\n## C. 
Use function gsb\nx5.table <- gsb(design5,simulation5.table)\nx5.sliced <- gsb(design5,simulation5.sliced)\nx5.plot <- gsb(design5,simulation5.plot)\nx5.table\n\n## D. Tables\n## D.1 For any grid a table in long format can be obtained \nt5.1 <- tab(x5.table,\"cumulative futility\")\nhead(t5.1)\n\nt5.2 <- tab(x5.sliced,\"all\")\nhead(t5.2)\n\n## D.2 For the \"table\" grid there are additionally tables in wide format available.\nt5.3 <- tab(x5.table,\"success\", wide=TRUE)\nt5.3\n\n## Fix a stage, e.g. stage 2, to get a matrix\nt5.3[,,2]\n\n## Set delta.control to '3' to get a matrix\nt5.3[\"contr 3\",,]\n\n## D.3 Plot results\nplot(x5.table)\nplot(x5.plot)\nplot(x5.sliced)\nplot(x5.sliced, sliced=TRUE)\nplot(x5.sliced, sliced=TRUE, range.control=c(-4,0))\nplot(x5.sliced, what=\"success\", sliced=TRUE, range.control=c(-4,0))\n\n## the plot can differ because the number of simulations \"nr.sim\"\n## is low and because the grids are different\nplot(x5.plot,\"sample size\", color=FALSE)\nhead(tab(x5.table,\"sample size\"))\n## End(No test)\n\n\n"} {"package":"gsbDesign","topic":"gsbBayesUpdate","snippet":"### Name: gsbBayesUpdate\n### Title: Bayesian Update\n### Aliases: gsbBayesUpdate\n### Keywords: bayes update\n\n### ** Examples\n\n## One dimensional case, with.alpha = FALSE\ngsbBayesUpdate(beta=10,precisionData=20, with.alpha=FALSE)\n\n## Two dimensional case, with.alpha = TRUE\ngsbBayesUpdate(alpha=c(5,6),beta=c(10,11),meanData=c(10,11),\n precisionData=c(20,21),with.alpha=TRUE)\n\n\n"} {"package":"gsbDesign","topic":"plot.gsbMainOut","snippet":"### Name: plot.gsbMainOut\n### Title: Plot methods\n### Aliases: plot.gsbMainOut plot.gsbDesign plot.gsbSimulation\n### Keywords: plot\n\n### ** Examples\n\n\n## please see examples of function 'gsb'. \n## --------------------------------------\n\n\n## --------------------------------------\n## alternative plots can be created for example\n## with package 'ggplot2'.\n## No test: \nlibrary(ggplot2)\ndes <- gsbDesign(nr.stages=2,\n patients=10,\n sigma=10,\n criteria.success=c(0,0.8, 7, 0.5),\n criteria.futility=c(2,0.8),\n prior.difference=\"non-informative\")\n\nsim <- gsbSimulation(truth=c(-10,20,60),\n type.update=\"treatment effect\")\n\n\nx <- gsb(des,sim)\n\n## get data.frame with operating characteristics\ndatgraph <- x$OC\n\n\n## prepare for plot\nsub <- c(\"success\", \"futility\", \"success or futility\")\ndatgraph2 <- subset(datgraph,datgraph$type %in% sub)\ndatgraph2$type <- as.factor(paste(datgraph2$type))\ndatgraph2$value[datgraph2$type==\"cumulative success or futility\"] <-\n1-datgraph2$value[datgraph2$type==\"cumulative success or futility\"]\nlevels(datgraph2$type) <- c(\"1)cumulative futility\" ,\"3)cumulative success\",\"2)indeterminate\")\ndatgraph2$type=as.factor(paste(datgraph2$type))\nlevels(datgraph2$type) <- c(\"cumulative futility\" ,\"indeterminate\",\"cumulative success\")\ndatgraph2 <- datgraph2[order(datgraph2$delta),]\n\n\n## plots\np1 <- qplot(delta,value,geom=\"blank\",color=type,facets=.~stage,data=datgraph2,\nxlab=expression(delta))\n\np1+geom_line(size=1.5)+scale_color_manual(values = c(\"cumulative futility\" = \"dark red\",\n\"indeterminate\" = \"orange\", \"cumulative success\" = \"dark green\"))\n\np2=p1+geom_area(aes(x = delta,y=value,fill=type))\n\np2+scale_fill_manual(values = c(\"cumulative futility\" = \"dark red\",\n\"indeterminate\" = \"orange\", \"cumulative success\" = \"dark green\"))\n\n## End(No test)\n\n\n"} {"package":"gsbDesign","topic":"tab","snippet":"### Name: tab\n### Title: get tables.\n### Aliases: 
tab\n### Keywords: table\n\n### ** Examples\n\n## please see examples of function 'gsb'.\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.account.details","snippet":"### Name: uptimerobot.account.details\n### Title: Get the account details for who is linked to the given API key\n### Aliases: uptimerobot.account.details\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns details as a list\n##D details.list <- uptimerobot.account.details(api.key)\n##D \n##D # Returns details as a vector\n##D details.num <- uptimerobot.account.details(api.key, unlist = TRUE)\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.contact.delete","snippet":"### Name: uptimerobot.contact.delete\n### Title: Delete an alert contact\n### Aliases: uptimerobot.contact.delete\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Delete the contact with id=12345678\n##D if(uptimerobot.contact.delete(api.key, 12345678)){\n##D message(\"Alert contact successfully deleted!\")\n##D }\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.contact.new","snippet":"### Name: uptimerobot.contact.new\n### Title: Add a new alert contact\n### Aliases: uptimerobot.contact.new\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D # Create a new contact and get the ID\n##D contact.new <- uptimerobot.contact.new(api.key, type = \"email\", value = \"foo@bar.com\", \"John Doe\")\n##D \n##D # Get information about this new contact\n##D contact.detail <- uptimerobot.contacts(api.key, contacts = contact.new)\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.contacts","snippet":"### Name: uptimerobot.contacts\n### Title: Get general information about the alert contacts\n### Aliases: uptimerobot.contacts\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns all the contacts with a default set of attributes\n##D contacts.df <- uptimerobot.contacts(api.key)\n##D \n##D # Returns all the contacts and all the attributes\n##D contacts.full.df <- uptimerobot.contacts(api.key, fields=uptimerobot.fields(\"contact\")$full)\n##D \n##D # Returns only the two contacts with ID: 1234, 5678\n##D contacts.df <- uptimerobot.contacts(api.key, c(\"1234\", \"5678\"))\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.contacts","snippet":"### Name: uptimerobot.monitor.contacts\n### Title: Get contact information for one or more monitors\n### Aliases: uptimerobot.monitor.contacts\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns all the monitors IDs. 
Since the function always returns a data.frame\n##D # (even if you ask only for a column), you have to reference the column to get a character vector.\n##D monitors.id <- uptimerobot.monitors(api.key, fields=\"id\")$id\n##D \n##D # Returns all the contacts registered for the given monitors\n##D contacts.df <- uptimerobot.monitor.contacts(api.key, monitors=monitors.id)\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.delete","snippet":"### Name: uptimerobot.monitor.delete\n### Title: Delete a monitor\n### Aliases: uptimerobot.monitor.delete\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Create a monitor and get its monitor.id\n##D monitor.id <- uptimerobot.monitor.new(api.key,\n##D friendly.name=\"Open Analytics\",\n##D url=\"https://gabrielebaldassarre.com\", type=\"http\"\n##D )\n##D \n##D # Change the friendly name of the monitor\n##D if(uptimerobot.monitor.edit(api.key,\n##D monitor.id,\n##D friendly.name=\"Open Analytics - gabrielebaldassarre.com\"\n##D )){\n##D message(\"Monitor has been successfully edited!\")\n##D }\n##D \n##D # Delete the just-made monitor\n##D if(uptimerobot.monitor.delete(api.key, monitor.id)){\n##D message(\"Monitor has been successfully deleted!\")\n##D }\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.edit","snippet":"### Name: uptimerobot.monitor.edit\n### Title: Edit a monitor\n### Aliases: uptimerobot.monitor.edit\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Create a monitor and get its monitor.id\n##D monitor.id <- uptimerobot.monitor.new(api.key,\n##D friendly.name=\"Open Analytics\",\n##D url=\"https://gabrielebaldassarre.com\", type=\"http\"\n##D )\n##D \n##D # Change the friendly name of the monitor\n##D if(uptimerobot.monitor.edit(api.key,\n##D monitor.id,\n##D friendly.name=\"Open Analytics - gabrielebaldassarre.com\"\n##D )){\n##D message(\"Monitor has been successfully edited!\")\n##D }\n##D \n##D # Delete the just-made monitor\n##D if(uptimerobot.monitor.delete(api.key, monitor.id)){\n##D message(\"Monitor has been successfully deleted!\")\n##D }\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.logs","snippet":"### Name: uptimerobot.monitor.logs\n### Title: Get log records for one or more monitors\n### Aliases: uptimerobot.monitor.logs\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns all the monitors IDs. 
Since the function always returns a data.frame\n##D # (even if you ask only for a column), you have to reference the column to get a character vector.\n##D monitors.id <- uptimerobot.monitors(api.key, fields=\"id\")$id\n##D \n##D # Returns all the log events for the given monitors\n##D logs.df <- uptimerobot.monitor.logs(api.key, monitors=monitors.id)\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.new","snippet":"### Name: uptimerobot.monitor.new\n### Title: Add a new monitor\n### Aliases: uptimerobot.monitor.new\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Create a monitor and get its monitor.id\n##D monitor.id <- uptimerobot.monitor.new(api.key,\n##D friendly.name=\"Open Analytics\",\n##D url=\"https://gabrielebaldassarre.com\", type=\"http\"\n##D )\n##D \n##D # Change the friendly name of the monitor\n##D if(uptimerobot.monitor.edit(api.key,\n##D monitor.id,\n##D friendly.name=\"Open Analytics - gabrielebaldassarre.com\"\n##D )){\n##D message(\"Monitor has been successfully edited!\")\n##D }\n##D \n##D # Delete the just-made monitor\n##D if(uptimerobot.monitor.delete(api.key, monitor.id)){\n##D message(\"Monitor has been successfully deleted!\")\n##D }\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.reset","snippet":"### Name: uptimerobot.monitor.reset\n### Title: Reset a monitor\n### Aliases: uptimerobot.monitor.reset\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Get a list of all available monitors, and take the first id\n##D monitor.id <- uptimerobot.monitors(api.key, fields=\"id\")[1,1]\n##D \n##D # Reset the stats for that monitor\n##D uptimerobot.monitor.reset(api.key, monitor.id)\n##D \n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitor.responses","snippet":"### Name: uptimerobot.monitor.responses\n### Title: Get response times for one or more monitors\n### Aliases: uptimerobot.monitor.responses\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns all the monitors IDs. 
Since the function always returns a data.frame\n##D # (even if you ask only for a column), you have to reference the column to get a character vector.\n##D monitors.id <- uptimerobot.monitors(api.key, fields=\"id\")$id\n##D \n##D # Returns all the ping events for the given monitors\n##D responses.df <- uptimerobot.monitor.responses(api.key, monitors=monitors.id)\n## End(Not run)\n\n\n"} {"package":"uptimeRobot","topic":"uptimerobot.monitors","snippet":"### Name: uptimerobot.monitors\n### Title: Get general information about monitors\n### Aliases: uptimerobot.monitors\n\n### ** Examples\n\n## Not run: \n##D # Let's assume the api.key is available in the environment variable KEY\n##D api.key <- Sys.getenv(\"KEY\", \"\")\n##D \n##D # Returns all the monitors with a default set of attributes\n##D monitors.df <- uptimerobot.monitors(api.key)\n##D \n##D # Returns all the monitors of 'keyword' type\n##D monitors.kwd.df <- uptimerobot.monitors(api.key, type=\"keyword\")\n##D \n##D # Returns all the monitors and all the attributes\n##D monitors.full.df <- uptimerobot.monitors(api.key, fields=uptimerobot.fields(\"monitor\")$full)\n##D \n##D # Returns only the two monitors with ID: 1234, 5678\n##D monitors.df <- uptimerobot.monitors(api.key, c(\"1234\", \"5678\"))\n## End(Not run)\n\n\n"} {"package":"CALANGO","topic":"install_bioc_dependencies","snippet":"### Name: install_bioc_dependencies\n### Title: Install Bioconductor dependencies\n### Aliases: install_bioc_dependencies\n\n### ** Examples\n\n## Not run: \n##D install_bioc_dependencies()\n## End(Not run)\n\n\n\n"} {"package":"CALANGO","topic":"retrieve_calanguize_genomes","snippet":"### Name: retrieve_calanguize_genomes\n### Title: Retrieve calanguize_genomes script from the Github repository\n### Aliases: retrieve_calanguize_genomes\n\n### ** Examples\n\n## Not run: \n##D CALANGO::retrieve_calanguize_genomes(target.dir = \"./data\")\n## End(Not run)\n\n\n\n"} {"package":"CALANGO","topic":"retrieve_data_files","snippet":"### Name: retrieve_data_files\n### Title: Retrieve data files from the Github repository\n### Aliases: retrieve_data_files\n\n### ** Examples\n\n## Not run: \n##D CALANGO::retrieve_data_files(target.dir = \"./data\")\n## End(Not run)\n\n\n\n"} {"package":"CALANGO","topic":"run_CALANGO","snippet":"### Name: run_CALANGO\n### Title: Run the CALANGO pipeline\n### Aliases: run_CALANGO\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Install any missing BioConductor packages for report generation \n##D ## (only needs to be done once)\n##D # CALANGO::install_bioc_dependencies()\n##D \n##D # Retrieve example files\n##D basedir <- tempdir()\n##D retrieve_data_files(target.dir = paste0(basedir, \"/data\"))\n##D defs <- paste0(basedir, \"/data/parameters/parameters_domain2GO_count_less_phages.txt\")\n##D \n##D # Run CALANGO\n##D res <- run_CALANGO(defs, cores = 2)\n## End(Not run)\n\n\n\n"} {"package":"tehtuner","topic":"tunevt","snippet":"### Name: tunevt\n### Title: Fit a tuned Virtual Twins model\n### Aliases: tunevt\n\n### ** Examples\n\ndata(tehtuner_example)\n# Low p_reps for example use only\ntunevt(\n tehtuner_example, step1 = \"lasso\", step2 = \"rtree\",\n alpha0 = 0.2, p_reps = 5\n)\n\n\n\n"} {"package":"SOAs","topic":"MDLEs","snippet":"### Name: MDLEs\n### Title: Function to create maximin distance level expanded arrays\n### Aliases: MDLEs\n\n### ** Examples\n\ndim(aus <- MDLEs(DoE.base::L16.4.5, 2, noptim.rounds = 1))\npermpicks <- attr(aus, \"permpick\")\n## for people interested in internal workings:\n## the code below 
produces the same matrix as MDLEs\n## No test: \nSOAs:::DcFromDp(L16.4.5-1, 4,2, lapply(1:5, function(obj) permpicks[,obj]))\n## End(No test)\n\n\n"} {"package":"SOAs","topic":"OSOApb","snippet":"### Name: OSOApb\n### Title: function to create a strength 3 OSOA with 8-level columns from a\n### Hadamard matrix\n### Aliases: OSOApb\n### Keywords: internal\n\n### ** Examples\n\ndim(OSOApb(9)) ## 9 8-level factors in 24 runs\ndim(OSOApb(n=16)) ## 6 8-level factors in 16 runs\ndim(OSOApb(m=35)) ## 35 8-level factors in 80 runs\n\n\n\n"} {"package":"SOAs","topic":"OSOAregulart","snippet":"### Name: OSOAregulart\n### Title: TODO\n### Aliases: OSOAregulart\n### Keywords: internal\n\n### ** Examples\n\nprint(\"TODO\")\n\n\n\n"} {"package":"SOAs","topic":"OSOAs","snippet":"### Name: OSOAs\n### Title: Function to create an OSOA from an OA\n### Aliases: OSOAs\n\n### ** Examples\n\n## run with optimization for actual use!\n\n## 54 runs with seven 9-level columns\nOSOAs(DoE.base::L18[,3:8], el=2, optimize=FALSE)\n\n## 54 runs with six 27-level columns\nOSOAs(DoE.base::L18[,3:8], el=3, optimize=FALSE)\n\n## 81 runs with four 9-level columns\nOSOAs(DoE.base::L27.3.4, el=2, optimize=FALSE)\n## An OA with 9-level factors (L81.9.10)\n## has complete balance in 2D,\n## however does not achieve 3D projection for\n## all four collapsed triples\n## It is up to the user to decide what is more important.\n## I would go for the OA.\n\n## 81 runs with four 27-level columns\nOSOAs(DoE.base::L27.3.4, el=3, optimize=FALSE)\n\n\n"} {"package":"SOAs","topic":"OSOAs_LiuLiu","snippet":"### Name: OSOAs_LiuLiu\n### Title: Function to create OSOAs of strengths 2, 3, or 4 from an OA\n### Aliases: OSOAs_LiuLiu\n\n### ** Examples\n\n## strength 2, very small (four 9-level columns in 9 runs)\nOSOA9 <- OSOAs_LiuLiu(DoE.base::L9.3.4)\n\n## strength 3, from a Plackett-Burman design of FrF2\n## 10 8-level columns in 40 runs with OSOA strength 3\noa <- suppressWarnings(FrF2::pb(40)[,c(1:19,39)])\n### columns 1 to 19 and 39 together are the largest possible strength 3 set\nOSOA40 <- OSOAs_LiuLiu(oa, optimize=FALSE) ## strength 3, 8 levels\n### optimize would improve phi_p, but suppressed for saving run time\n\n## 9 8-level columns in 40 runs with OSOA strength 3\noa <- FrF2::pb(40,19)\n### 9 columns would be obtained without the final column in oa\nmbound_LiuLiu(19, t=3) ## example for which q=3\nmbound_LiuLiu(19, t=4) ## t=3 has one more column than t=4\nOSOA40_2 <- OSOAs_LiuLiu(oa, optimize=FALSE) ## strength 3, 8 levels\n### optimize would improve phi_p, but suppressed for saving run time\n\n## starting from a strength 4 OA\noa <- FrF2::FrF2(64,8)\n## four 16 level columns in 64 runs with OSOA strength 4\nOSOA64 <- OSOAs_LiuLiu(oa, optimize=FALSE) ## strength 4, 16 levels\n\n### reducing the strength to 3 does not increase the number of columns\nmbound_LiuLiu(8, t=3)\n### reducing the strength to 2 doubles the number of columns\nmbound_LiuLiu(8, t=2)\n## eight 4-level columns in 64 runs with OSOA strength 2\nOSOA64_2 <- OSOAs_LiuLiu(oa, t=2, optimize=FALSE)\n## fulfills the 2D strength 2 property\nsoacheck2D(OSOA64_2, s=2, el=2, t=2)\n### fulfills also the 3D strength 3 property\nsoacheck3D(OSOA64_2, s=2, el=2, t=3)\n### fulfills also the 4D strength 4 property\nDoE.base::GWLP(OSOA64_2/2)\n### but not the 3D strength 4 property\nsoacheck3D(OSOA64_2, s=2, el=2, t=4)\n### and not the 2D 4x2 and 2x4 stratification balance\nsoacheck2D(OSOA64_2, s=2, el=2, t=3)\n## six 36-level columns in 72 runs with OSOA strength 2\noa <- 
DoE.base::L72.2.5.3.3.4.1.6.7[,10:16]\nOSOA72 <- OSOAs_LiuLiu(oa, t=2, optimize=FALSE)\n\n\n"} {"package":"SOAs","topic":"OSOAs_hadamard","snippet":"### Name: OSOAs_hadamard\n### Title: function to create a strength 3 OSOA with 8-level columns or a\n### strength 3- OSOA with 4-level columns from a Hadamard matrix\n### Aliases: OSOAs_hadamard\n\n### ** Examples\n\ndim(OSOAs_hadamard(9, optimize=FALSE)) ## 9 8-level factors in 24 runs\ndim(OSOAs_hadamard(n=16, optimize=FALSE)) ## 6 8-level factors in 16 runs\nOSOAs_hadamard(n=24, m=6, optimize=FALSE) ## 6 8-level factors in 24 runs\n ## (though 10 would be possible)\ndim(OSOAs_hadamard(m=35, optimize=FALSE)) ## 35 8-level factors in 80 runs\n\n\n"} {"package":"SOAs","topic":"OSOAs_regular","snippet":"### Name: OSOAs_regular\n### Title: Function to create an OSOA in s^2 or s^3 levels and s^k runs\n### from a basic number of levels s and a power k\n### Aliases: OSOAs_regular\n\n### ** Examples\n\n## 13 columns in 9 levels each\nOSOAs_regular(3, 4, el=2, optimize=FALSE) ## 13 columns, phi_p about 0.117\n# optimizing level permutations typically improves phi_p a lot\n# OSOAs_regular(3, 4, el=2) ## 13 columns, phi_p typically below 0.055\n\n\n"} {"package":"SOAs","topic":"SOAs","snippet":"### Name: SOAs\n### Title: function to create SOAs of strength t with the GOA construction\n### by He and Tang.\n### Aliases: SOAs\n\n### ** Examples\n\naus <- SOAs(DoE.base::L27.3.4, optimize=FALSE) ## t=3 is the default\ndim(aus)\nsoacheck2D(aus, s=3, el=3) ## check for 2*\nsoacheck3D(aus, s=3, el=3) ## check for 3\n\naus2 <- SOAs(DoE.base::L27.3.4, t=2, optimize=FALSE)\n## t can be smaller than the array strength\n## --> more columns with fewer levels each\ndim(aus2)\nsoacheck2D(aus2, s=3, el=2, t=2) # check for 2\nsoacheck3D(aus2, s=3, el=2) # t=3 is the default (check for 3-)\n\n\n"} {"package":"SOAs","topic":"SOAs2plus_regular","snippet":"### Name: SOAs2plus_regular\n### Title: function to create SOAs of strength 2+ from regular s-level\n### designs\n### Aliases: SOAs2plus_regular\n\n### ** Examples\n\n## No test: \n## unoptimized OSOA with 8 16-level columns in 64 runs\n## (maximum possible number of columns)\nplan64 <- SOAs2plus_regular(4, 3, optimize=FALSE)\nocheck(plan64) ## the array has orthogonal columns\n\n## optimized SOA with 20 9-level columns in 81 runs\n## (up to 25 columns are possible)\nplan <- SOAs2plus_regular(3, 4, 20)\n## many column pairs have only 27 level pairs covered\ncount_npairs(plan)\n## an OA would exist for 10 9-level factors (DoE.base::L81.9.10)\n## it would cover all pairs\n## (SOAs are not for situations for which pair coverage\n## is of primary interest)\n## End(No test)\n\n\n"} {"package":"SOAs","topic":"SOAs_8level","snippet":"### Name: SOAs_8level\n### Title: Function to create 8-level SOAs according to Shi and Tang 2020\n### Aliases: SOAs_8level\n\n### ** Examples\n\n## use with optimization for actually using such designs\n## n/4 - 1 = 7 columns, strength 3+\nSOAs_8level(32, optimize=FALSE)\n\n## n/4 = 8 columns, strength 3 with alpha and beta\nSOAs_8level(32, m=8, optimize=FALSE)\n\n## 9 columns (special case n=32), strength 3 with alpha\nSOAs_8level(32, constr=\"ShiTang_alpha\", optimize=FALSE)\n\n## 5*n/16 = 5 columns, strength 3 with alpha\nSOAs_8level(16, constr=\"ShiTang_alpha\", optimize=FALSE)\n\n\n\n"} {"package":"SOAs","topic":"Spattern","snippet":"### Name: Spattern\n### Title: functions to evaluate stratification properties of (O)SOAs and\n### GSOAs\n### Aliases: Spattern dim_wt_tab soacheck2D 
soacheck3D\n\n### ** Examples\n\nnullcase <- matrix(0:7, nrow=8, ncol=4)\nsoacheck2D(nullcase, s=2)\nsoacheck3D(nullcase, s=2)\nSpattern(nullcase, s=2)\nSpattern(nullcase, s=2, maxdim=2)\n ## the non-zero entry at position 2 indicates that\n ## soacheck2D does not comply with t=2\n(Spat <- Spattern(nullcase, s=2, maxwt=4))\n ## comparison to maxdim=2 indicates that\n ## the contribution to S_4 from dimensions\n ## larger than 2 is 1\n## postprocessing Spat\ndim_wt_tab(Spat)\n\n## Shi and Tang strength 3+ construction in 7 8-level factors for 32 runs\nD <- SOAs_8level(32, optimize=FALSE)\n\n## check for strength 3+ (default el=3 is OK)\n## 2D check\nsoacheck2D(D, s=2, t=4)\n## 3D check\nsoacheck3D(D, s=2, t=4)\n## using Spattern (much faster for many columns)\n ## does not have strength 4\n Spattern(D, s=2)\n ## but complies with strength 4 for dim up to 3\n Spattern(D, s=2, maxwt=4, maxdim=3)\n ## inspect more detail\n Spat <- (Spattern(D, s = 2, maxwt=5))\n dim_wt_tab(Spat)\n\n\n"} {"package":"SOAs","topic":"XiaoXuMDLE","snippet":"### Name: XiaoXuMDLE\n### Title: Implementation of the Xiao Xu TA algorithm (experimental, for\n### comparison with MDLEs only)\n### Aliases: XiaoXuMDLE createF optimize\n### Keywords: internal\n\n### ** Examples\n\n## create 8-level columns from 4-level columns\nXiaoXuMDLE(DoE.base::L16.4.5, 2, nrounds = 5, nsteps=50)\n\n\n\n"} {"package":"SOAs","topic":"contr.FFbHelmert","snippet":"### Name: contr.FFbHelmert\n### Title: Full-factorial-based real-valued contrasts for s^el levels\n### Aliases: contr.FFbHelmert contr.FFbPoly\n\n### ** Examples\n\n## the same n can yield different contrasts for different s\n## Helmert variant\ncontr.FFbHelmert(16, 2)\nround(contr.FFbHelmert(16, 4), 4)\nround(contr.FFbHelmert(16, 16), 4)\n## Poly variant\ncontr.FFbPoly(16, 2)\nround(contr.FFbPoly(16, 4), 4)\nround(contr.FFbPoly(16, 16), 4)\n\n\n\n"} {"package":"SOAs","topic":"contr.Power","snippet":"### Name: contr.Power\n### Title: A contrast function based on regular factorials for number of\n### levels a prime or prime power\n### Aliases: contr.Power\n\n### ** Examples\n\n## the same n can yield different contrasts for different s\ncontr.Power(16, 2)\ncontr.Power(16, 4)\n\n\n\n"} {"package":"SOAs","topic":"contr.TianXu","snippet":"### Name: contr.TianXu\n### Title: A complex-valued contrast function for s^el levels based on\n### powers of the s-th root of unity\n### Aliases: contr.TianXu\n\n### ** Examples\n\n## the same n can yield different contrasts for different s\ncontr.TianXu(16, 2)\ncontr.TianXu(16, 4)\nround(contr.TianXu(16, 16), 4)\n\n\n\n"} {"package":"SOAs","topic":"createSaturated","snippet":"### Name: createSaturated\n### Title: Function to create a regular saturated strength 2 array\n### Aliases: createSaturated\n\n### ** Examples\n\ncreateSaturated(3, k=3) ## 27 x 13 array in 3 levels\n\n\n"} {"package":"SOAs","topic":"guide_SOAs","snippet":"### Name: guide_SOAs\n### Title: Utility function for inspecting available SOAs for which the\n### user need not provide an OA\n### Aliases: guide_SOAs\n### Keywords: array\n\n### ** Examples\n\n## guide_SOAs\n## There is a Zhou and Tang type SOA with 4-level columns in 8 runs\nguide_SOAs(2, 2, n=8)\n## There are no SOAs with 8-level columns in 8 runs\nguide_SOAs(2, 3, n=8)\n## What SOAs based on s=2 in s^3 levels with 7 columns\n## can be constructed without providing an OA?\nguide_SOAs(2, 3, m=7)\n## pick the Shi and Tang family 3 design\nmyST_3plus <- SOAs_8level(n=32, m=7, constr='ShiTang_alphabeta')\n## Note 
that the design has orthogonal columns and strength 3+,\n## i.e., very good balance properties.\n\n\n\n"} {"package":"SOAs","topic":"guide_SOAs_from_OA","snippet":"### Name: guide_SOAs_from_OA\n### Title: Utility function for inspecting SOAs obtainable from an OA\n### Aliases: guide_SOAs_from_OA\n### Keywords: array\n\n### ** Examples\n\n## guide_SOAs_from_OA\n## there is an OA(81, 3^10, 3) (L81.3.10 in package DoE.base)\n## inspect what can be done with it:\nguide_SOAs_from_OA(s=3, mOA=10, nOA=81, tOA=3)\n## the output shows that a strength 3 OSOA\n## with 4 columns of 27 levels each can be obtained in 81 runs\n## and provides the necessary code (replace OA with L81.3.10)\n## optimize=FALSE reduces example run time\nOSOAs_LiuLiu(L81.3.10, t=3, optimize=FALSE)\n## or that an SOA with 9 non-orthogonal columns can be obtained\n## in the same number of runs\nSOAs(L81.3.10, t=3)\n\n\n"} {"package":"SOAs","topic":"mbound_LiuLiu","snippet":"### Name: mbound_LiuLiu\n### Title: bound for number of columns for LiuLiu OSOAs\n### Aliases: mbound_LiuLiu\n\n### ** Examples\n\n## moa is the number of columns of an oa\nmoa <- rep(seq(4,40),3)\n## t is the strength used in the construction\n## the oa must have at least this strength\nt <- rep(2:4, each=37)\n## numbers of columns for the combination\nmbounds <- mapply(mbound_LiuLiu, moa, t)\n## depending on the number of levels\n## the number of runs can be excessive\n## for larger values of moa with larger t!\n## t=3 and t=4 have the same number of columns, except for moa=4*j+3\nplot(moa, mbounds, pch=t, col=t)\n\n\n"} {"package":"SOAs","topic":"ocheck","snippet":"### Name: ocheck\n### Title: functions to evaluate low order projection properties of (O)SOAs\n### Aliases: ocheck ocheck3 count_npairs count_nallpairs\n\n### ** Examples\n\n## Shi and Tang strength 3+ construction in 7 8-level factors for 32 runs\nD <- SOAs_8level(32, optimize=FALSE)\n## is an OSOA\nocheck(D)\n\n## an OSOA of strength 3 with 3-orthogonality\n## 4 columns in 27 levels each\n## second order model matrix\n\nD_o <- OSOAs_LiuLiu(DoE.base::L81.3.10, optimize=FALSE)\nocheck3(D_o)\n\n## benefit of 3-orthogonality for second order linear models\ncolnames(D_o) <- paste0(\"X\", 1:4)\ny <- stats::rnorm(81)\nmylm <- stats::lm(y~(X1+X2+X3+X4)^2 + I(X1^2)+I(X2^2)+I(X3^2)+I(X4^2),\n data=as.data.frame(scale(D_o, scale=FALSE)))\ncrossprod(stats::model.matrix(mylm))\n\n\n"} {"package":"SOAs","topic":"phi_optimize","snippet":"### Name: phi_optimize\n### Title: function to optimize the phi_p value of an array by level\n### permutation\n### Aliases: phi_optimize\n\n### ** Examples\n\noa <- lhs::createBoseBush(8,16)\nprint(phi_p(oa, dmethod=\"manhattan\"))\noa_optimized <- phi_optimize(oa)\nprint(phi_p(oa_optimized, dmethod=\"manhattan\"))\n\n\n"} {"package":"SOAs","topic":"phi_p","snippet":"### Name: phi_p\n### Title: Functions to evaluate space filling of an array\n### Aliases: phi_p mindist\n\n### ** Examples\n\nA <- DoE.base::L25.5.6 ## levels 1:5 for each factor\nphi_p(A)\nmindist(A) # 5\nA2 <- phi_optimize(A)\nphi_p(A2) ## improved\nmindist(A2) ## 6, improved\nA <- DoE.base::L16.4.5 ## levels 1:4 for each factor\nphi_p(A)\nphi_p(A, dmethod=\"euclidean\")\nA2 <- A\nA2[,4] <- c(2,4,3,1)[A[,4]]\nphi_p(A2)\n## Not run: \n##D ## A2 has fewer minimal distances\n##D par(mfrow=c(2,1))\n##D hist(dist(A), xlim=c(2,6), ylim=c(0,40))\n##D hist(dist(A2), xlim=c(2,6), ylim=c(0,40))\n## End(Not run)\n\n\n"} {"package":"SOAs","topic":"print.SOA","snippet":"### Name: print.SOA\n### Title: Print Methods\n### 
Aliases: print.SOA print.MDLE print.Spattern print.dim_wt_tab\n\n### ** Examples\n\nmyOSOA <- OSOAs_regular(s=3, k=3, optimize=FALSE)\nmyOSOA\nstr(myOSOA) ## structure for comparison\nSpat <- Spattern(myOSOA, s=3)\ndim_wt_tab(Spat) ## print method prints NAs as .\nprint(dim_wt_tab(Spat), na.print=\" \")\n\n\n"} {"package":"Dforest","topic":"DF_CV","snippet":"### Name: DF_CV\n### Title: Decision Forest algorithm: Model training with Cross-validation\n### Aliases: DF_CV\n\n### ** Examples\n\n ##data(iris)\n X = iris[,1:4]\n Y = iris[,5]\n names(Y)=rownames(X)\n\n random_seq=sample(nrow(X))\n split_rate=3\n split_sample = suppressWarnings(split(random_seq,1:split_rate))\n Train_X = X[-random_seq[split_sample[[1]]],]\n Train_Y = Y[-random_seq[split_sample[[1]]]]\n\n CV_result = DF_CV(Train_X, Train_Y)\n\n\n\n\n"} {"package":"Dforest","topic":"DF_dataFs","snippet":"### Name: DF_dataFs\n### Title: Decision Forest algorithm: Feature Selection in pre-processing\n### Aliases: DF_dataFs\n\n### ** Examples\n\n ##data(iris)\n X = iris[iris[,5]!=\"setosa\",1:4]\n Y = iris[iris[,5]!=\"setosa\",5]\n used_feat = DF_dataFs(X, Y)\n\n\n"} {"package":"Dforest","topic":"DF_dataPre","snippet":"### Name: DF_dataPre\n### Title: Decision Forest algorithm: Data pre-processing\n### Aliases: DF_dataPre\n\n### ** Examples\n\n ##data(iris)\n X = iris[,1:4]\n Keep_feat = DF_dataPre(X)\n\n\n"} {"package":"Dforest","topic":"DF_easy","snippet":"### Name: DF_easy\n### Title: Simple pre-defined pipeline for Decision forest\n### Aliases: DF_easy\n\n### ** Examples\n\n # data(demo_simple)\n X = iris[,1:4]\n Y = iris[,5]\n names(Y)=rownames(X)\n\n random_seq=sample(nrow(X))\n split_rate=3\n split_sample = suppressWarnings(split(random_seq,1:split_rate))\n Train_X = X[-random_seq[split_sample[[1]]],]\n Train_Y = Y[-random_seq[split_sample[[1]]]]\n Test_X = X[random_seq[split_sample[[1]]],]\n Test_Y = Y[random_seq[split_sample[[1]]]]\n\n Result = DF_easy(Train_X, Train_Y, Test_X, Test_Y)\n\n\n"} {"package":"Dforest","topic":"DF_pred","snippet":"### Name: DF_pred\n### Title: Decision Forest algorithm: Model prediction\n### Aliases: DF_pred\n\n### ** Examples\n\n # data(demo_simple)\n X = data_dili$X\n Y = data_dili$Y\n names(Y)=rownames(X)\n\n random_seq=sample(nrow(X))\n split_rate=3\n split_sample = suppressWarnings(split(random_seq,1:split_rate))\n Train_X = X[-random_seq[split_sample[[1]]],]\n Train_Y = Y[-random_seq[split_sample[[1]]]]\n Test_X = X[random_seq[split_sample[[1]]],]\n Test_Y = Y[random_seq[split_sample[[1]]]]\n\n used_model = DF_train(Train_X, Train_Y)\n Pred_result = DF_pred(used_model,Test_X,Test_Y)\n\n\n\n\n\n"} {"package":"Dforest","topic":"DF_train","snippet":"### Name: DF_train\n### Title: Decision Forest algorithm: Model training\n### Aliases: DF_train\n\n### ** Examples\n\n ##data(iris)\n X = iris[,1:4]\n Y = iris[,5]\n names(Y)=rownames(X)\n used_model = DF_train(X,factor(Y))\n\n\n\n"} {"package":"Dforest","topic":"Dforest","snippet":"### Name: Dforest\n### Title: Demo script to learn the Decision Forest package. Demo data are\n### located in data/ folder\n### Aliases: Dforest\n\n### ** Examples\n\n Dforest()\n\n\n"} {"package":"greybox","topic":"dalaplace","snippet":"### Name: dalaplace\n### Title: Asymmetric Laplace Distribution\n### Aliases: dalaplace ALaplace palaplace qalaplace ralaplace\n### Keywords: distribution\n\n### ** Examples\n\nx <- dalaplace(c(-100:100)/10, 0, 1, 0.2)\nplot(x, type=\"l\")\n\nx <- palaplace(c(-100:100)/10, 0, 1, 0.2)\nplot(x, type=\"l\")\n\nqalaplace(c(0.025,0.975), 0, c(1,2), 
c(0.2,0.3))\n\nx <- ralaplace(1000, 0, 1, 0.2)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"dbcnorm","snippet":"### Name: dbcnorm\n### Title: Box-Cox Normal Distribution\n### Aliases: dbcnorm BCNormal pbcnorm qbcnorm rbcnorm\n### Keywords: distribution\n\n### ** Examples\n\nx <- dbcnorm(c(-1000:1000)/200, 0, 1, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nx <- pbcnorm(c(-1000:1000)/200, 0, 1, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nqbcnorm(c(0.025,0.975), 0, c(1,2), 1)\n\nx <- rbcnorm(1000, 0, 1, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"dfnorm","snippet":"### Name: dfnorm\n### Title: Folded Normal Distribution\n### Aliases: dfnorm FNormal pfnorm qfnorm rfnorm\n### Keywords: distribution\n\n### ** Examples\n\nx <- dfnorm(c(-1000:1000)/200, 0, 1)\nplot(x, type=\"l\")\n\nx <- pfnorm(c(-1000:1000)/200, 0, 1)\nplot(x, type=\"l\")\n\nqfnorm(c(0.025,0.975), 0, c(1,2))\n\nx <- rfnorm(1000, 0, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"AICc","snippet":"### Name: AICc\n### Title: Corrected Akaike's Information Criterion and Bayesian\n### Information Criterion\n### Aliases: AICc BICc\n### Keywords: htest\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nourModel <- stepwise(xreg)\n\nAICc(ourModel)\nBICc(ourModel)\n\n\n\n"} {"package":"greybox","topic":"dlaplace","snippet":"### Name: dlaplace\n### Title: Laplace Distribution\n### Aliases: dlaplace Laplace plaplace qlaplace rlaplace\n### Keywords: distribution\n\n### ** Examples\n\nx <- dlaplace(c(-100:100)/10, 0, 1)\nplot(x, type=\"l\")\n\nx <- plaplace(c(-100:100)/10, 0, 1)\nplot(x, type=\"l\")\n\nqlaplace(c(0.025,0.975), 0, c(1,2))\n\nx <- rlaplace(1000, 0, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"dlogitnorm","snippet":"### Name: dlogitnorm\n### Title: Logit Normal Distribution\n### Aliases: dlogitnorm LogitNormal plogitnorm qlogitnorm rlogitnorm\n### Keywords: distribution\n\n### ** Examples\n\nx <- dlogitnorm(c(-1000:1000)/200, 0, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nx <- plogitnorm(c(-1000:1000)/200, 0, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nqlogitnorm(c(0.025,0.975), 0, c(1,2))\n\nx <- rlogitnorm(1000, 0, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"ds","snippet":"### Name: ds\n### Title: S Distribution\n### Aliases: ds SDistribution ps qs rs\n### Keywords: distribution\n\n### ** Examples\n\nx <- ds(c(-1000:1000)/10, 0, 1)\nplot(x, type=\"l\")\n\nx <- ps(c(-1000:1000)/10, 0, 1)\nplot(x, type=\"l\")\n\nqs(c(0.025,0.975), 0, 1)\n\nx <- rs(1000, 0, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"dtplnorm","snippet":"### Name: dtplnorm\n### Title: Three Parameter Log Normal Distribution\n### Aliases: dtplnorm TPLNormal ptplnorm qtplnorm rtplnorm\n### Keywords: distribution\n\n### ** Examples\n\nx <- dtplnorm(c(-1000:1000)/200, 0, 1, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nx <- ptplnorm(c(-1000:1000)/200, 0, 1, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nqtplnorm(c(0.025,0.975), 0, c(1,2), 1)\n\nx <- rtplnorm(1000, 0, 1, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"accuracy.greybox","snippet":"### Name: accuracy.greybox\n### Title: Error measures for an estimated model\n### Aliases: accuracy.greybox accuracy.predict.greybox\n\n### ** Examples\n\n\nxreg <- cbind(rlaplace(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rlaplace(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- 
c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nourModel <- alm(y~x1+x2+trend, xreg, subset=c(1:80), distribution=\"dlaplace\")\npredict(ourModel,xreg[-c(1:80),]) |>\n accuracy(xreg[-c(1:80),\"y\"])\n\n\n"} {"package":"greybox","topic":"actuals","snippet":"### Name: actuals\n### Title: Function extracts the actual values from the function\n### Aliases: actuals actuals.default actuals.lm actuals.alm\n### actuals.predict.greybox\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nourModel <- stepwise(xreg)\n\nactuals(ourModel)\n\n\n\n"} {"package":"greybox","topic":"alm","snippet":"### Name: alm\n### Title: Augmented Linear Model\n### Aliases: alm\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\n### An example with mtcars data and factors\nmtcars2 <- within(mtcars, {\n vs <- factor(vs, labels = c(\"V\", \"S\"))\n am <- factor(am, labels = c(\"automatic\", \"manual\"))\n cyl <- factor(cyl)\n gear <- factor(gear)\n carb <- factor(carb)\n})\n# The standard model with Log-Normal distribution\nourModel <- alm(mpg~., mtcars2[1:30,], distribution=\"dlnorm\")\nsummary(ourModel)\n## No test: \nplot(ourModel)\n## End(No test)\n# Produce table based on the output for LaTeX\nxtable(summary(ourModel))\n\n# Produce predictions with the one sided interval (upper bound)\npredict(ourModel, mtcars2[-c(1:30),], interval=\"p\", side=\"u\")\n\n# Model with heteroscedasticity (scale changes with the change of qsec)\n## No test: \nourModel <- alm(mpg~., mtcars2[1:30,], scale=~qsec)\n## End(No test)\n\n### Artificial data for the other examples\n## No test: \nxreg <- cbind(rlaplace(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rlaplace(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n## End(No test)\n\n# An example with Laplace distribution\n## No test: \nourModel <- alm(y~x1+x2+trend, xreg, subset=c(1:80), distribution=\"dlaplace\")\nsummary(ourModel)\nplot(predict(ourModel,xreg[-c(1:80),]))\n## End(No test)\n\n# And another one with Asymmetric Laplace distribution (quantile regression)\n# with optimised alpha\n## No test: \nourModel <- alm(y~x1+x2, xreg, subset=c(1:80), distribution=\"dalaplace\")\n## End(No test)\n\n# An example with AR(1) order\n## No test: \nourModel <- alm(y~x1+x2, xreg, subset=c(1:80), distribution=\"dnorm\", orders=c(1,0,0))\nsummary(ourModel)\nplot(predict(ourModel,xreg[-c(1:80),]))\n## End(No test)\n\n### Examples with the count data\n## No test: \nxreg[,1] <- round(exp(xreg[,1]-70),0)\n## End(No test)\n\n# Negative Binomial distribution\n## No test: \nourModel <- alm(y~x1+x2, xreg, subset=c(1:80), distribution=\"dnbinom\")\nsummary(ourModel)\npredict(ourModel,xreg[-c(1:80),],interval=\"p\",side=\"u\")\n## End(No test)\n\n# Poisson distribution\n## No test: \nourModel <- alm(y~x1+x2, xreg, subset=c(1:80), distribution=\"dpois\")\nsummary(ourModel)\npredict(ourModel,xreg[-c(1:80),],interval=\"p\",side=\"u\")\n## End(No test)\n\n\n### Examples with binary response variable\n## No test: \nxreg[,1] <- round(xreg[,1] / (1 + xreg[,1]),0)\n## End(No test)\n\n# Logistic distribution (logit regression)\n## No test: \nourModel <- alm(y~x1+x2, xreg, subset=c(1:80), distribution=\"plogis\")\nsummary(ourModel)\nplot(predict(ourModel,xreg[-c(1:80),],interval=\"c\"))\n## End(No test)\n\n# Normal distribution (probit regression)\n## No test: \nourModel <- alm(y~x1+x2, xreg, 
subset=c(1:80), distribution=\"pnorm\")\nsummary(ourModel)\nplot(predict(ourModel,xreg[-c(1:80),],interval=\"p\"))\n## End(No test)\n\n\n\n"} {"package":"greybox","topic":"association","snippet":"### Name: association\n### Title: Measures of association\n### Aliases: association assoc\n### Keywords: htest\n\n### ** Examples\n\n\nassociation(mtcars)\n\n\n\n"} {"package":"greybox","topic":"coef.greybox","snippet":"### Name: coef.greybox\n### Title: Coefficients of the model and their statistics\n### Aliases: coef.greybox coef.alm confint.alm confint.scale vcov.alm\n### vcov.scale summary.alm\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# An example with ALM\nourModel <- alm(mpg~., mtcars, distribution=\"dlnorm\")\ncoef(ourModel)\nvcov(ourModel)\nconfint(ourModel)\nsummary(ourModel)\n\n\n\n"} {"package":"greybox","topic":"coefbootstrap","snippet":"### Name: coefbootstrap\n### Title: Bootstrap for parameters of models\n### Aliases: coefbootstrap coefbootstrap.lm coefbootstrap.alm\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# An example with ALM\nourModel <- alm(mpg~., mtcars, distribution=\"dlnorm\", loss=\"HAM\")\n# A fast example with 10 iterations. Use at least 1000 to get better results\ncoefbootstrap(ourModel, nsim=10)\n\n\n\n"} {"package":"greybox","topic":"cramer","snippet":"### Name: cramer\n### Title: Calculate Cramer's V for categorical variables\n### Aliases: cramer\n### Keywords: htest\n\n### ** Examples\n\n\ncramer(mtcars$am, mtcars$gear)\n\n\n\n"} {"package":"greybox","topic":"detectdst","snippet":"### Name: detectdst\n### Title: DST and Leap year detector functions\n### Aliases: detectdst detectleap\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Generate matrix with monthly dummies for a zoo object\nx <- as.POSIXct(\"2004-01-01\")+0:(365*24*8)*60*60\ndetectdst(x)\ndetectleap(x)\n\n\n\n"} {"package":"greybox","topic":"determination","snippet":"### Name: determination\n### Title: Coefficients of determination\n### Aliases: determination determ\n### Keywords: models\n\n### ** Examples\n\n\n### Simple example\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"x1\",\"x2\",\"x3\",\"Noise\")\ndetermination(xreg)\n\n\n\n"} {"package":"greybox","topic":"ME","snippet":"### Name: ME\n### Title: Error measures\n### Aliases: ME Errors MAE MSE MRE MIS MPE MAPE MASE RMSSE rMAE rRMSE rAME\n### rMIS sMSE sPIS sCE sMIS GMRAE\n\n### ** Examples\n\n\n\ny <- rnorm(100,10,2)\ntestForecast <- rep(mean(y[1:90]),10)\n\nMAE(y[91:100],testForecast)\nMSE(y[91:100],testForecast)\n\nMPE(y[91:100],testForecast)\nMAPE(y[91:100],testForecast)\n\n# Measures from Petropoulos & Kourentzes (2015)\nMASE(y[91:100],testForecast,mean(abs(y[1:90])))\nsMSE(y[91:100],testForecast,mean(abs(y[1:90]))^2)\nsPIS(y[91:100],testForecast,mean(abs(y[1:90])))\nsCE(y[91:100],testForecast,mean(abs(y[1:90])))\n\n# Original MASE from Hyndman & Koehler (2006)\nMASE(y[91:100],testForecast,mean(abs(diff(y[1:90]))))\n\ntestForecast2 <- rep(y[91],10)\n# Relative measures, from and inspired by Davydenko & Fildes (2013)\nrMAE(y[91:100],testForecast2,testForecast)\nrRMSE(y[91:100],testForecast2,testForecast)\nrAME(y[91:100],testForecast2,testForecast)\nGMRAE(y[91:100],testForecast2,testForecast)\n\n#### Measures for the prediction intervals\n# An example with mtcars data\nourModel <- alm(mpg~., mtcars[1:30,], distribution=\"dnorm\")\nourBenchmark <- alm(mpg~1, 
mtcars[1:30,], distribution=\"dnorm\")\n\n# Produce predictions with the interval\nourForecast <- predict(ourModel, mtcars[-c(1:30),], interval=\"p\")\nourBenchmarkForecast <- predict(ourBenchmark, mtcars[-c(1:30),], interval=\"p\")\n\nMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,0.95)\nsMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,mean(mtcars$mpg[1:30]),0.95)\nrMIS(mtcars$mpg[-c(1:30)],ourForecast$lower,ourForecast$upper,\n ourBenchmarkForecast$lower,ourBenchmarkForecast$upper,0.95)\n\n### Also, see pinball function for other measures for the intervals\n\n\n\n"} {"package":"greybox","topic":"errorType","snippet":"### Name: errorType\n### Title: Functions that extracts type of error from the model\n### Aliases: errorType\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nourModel <- alm(y~x1+x2,as.data.frame(xreg))\n\nerrorType(ourModel)\n\n\n\n"} {"package":"greybox","topic":"extractScale","snippet":"### Name: extractScale\n### Title: Functions to extract scale and standard error from a model\n### Aliases: extractScale extractScale.default extractScale.greybox\n### extractSigma extractSigma.default extractSigma.greybox\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Generate the data\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+sqrt(exp(0.8+0.2*xreg[,1]))*rnorm(100,0,1),\n xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\n# Estimate the location and scale model\nourModel <- alm(y~., xreg, scale=~x1+x2)\n\n# Extract scale\nextractScale(ourModel)\n# Extract standard error\nextractSigma(ourModel)\n\n\n\n"} {"package":"greybox","topic":"dgnorm","snippet":"### Name: dgnorm\n### Title: The generalized normal distribution\n### Aliases: dgnorm pgnorm qgnorm rgnorm\n### Keywords: distribution\n\n### ** Examples\n\n# Density function values for standard normal distribution\nx <- dgnorm(seq(-1, 1, length.out = 100), 0, sqrt(2), 2)\nplot(x, type=\"l\")\n\n#CDF of standard Laplace\nx <- pgnorm(c(-100:100), 0, 1, 1)\nplot(x, type=\"l\")\n\n# Quantiles of S distribution\nqgnorm(c(0.025,0.975), 0, 1, 0.5)\n\n# Random numbers from a distribution with shape=10000 (approximately uniform)\nx <- rgnorm(1000, 0, 1, 1000)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"graphmaker","snippet":"### Name: graphmaker\n### Title: Linear graph construction function\n### Aliases: graphmaker\n### Keywords: graph linear plots\n\n### ** Examples\n\n\nxreg <- cbind(y=rnorm(100,100,10),x=rnorm(100,10,10))\nalmModel <- alm(y~x, xreg, subset=c(1:90))\nvalues <- predict(almModel, newdata=xreg[-c(1:90),], interval=\"prediction\")\n\ngraphmaker(xreg[,1],values$mean,fitted(values))\ngraphmaker(xreg[,1],values$mean,fitted(values),legend=FALSE)\ngraphmaker(xreg[,1],values$mean,fitted(values),legend=FALSE,lower=values$lower,upper=values$upper)\n\n# Produce the necessary ts objects from an arbitrary vectors\nactuals <- ts(c(1:10), start=c(2000,1), frequency=4)\nforecast <- ts(c(11:15),start=end(actuals)[1]+end(actuals)[2]*deltat(actuals),\n frequency=frequency(actuals))\ngraphmaker(actuals,forecast)\n\n# This should work as well\ngraphmaker(c(1:10),c(11:15))\n\n# This way you can add additional elements to the plot\ngraphmaker(c(1:10),c(11:15), parReset=FALSE)\npoints(c(1:15))\n# But don't forget to do 
dev.off() in order to reset the plotting area afterwards\n\n\n\n"} {"package":"greybox","topic":"greybox","snippet":"### Name: greybox\n### Title: Grey box\n### Aliases: greybox greybox-package\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\n## No test: \nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nstepwise(xreg)\n## End(No test)\n\n\n\n"} {"package":"greybox","topic":"hm","snippet":"### Name: hm\n### Title: Half moment of a distribution and its derivatives.\n### Aliases: hm ham asymmetry extremity cextremity\n\n### ** Examples\n\n\nx <- rnorm(100,0,1)\nhm(x)\nham(x)\nasymmetry(x)\nextremity(x)\ncextremity(x)\n\n\n\n"} {"package":"greybox","topic":"implant","snippet":"### Name: implant\n### Title: Implant the scale model in the location model\n### Aliases: implant\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+sqrt(exp(0.8+0.2*xreg[,1]))*rnorm(100,0,1),\n xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\n# Estimate the location model\nourModel <- alm(y~.,xreg)\n# Estimate the scale model\nourScale <- sm(ourModel,formula=~x1+x2)\n# Implant scale into location\nourModel <- implant(ourModel, ourScale)\n\n\n\n"} {"package":"greybox","topic":"is.greybox","snippet":"### Name: is.greybox\n### Title: Greybox classes checkers\n### Aliases: is.greybox is.alm is.occurrence is.greyboxC is.greyboxD\n### is.rollingOrigin is.rmc is.scale\n### Keywords: ts univar\n\n### ** Examples\n\n\nxreg <- cbind(rlaplace(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rlaplace(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nourModel <- alm(y~x1+x2, xreg, distribution=\"dnorm\")\n\nis.alm(ourModel)\nis.greybox(ourModel)\nis.greyboxC(ourModel)\nis.greyboxD(ourModel)\n\n\n\n"} {"package":"greybox","topic":"lmCombine","snippet":"### Name: lmCombine\n### Title: Combine regressions based on information criteria\n### Aliases: lmCombine\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\n### Simple example\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\ninSample <- xreg[1:80,]\noutSample <- xreg[-c(1:80),]\n# Combine all the possible models\nourModel <- lmCombine(inSample,bruteforce=TRUE)\npredict(ourModel,outSample)\nplot(predict(ourModel,outSample))\n\n### Fat regression example\nxreg <- matrix(rnorm(5000,10,3),50,100)\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(50,0,3),xreg,rnorm(50,300,10))\ncolnames(xreg) <- c(\"y\",paste0(\"x\",c(1:100)),\"Noise\")\ninSample <- xreg[1:40,]\noutSample <- xreg[-c(1:40),]\n# Combine only the models close to the optimal\nourModel <- lmCombine(inSample, ic=\"BICc\",bruteforce=FALSE)\nsummary(ourModel)\nplot(predict(ourModel, outSample))\n\n# Combine in parallel - should increase speed in case of big data\n## Not run: \n##D ourModel <- lmCombine(inSample, ic=\"BICc\", bruteforce=TRUE, parallel=TRUE)\n##D summary(ourModel)\n##D plot(predict(ourModel, outSample))\n## End(Not run)\n\n\n\n"} {"package":"greybox","topic":"lmDynamic","snippet":"### Name: lmDynamic\n### Title: Combine regressions based on point information criteria\n### Aliases: lmDynamic\n### Keywords: 
models nonlinear regression ts\n\n### ** Examples\n\n\n### Simple example\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\ninSample <- xreg[1:80,]\noutSample <- xreg[-c(1:80),]\n# Combine all the possible models\nourModel <- lmDynamic(inSample,bruteforce=TRUE)\npredict(ourModel,outSample)\nplot(predict(ourModel,outSample))\n\n\n\n"} {"package":"greybox","topic":"mcor","snippet":"### Name: mcor\n### Title: Multiple correlation\n### Aliases: mcor\n### Keywords: htest\n\n### ** Examples\n\n\nmcor(mtcars$am, mtcars$mpg)\n\n\n\n"} {"package":"greybox","topic":"measures","snippet":"### Name: measures\n### Title: Error measures for the provided forecasts\n### Aliases: measures\n\n### ** Examples\n\n\n\ny <- rnorm(100,10,2)\nourForecast <- rep(mean(y[1:90]),10)\n\nmeasures(y[91:100],ourForecast,y[1:90],digits=5)\n\n\n\n"} {"package":"greybox","topic":"nparam","snippet":"### Name: nparam\n### Title: Number of parameters and number of variates in the model\n### Aliases: nparam nvariate\n### Keywords: htest\n\n### ** Examples\n\n\n### Simple example\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nourModel <- lm(y~.,data=as.data.frame(xreg))\n\nnparam(ourModel)\nnvariate(ourModel)\n\n\n\n"} {"package":"greybox","topic":"outlierdummy","snippet":"### Name: outlierdummy\n### Title: Outlier detection and matrix creation\n### Aliases: outlierdummy outlierdummy.default outlierdummy.alm\n\n### ** Examples\n\n\n# Generate the data with S distribution\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rs(100,0,3),xreg)\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\")\n\n# Fit the normal distribution model\nourModel <- alm(y~x1+x2, xreg, distribution=\"dnorm\")\n\n# Detect outliers\nxregOutlierDummy <- outlierdummy(ourModel)\n\n\n\n"} {"package":"greybox","topic":"pcor","snippet":"### Name: pcor\n### Title: Partial correlations\n### Aliases: pcor\n### Keywords: htest\n\n### ** Examples\n\n\npcor(mtcars)\n\n\n\n"} {"package":"greybox","topic":"pinball","snippet":"### Name: pinball\n### Title: Pinball function\n### Aliases: pinball\n\n### ** Examples\n\n# An example with mtcars data\nourModel <- alm(mpg~., mtcars[1:30,], distribution=\"dnorm\")\n\n# Produce predictions with the interval\nourForecast <- predict(ourModel, mtcars[-c(1:30),], interval=\"p\")\n\n# Pinball with the L1 (quantile value)\npinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=1)\npinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=1)\n\n# Pinball with the L2 (expectile value)\npinball(mtcars$mpg[-c(1:30)],ourForecast$upper,level=0.975,loss=2)\npinball(mtcars$mpg[-c(1:30)],ourForecast$lower,level=0.025,loss=2)\n\n\n\n"} {"package":"greybox","topic":"plot.greybox","snippet":"### Name: plot.greybox\n### Title: Plots of the fit and residuals\n### Aliases: plot.greybox plot.alm\n### Keywords: ts univar\n\n### ** Examples\n\n\nxreg <- cbind(rlaplace(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rlaplace(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\nourModel <- alm(y~x1+x2, xreg, distribution=\"dnorm\")\n\npar(mfcol=c(4,4))\nplot(ourModel, c(1:14))\n\n\n\n"} {"package":"greybox","topic":"pAIC","snippet":"### Name: pAIC\n### Title: Point AIC\n### 
Aliases: pAIC pAICc pBIC pBICc\n### Keywords: htest\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nourModel <- alm(y~x1+x2,as.data.frame(xreg))\n\npAICValues <- pAIC(ourModel)\n\nmean(pAICValues)\nAIC(ourModel)\n\n\n\n"} {"package":"greybox","topic":"pointLik","snippet":"### Name: pointLik\n### Title: Point likelihood values\n### Aliases: pointLik\n### Keywords: htest\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nourModel <- alm(y~x1+x2,as.data.frame(xreg))\n\npointLik(ourModel)\n\n# Bias correction\npointLik(ourModel) - nparam(ourModel)\n\n# Bias correction in AIC style\n2*(nparam(ourModel)/nobs(ourModel) - pointLik(ourModel))\n\n# BIC calculation based on pointLik\nlog(nobs(ourModel))*nparam(ourModel) - 2*sum(pointLik(ourModel))\n\n\n\n"} {"package":"greybox","topic":"polyprod","snippet":"### Name: polyprod\n### Title: This function calculates parameters for the polynomials\n### Aliases: polyprod\n\n### ** Examples\n\n\n## Not run: polyprod(c(1,-2,-1),c(1,0.5,0.3))\n\n\n\n"} {"package":"greybox","topic":"predict.alm","snippet":"### Name: predict.alm\n### Title: Forecasting using greybox functions\n### Aliases: predict.alm predict.greybox forecast.greybox forecast.alm\n### predict.scale\n### Keywords: ts univar\n\n### ** Examples\n\n\nxreg <- cbind(rlaplace(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rlaplace(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\ninSample <- xreg[1:80,]\noutSample <- xreg[-c(1:80),]\n\nourModel <- alm(y~x1+x2, inSample, distribution=\"dlaplace\")\n\npredict(ourModel,outSample)\npredict(ourModel,outSample,interval=\"c\")\n\nplot(predict(ourModel,outSample,interval=\"p\"))\nplot(forecast(ourModel,h=10,interval=\"p\"))\n\n\n\n"} {"package":"greybox","topic":"drectnorm","snippet":"### Name: drectnorm\n### Title: Rectified Normal Distribution\n### Aliases: drectnorm rectNormal prectnorm qrectnorm rrectnorm\n### Keywords: distribution\n\n### ** Examples\n\nx <- drectnorm(c(-1000:1000)/200, 0, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nx <- prectnorm(c(-1000:1000)/200, 0, 1)\nplot(c(-1000:1000)/200, x, type=\"l\")\n\nqrectnorm(c(0.025,0.975), 0, c(1,2))\n\nx <- rrectnorm(1000, 0, 1)\nhist(x)\n\n\n\n"} {"package":"greybox","topic":"rmcb","snippet":"### Name: rmcb\n### Title: Regression for Multiple Comparison with the Best\n### Aliases: rmcb plot.rmcb\n### Keywords: htest\n\n### ** Examples\n\nN <- 50\nM <- 4\nourData <- matrix(rnorm(N*M,mean=0,sd=1), N, M)\nourData[,2] <- ourData[,2]+4\nourData[,3] <- ourData[,3]+3\nourData[,4] <- ourData[,4]+2\ncolnames(ourData) <- c(\"Method A\",\"Method B\",\"Method C - long name\",\"Method D\")\nourTest <- rmcb(ourData, level=0.95)\n\n# See the mean ranks:\nourTest$mean\n# The same is for the intervals:\nourTest$interval\n\n# You can also reproduce plots in different styles:\nplot(ourTest, outplot=\"lines\")\n\n# Or you can use the default \"mcb\" style and set additional parameters for the plot():\npar(mar=c(2,2,4,0)+0.1)\nplot(ourTest, main=\"Four methods\")\n\n\n\n"} {"package":"greybox","topic":"ro","snippet":"### Name: ro\n### Title: Rolling Origin\n### Aliases: ro\n### Keywords: ts\n\n### ** Examples\n\n\ny <- rnorm(100,0,1)\nourCall <- 
\"predict(arima(x=data,order=c(0,1,1)),n.ahead=h)\"\n# NOTE that the \"data\" needs to be used in the call, not \"y\".\n# This way we tell the function, where \"y\" should be used in the call of the function.\n\n# The default call and values\nourValue <- \"pred\"\nourRO <- ro(y, h=5, origins=5, ourCall, ourValue)\n\n# We can now plot the results of this evaluation:\nplot(ourRO)\n\n# You can also use dolar sign\nourValue <- \"$pred\"\n# And you can have constant in-sample size\nro(y, h=5, origins=5, ourCall, ourValue, ci=TRUE)\n\n# You can ask for several values\nourValue <- c(\"pred\",\"se\")\n# And you can have constant holdout size\nro(y, h=5, origins=20, ourCall, ourValue, ci=TRUE, co=TRUE)\n\n#### The following code will give exactly the same result as above,\n#### but computed in parallel using all but 1 core of CPU:\n## Not run: ro(y, h=5, origins=20, ourCall, ourValue, ci=TRUE, co=TRUE, parallel=TRUE)\n\n#### If you want to use functions from forecast package, please note that you need to\n#### set the values that need to be returned explicitly. There are two options for this.\n# Example 1:\n## Not run: \n##D ourCall <- \"forecast(ets(data), h=h, level=95)\"\n##D ourValue <- c(\"mean\", \"lower\", \"upper\")\n##D ro(y,h=5,origins=5,ourCall,ourValue)\n## End(Not run)\n\n# Example 2:\n## Not run: \n##D ourCall <- \"forecast(ets(data), h=h, level=c(80,95))\"\n##D ourValue <- c(\"mean\", \"lower[,1]\", \"upper[,1]\", \"lower[,2]\", \"upper[,2]\")\n##D ro(y,h=5,origins=5,ourCall,ourValue)\n## End(Not run)\n\n#### A more complicated example using the for loop and\n#### several time series\nx <- matrix(rnorm(120*3,0,1), 120, 3)\n\n## Form an array for the forecasts we will produce\n## We will have 4 origins with 6-steps ahead forecasts\nourForecasts <- array(NA,c(6,4,3))\n\n## Define models that need to be used for each series\nourModels <- list(c(0,1,1), c(0,0,1), c(0,1,0))\n\n## This call uses specific models for each time series\nourCall <- \"predict(arima(data, order=ourModels[[i]]), n.ahead=h)\"\nourValue <- \"pred\"\n\n## Start the loop. 
The important thing here is to use the same variable 'i' as in ourCall.\nfor(i in 1:3){\n ourData <- x[,i]\n ourForecasts[,,i] <- ro(data=ourData,h=6,origins=4,call=ourCall,\n value=ourValue,co=TRUE,silent=TRUE)$pred\n}\n\n## ourForecasts array now contains rolling origin forecasts from specific\n## models.\n\n##### An example with exogenous variables\nx <- rnorm(100,0,1)\nxreg <- matrix(rnorm(200,0,1),100,2,dimnames=list(NULL,c(\"x1\",\"x2\")))\n\n## 'counti' is used to define in-sample size of xreg,\n## 'counto' - the size of the holdout sample of xreg\n\nourCall <- \"predict(arima(x=data, order=c(0,1,1), xreg=xreg[counti,,drop=FALSE]),\n n.ahead=h, newxreg=xreg[counto,,drop=FALSE])\"\nourValue <- \"pred\"\nro(x,h=5,origins=5,ourCall,ourValue)\n\n##### Poisson regression with alm\nx <- rpois(100,2)\nxreg <- cbind(x,matrix(rnorm(200,0,1),100,2,dimnames=list(NULL,c(\"x1\",\"x2\"))))\nourCall <- \"predict(alm(x~., data=xreg[counti,,drop=FALSE], distribution='dpois'),\n newdata=xreg[counto,,drop=FALSE])\"\nourValue <- \"mean\"\ntestRO <- ro(xreg[,1],h=5,origins=5,ourCall,ourValue,co=TRUE)\nplot(testRO)\n\n## 'countf' is used to take xreg of the size corresponding to the whole\n## sample on each iteration\n## This is useful when working with functions from smooth package.\n## The following call will return the forecasts from es() function of smooth.\n## Not run: \n##D ourCall <- \"es(data=data, h=h, xreg=xreg[countf,,drop=FALSE])\"\n##D ourValue <- \"forecast\"\n##D ro(x,h=5,origins=5,ourCall,ourValue)\n## End(Not run)\n\n\n\n"} {"package":"greybox","topic":"sm","snippet":"### Name: sm\n### Title: Scale Model\n### Aliases: sm sm.default sm.lm sm.alm\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+sqrt(exp(0.8+0.2*xreg[,1]))*rnorm(100,0,1),\n xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\n\n# Estimate the location model\nourModel <- alm(y~.,xreg)\n# Estimate the scale model\nourScale <- sm(ourModel,formula=~x1+x2)\n# Summary of the scale model\nsummary(ourScale)\n\n\n\n"} {"package":"greybox","topic":"spread","snippet":"### Name: spread\n### Title: Construct scatterplot / boxplots for the data\n### Aliases: spread\n### Keywords: graph plots\n\n### ** Examples\n\n\n### Simple example\nspread(mtcars)\nspread(mtcars,log=TRUE)\n\n\n\n"} {"package":"greybox","topic":"stepwise","snippet":"### Name: stepwise\n### Title: Stepwise selection of regressors\n### Aliases: stepwise\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n\n### Simple example\nxreg <- cbind(rnorm(100,10,3),rnorm(100,50,5))\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nstepwise(xreg)\n\n### Mixture distribution of Log Normal and Cumulative Logit\nxreg[,1] <- xreg[,1] * round(exp(xreg[,1]-70) / (1 + exp(xreg[,1]-70)),0)\ncolnames(xreg) <- c(\"y\",\"x1\",\"x2\",\"Noise\")\nourModel <- stepwise(xreg, distribution=\"dlnorm\",\n occurrence=stepwise(xreg, distribution=\"plogis\"))\nsummary(ourModel)\n\n### Fat regression example\nxreg <- matrix(rnorm(20000,10,3),100,200)\nxreg <- cbind(100+0.5*xreg[,1]-0.75*xreg[,2]+rnorm(100,0,3),xreg,rnorm(100,300,10))\ncolnames(xreg) <- c(\"y\",paste0(\"x\",c(1:200)),\"Noise\")\nourModel <- 
stepwise(xreg,ic=\"AICc\")\nplot(ourModel$ICs,type=\"l\",ylim=range(min(ourModel$ICs),max(ourModel$ICs)+5))\npoints(ourModel$ICs)\ntext(c(1:length(ourModel$ICs))+0.1,ourModel$ICs+5,names(ourModel$ICs))\n\n\n\n"} {"package":"greybox","topic":"tableplot","snippet":"### Name: tableplot\n### Title: Construct a plot for categorical variable\n### Aliases: tableplot\n### Keywords: graph plots\n\n### ** Examples\n\n\ntableplot(mtcars$am, mtcars$gear)\n\n\n\n"} {"package":"greybox","topic":"temporaldummy","snippet":"### Name: temporaldummy\n### Title: Dummy variables for provided seasonality type\n### Aliases: temporaldummy temporaldummy.default temporaldummy.ts\n### temporaldummy.Date temporaldummy.POSIXt temporaldummy.zoo\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Generate matrix with dummies for a ts object\nx <- ts(rnorm(100,100,1),frequency=12)\ntemporaldummy(x)\n\n# Generate matrix with monthly dummies for a zoo object\nx <- as.Date(\"2003-01-01\")+0:99\ntemporaldummy(x, type=\"month\", of=\"year\", h=10)\n\n\n\n"} {"package":"greybox","topic":"xregExpander","snippet":"### Name: xregExpander\n### Title: Exogenous variables expander\n### Aliases: xregExpander\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Create matrix of two variables, make it ts object and expand it\nx <- cbind(rnorm(100,100,1),rnorm(100,50,3))\nx <- ts(x,frequency=12)\nxregExpander(x)\n\n\n\n"} {"package":"greybox","topic":"xregMultiplier","snippet":"### Name: xregMultiplier\n### Title: Exogenous variables cross-products\n### Aliases: xregMultiplier\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Create matrix of two variables and expand it\nx <- cbind(rnorm(100,100,1),rnorm(100,50,3))\nxregMultiplier(x)\n\n\n\n"} {"package":"greybox","topic":"xregTransformer","snippet":"### Name: xregTransformer\n### Title: Exogenous variables transformer\n### Aliases: xregTransformer\n### Keywords: models nonlinear regression ts\n\n### ** Examples\n\n# Create matrix of two variables and expand it\nx <- cbind(rnorm(100,100,1),rnorm(100,50,3))\nxregTransformer(x)\n\n\n\n"} {"package":"divvy","topic":"bandit","snippet":"### Name: bandit\n### Title: Rarefy localities within latitudinal bands\n### Aliases: bandit\n\n### ** Examples\n\n# load bivalve occurrences to rasterise\nlibrary(terra)\ndata(bivalves)\n\n# initialise Equal Earth projected coordinates\nrWorld <- rast()\nprj <- 'EPSG:8857'\nrPrj <- project(rWorld, prj, res = 200000) # 200,000m is approximately 2 degrees\n\n# coordinate column names for the current and target coordinate reference system\nxyCartes <- c('paleolng','paleolat')\nxyCell <- c('centroidX','centroidY')\n\n# project occurrences and retrieve cell centroids in new coordinate system\nllOccs <- vect(bivalves, geom = xyCartes, crs = 'epsg:4326')\nprjOccs <- project(llOccs, prj)\ncellIds <- cells(rPrj, prjOccs)[,'cell']\nbivalves[, xyCell] <- xyFromCell(rPrj, cellIds)\n\n# subsample 20 equal-area sites within 10-degree bands of absolute latitude\nn <- 20\nreps <- 100\nset.seed(11)\nbandAbs <- bandit(dat = bivalves, xy = xyCell,\n iter = reps, nSite = n, output = 'full',\n bin = 10, absLat = TRUE,\n crs = prj\n)\nhead(bandAbs[[1]]) # inspect first subsample\nnames(bandAbs)[1] # degree interval (absolute value) of first subsample\n#> [1] \"[10,20)\"\nunique(names(bandAbs)) # all intervals containing sufficient data\n#> [1] \"[10,20)\" \"[20,30)\" \"[30,40)\" \"[40,50)\"\n# note insufficient coverage to subsample at equator or above 50 degrees\n\n# 
subsample 20-degree bands, where central band spans the equator\n# (-10 S to 10 N latitude), as in Allen et al. (2020)\n# (An alternative, finer-grain way to divide 180 degrees evenly into an\n# odd number of bands would be to set 'bin' = 4.)\nbandCent <- bandit(dat = bivalves, xy = xyCell,\n iter = reps, nSite = n, output = 'full',\n bin = 20, centr = TRUE, absLat = FALSE,\n crs = prj\n)\nunique(names(bandCent)) # all intervals containing sufficient data\n#> [1] \"[-50,-30)\" \"[10,30)\" \"[30,50)\"\n\n\n\n"} {"package":"divvy","topic":"classRast","snippet":"### Name: classRast\n### Title: Convert point environment data to a raster of\n### majority-environment classes\n### Aliases: classRast\n\n### ** Examples\n\nlibrary(terra)\n# work in Equal Earth projected coordinates\nprj <- 'EPSG:8857'\n# generate point occurrences in a small area of Northern Africa\nn <- 100\nset.seed(5)\nx <- runif(n, 0, 30)\ny <- runif(n, 10, 30)\n# generate an environmental variable with a latitudinal gradient\n# more habitat type 0 (e.g. rock) near equator, more 1 (e.g. grassland) to north\nenv <- rbinom(n, 1, prob = (y-10)/20)\nenv[env == 0] <- 'rock'\nenv[env == 1] <- 'grass'\n# units for Equal Earth are meters, so if we consider x and y as given in km,\nx <- x * 1000\ny <- y * 1000\nptsDf <- data.frame(x, y, env)\n# raster for study area at 5-km resolution\nr <- rast(resolution = 5*1000, crs = prj,\n xmin = 0, xmax = 30000, ymin = 10000, ymax = 30000)\n\nbinRast <- classRast(grid = r, dat = ptsDf, xy = c('x', 'y'),\n env = 'env', cutoff = 0.6)\nbinRast\n\n# plot environment classification vs. original points\nplot(binRast, col = c('lightgreen', 'grey60', 'white'))\npoints(ptsDf[env=='rock', ], pch = 16, cex = 1.2) # occurrences of given habitat\npoints(ptsDf[env=='grass',], pch = 1, cex = 1.2)\n\n# classRast can also accept more than 2 environmental classes:\n\n# add a 3rd environmental class with maximum occurrence in bottom-left grid cell\nnewEnv <- data.frame('x' = rep(0, 10),\n 'y' = rep(10000, 10),\n 'env' = rep('new', 10))\nptsDf <- rbind(ptsDf, newEnv)\nbinRast <- classRast(grid = r, dat = ptsDf, xy = c('x', 'y'),\n env = 'env', cutoff = 0.6)\nplot(binRast, col = c('lightgreen', 'grey60', 'purple', 'white'))\n\n\n\n"} {"package":"divvy","topic":"clustr","snippet":"### Name: clustr\n### Title: Cluster localities within regions of nearest neighbours\n### Aliases: clustr\n\n### ** Examples\n\n# generate occurrences: 10 lat-long points in modern Australia\nn <- 10\nx <- seq(from = 140, to = 145, length.out = n)\ny <- seq(from = -20, to = -25, length.out = n)\npts <- data.frame(x, y)\n\n# sample 5 sets of 4 locations no more than 400km across\nclustr(dat = pts, xy = 1:2, iter = 5,\n nSite = 4, distMax = 400)\n\n\n\n"} {"package":"divvy","topic":"cookies","snippet":"### Name: cookies\n### Title: Rarefy localities within circular regions of standard area\n### Aliases: cookies\n\n### ** Examples\n\n# generate occurrences: 10 lat-long points in modern Australia\nn <- 10\nx <- seq(from = 140, to = 145, length.out = n)\ny <- seq(from = -20, to = -25, length.out = n)\npts <- data.frame(x, y)\n\n# sample 5 sets of 3 occurrences within 200km radius\ncookies(dat = pts, xy = 1:2, iter = 5,\n nSite = 3, r = 200)\n\n\n\n"} {"package":"divvy","topic":"rangeSize","snippet":"### Name: rangeSize\n### Title: Calculate common metrics of spatial distribution\n### Aliases: rangeSize\n\n### ** Examples\n\n# generate 20 occurrences for a pseudo-species\n# centred on Yellowstone National Park (latitude-longitude)\n# normally 
distributed with a standard deviation ~110 km\nset.seed(2)\nx <- rnorm(20, -110.5885, 2)\ny <- rnorm(20, 44.4280, 1)\npts <- cbind(x,y)\n\nrangeSize(pts)\n\n\n\n"} {"package":"divvy","topic":"sdSumry","snippet":"### Name: sdSumry\n### Title: Calculate basic spatial coverage and diversity metrics\n### Aliases: sdSumry\n\n### ** Examples\n\n# generate occurrences\nset.seed(9)\nx <- sample(rep(1:5, 10))\ny <- sample(rep(1:5, 10))\n# make some species 2x or 4x as common\nabund <- c(rep(4, 5), rep(2, 5), rep(1, 10))\nsp <- sample(letters[1:20], 50, replace = TRUE, prob = abund)\nobs <- data.frame(x, y, sp)\n\n# minimum sample data returned\nsdSumry(obs, c('x','y'), 'sp')\n\n# also calculate evenness and coverage-based rarefaction diversity estimates\nsdSumry(obs, xy = c('x','y'), taxVar = 'sp', quotaQ = 0.7)\n\n\n\n"} {"package":"divvy","topic":"uniqify","snippet":"### Name: uniqify\n### Title: Find unique (taxon) occurrence records\n### Aliases: uniqify\n\n### ** Examples\n\n# generate occurrence data\nx <- rep(1, 10)\ny <- c(rep(1, 5), 2:6)\nsp <- c(rep(letters[1:3], 2),\n rep(letters[4:5], 2))\nobs <- data.frame(x, y, sp)\n\n# compare original and unique datasets:\n# rows 4 and 5 removed as duplicates of rows 1 and 2, respectively\nobs\nuniqify(obs, taxVar = 3, xy = 1:2)\n\n# using taxon identifications or other third variable is optional\nuniqify(obs, xy = c('x', 'y'))\n\n# caution - data outside the taxon and occurrence variables\n# will be lost where associated with duplicate occurrences\nobs$notes <- letters[11:20]\nuniqify(obs, 1:2, 3)\n# the notes 'n' and 'o' are absent in the output data\n\n\n\n"} {"package":"gensvm","topic":"coef.gensvm","snippet":"### Name: coef.gensvm\n### Title: Get the coefficients of the fitted GenSVM model\n### Aliases: coef.gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\nfit <- gensvm(x, y)\nV <- coef(fit)\n\n\n\n"} {"package":"gensvm","topic":"coef.gensvm.grid","snippet":"### Name: coef.gensvm.grid\n### Title: Get the parameter grid from a GenSVM Grid object\n### Aliases: coef.gensvm.grid\n\n### ** Examples\n\n## No test: \nx <- iris[, -5]\ny <- iris[, 5]\n\ngrid <- gensvm.grid(x, y)\npg <- coef(grid)\n## End(No test)\n\n\n\n"} {"package":"gensvm","topic":"fitted.gensvm","snippet":"### Name: fitted.gensvm\n### Title: Show fitted labels for the GenSVM model\n### Aliases: fitted.gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit GenSVM and compute training set predictions\nfit <- gensvm(x, y)\nyhat <- fitted(fit)\n\n# compute the accuracy with gensvm.accuracy\ngensvm.accuracy(y, yhat)\n\n\n\n"} {"package":"gensvm","topic":"fitted.gensvm.grid","snippet":"### Name: fitted.gensvm.grid\n### Title: Fitted labels for the GenSVMGrid class\n### Aliases: fitted.gensvm.grid\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit GenSVM and compute training set predictions\nfit <- gensvm(x, y)\nyhat <- fitted(fit)\n\n# compute the accuracy with gensvm.accuracy\ngensvm.accuracy(y, yhat)\n\n\n\n"} {"package":"gensvm","topic":"gensvm","snippet":"### Name: gensvm\n### Title: Fit the GenSVM model\n### Aliases: gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit using the default parameters and show progress\nfit <- gensvm(x, y, verbose=TRUE)\n\n# fit with some changed parameters\nfit <- gensvm(x, y, lambda=1e-6)\n\n# Early stopping defined through epsilon\nfit <- gensvm(x, y, epsilon=1e-3)\n\n# Early stopping defined through max.iter\nfit <- gensvm(x, y, max.iter=1000)\n\n# Nonlinear training\nfit <- gensvm(x, y, 
kernel='rbf', max.iter=1000)\nfit <- gensvm(x, y, kernel='poly', degree=2, gamma=1.0, max.iter=1000)\n\n# Setting the random seed and comparing results\nfit <- gensvm(x, y, random.seed=123, max.iter=1000)\nfit2 <- gensvm(x, y, random.seed=123, max.iter=1000)\nall.equal(coef(fit), coef(fit2))\n\n\n\n\n"} {"package":"gensvm","topic":"gensvm.accuracy","snippet":"### Name: gensvm.accuracy\n### Title: Compute the accuracy score\n### Aliases: gensvm.accuracy\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\nfit <- gensvm(x, y)\ngensvm.accuracy(predict(fit, x), y)\n\n\n\n"} {"package":"gensvm","topic":"gensvm.grid","snippet":"### Name: gensvm.grid\n### Title: Cross-validated grid search for GenSVM\n### Aliases: gensvm.grid\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n## No test: \n# use the default parameter grid\ngrid <- gensvm.grid(x, y, verbose=TRUE)\n## End(No test)\n\n# use a smaller parameter grid\npg <- expand.grid(p=c(1.0, 1.5, 2.0), kappa=c(-0.9, 1.0), epsilon=c(1e-3))\ngrid <- gensvm.grid(x, y, param.grid=pg)\n\n# print the result\nprint(grid)\n\n## No test: \n# Using a custom scoring function (accuracy as percentage)\nacc.pct <- function(yt, yp) { return (100 * sum(yt == yp) / length(yt)) }\ngrid <- gensvm.grid(x, y, scoring=acc.pct)\n\n# With RBF kernel and very verbose progress printing\npg <- expand.grid(kernel=c('rbf'), gamma=c(1e-2, 1e-1, 1, 1e1, 1e2),\n lambda=c(1e-8, 1e-6), max.iter=c(5000))\ngrid <- gensvm.grid(x, y, param.grid=pg, verbose=2)\n## End(No test)\n\n\n\n"} {"package":"gensvm","topic":"gensvm.maxabs.scale","snippet":"### Name: gensvm.maxabs.scale\n### Title: Scale each column of a matrix by its maximum absolute value\n### Aliases: gensvm.maxabs.scale\n\n### ** Examples\n\nx <- iris[, -5]\n\n# check the min and max of the columns\napply(x, 2, min)\napply(x, 2, max)\n\n# scale the data\nx.scale <- gensvm.maxabs.scale(x)\n\n# check again (max should be 1.0, min shouldn't be below -1)\napply(x.scale, 2, min)\napply(x.scale, 2, max)\n\n# with a train and test dataset\nsplit <- gensvm.train.test.split(x)\nx.train <- split$x.train\nx.test <- split$x.test\nscaled <- gensvm.maxabs.scale(x.train, x.test)\nx.train.scl <- scaled$x\nx.test.scl <- scaled$x.test\n\n\n\n"} {"package":"gensvm","topic":"gensvm.refit","snippet":"### Name: gensvm.refit\n### Title: Train an already fitted model on new data\n### Aliases: gensvm.refit\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit a standard model and refit with slightly different parameters\nfit <- gensvm(x, y)\nfit2 <- gensvm.refit(fit, x, y, epsilon=1e-8)\n\n## No test: \n# refit a model returned by a grid search\ngrid <- gensvm.grid(x, y)\nfit <- gensvm.refit(fit, x, y, epsilon=1e-8)\n## End(No test)\n\n# refit on different data\nidx <- runif(nrow(x)) > 0.5\nx1 <- x[idx, ]\nx2 <- x[!idx, ]\ny1 <- y[idx]\ny2 <- y[!idx]\n\nfit1 <- gensvm(x1, y1)\nfit2 <- gensvm.refit(fit1, x2, y2)\n\n\n\n"} {"package":"gensvm","topic":"gensvm.train.test.split","snippet":"### Name: gensvm.train.test.split\n### Title: Create a train/test split of a dataset\n### Aliases: gensvm.train.test.split\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# using the default values\nsplit <- gensvm.train.test.split(x, y)\n\n# using the split in a GenSVM model\nfit <- gensvm(split$x.train, split$y.train)\ngensvm.accuracy(split$y.test, predict(fit, split$x.test))\n\n# using attach makes the results directly available\nattach(gensvm.train.test.split(x, y))\nfit <- gensvm(x.train, y.train)\ngensvm.accuracy(y.test, predict(fit, 
x.test))\n\n\n\n"} {"package":"gensvm","topic":"plot.gensvm","snippet":"### Name: plot.gensvm\n### Title: Plot the simplex space of the fitted GenSVM model\n### Aliases: plot.gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# train the model\nfit <- gensvm(x, y)\n\n# plot the simplex space\nplot(fit)\n\n# plot and use the true colors (easier to spot misclassified samples)\nplot(fit, y)\n\n# plot only misclassified samples\nx.mis <- x[predict(fit) != y, ]\ny.mis.true <- y[predict(fit) != y]\nplot(fit, newdata=x.mis)\nplot(fit, y.mis.true, newdata=x.mis)\n\n# plot a 2-d model\nxx <- x[y %in% c('versicolor', 'virginica'), ]\nyy <- y[y %in% c('versicolor', 'virginica')]\nfit <- gensvm(xx, yy, kernel='rbf', max.iter=1000)\nplot(fit)\n\n\n\n"} {"package":"gensvm","topic":"plot.gensvm.grid","snippet":"### Name: plot.gensvm.grid\n### Title: Plot the simplex space of the best fitted model in the\n### GenSVMGrid\n### Aliases: plot.gensvm.grid\n\n### ** Examples\n\n## No test: \nx <- iris[, -5]\ny <- iris[, 5]\n\ngrid <- gensvm.grid(x, y)\nplot(grid, x)\n## End(No test)\n\n\n\n"} {"package":"gensvm","topic":"predict.gensvm","snippet":"### Name: predict.gensvm\n### Title: Predict class labels with the GenSVM model\n### Aliases: predict.gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# create a training and test sample\nattach(gensvm.train.test.split(x, y))\nfit <- gensvm(x.train, y.train)\n\n# predict the class labels of the test sample\ny.test.pred <- predict(fit, x.test)\n\n# compute the accuracy with gensvm.accuracy\ngensvm.accuracy(y.test, y.test.pred)\n\n\n\n"} {"package":"gensvm","topic":"predict.gensvm.grid","snippet":"### Name: predict.gensvm.grid\n### Title: Predict class labels from the GenSVMGrid class\n### Aliases: predict.gensvm.grid\n\n### ** Examples\n\n## No test: \nx <- iris[, -5]\ny <- iris[, 5]\n\n# run a grid search\ngrid <- gensvm.grid(x, y)\n\n# predict training sample\ny.hat <- predict(grid, x)\n## End(No test)\n\n\n\n"} {"package":"gensvm","topic":"print.gensvm","snippet":"### Name: print.gensvm\n### Title: Print the fitted GenSVM model\n### Aliases: print.gensvm\n\n### ** Examples\n\nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit and print the model\nfit <- gensvm(x, y)\nprint(fit)\n\n# (advanced) use the fact that print returns the fitted model\nfit <- gensvm(x, y)\npredict(print(fit), x)\n\n\n\n"} {"package":"gensvm","topic":"print.gensvm.grid","snippet":"### Name: print.gensvm.grid\n### Title: Print the fitted GenSVMGrid model\n### Aliases: print.gensvm.grid\n\n### ** Examples\n\n## No test: \nx <- iris[, -5]\ny <- iris[, 5]\n\n# fit a grid search and print the resulting object\ngrid <- gensvm.grid(x, y)\nprint(grid)\n## End(No test)\n\n\n\n"} {"package":"cgmanalysis","topic":"cgmreport","snippet":"### Name: cgmreport\n### Title: Generate AGP\n### Aliases: cgmreport\n\n### ** Examples\n\ncgmreport(system.file(\"extdata\",\"Cleaned\",package = \"cgmanalysis\"))\n\n\n"} {"package":"cgmanalysis","topic":"cgmvariables","snippet":"### Name: cgmvariables\n### Title: Calculate CGM Variables\n### Aliases: cgmvariables\n\n### ** Examples\n\ncgmvariables(system.file(\"extdata\",\"Cleaned\",package = \"cgmanalysis\"))\n\n\n"} {"package":"cgmanalysis","topic":"cleandata","snippet":"### Name: cleandata\n### Title: Clean CGM Data\n### Aliases: cleandata\n\n### ** Examples\n\n## Not run: \n##D cleandata(system.file(\"extdata\", \"De-identified\",\n##D package = \"cgmanalysis\"\n##D ))\n## End(Not run)\n\n\n"} 
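The three cgmanalysis snippets above each show a single call; the short sketch below chains them into the usual clean-then-summarize workflow. It is a minimal, hedged example: it reuses only the example folders shipped with the package (the same paths as in the snippets above), and cleandata() writes its cleaned files to that function's default output location (see its help page), so the two summary calls here simply point at the pre-cleaned folder bundled with the package.

library(cgmanalysis)
# folders shipped with the package (same paths as in the snippets above)
raw <- system.file("extdata", "De-identified", package = "cgmanalysis")
cleaned <- system.file("extdata", "Cleaned", package = "cgmanalysis")
# step 1: standardize the raw, de-identified CGM exports
cleandata(raw)
# step 2: compute per-subject CGM summary variables from the cleaned files
cgmvariables(cleaned)
# step 3: generate aggregate AGP reports from the same cleaned files
cgmreport(cleaned)
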
{"package":"furrr","topic":"furrr_options","snippet":"### Name: furrr_options\n### Title: Options to fine tune furrr\n### Aliases: furrr_options\n\n### ** Examples\n\nfurrr_options()\n\n\n"} {"package":"furrr","topic":"future_imap","snippet":"### Name: future_imap\n### Title: Apply a function to each element of a vector, and its index via\n### futures\n### Aliases: future_imap future_imap_chr future_imap_dbl future_imap_int\n### future_imap_lgl future_imap_raw future_imap_dfr future_imap_dfc\n### future_iwalk\n\n### ** Examples\n\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\nfuture_imap_chr(sample(10), ~ paste0(.y, \": \", .x))\n\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_invoke_map","snippet":"### Name: future_invoke_map\n### Title: Invoke functions via futures\n### Aliases: future_invoke_map future_invoke_map_chr future_invoke_map_dbl\n### future_invoke_map_int future_invoke_map_lgl future_invoke_map_raw\n### future_invoke_map_dfr future_invoke_map_dfc\n\n### ** Examples\n\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\ndf <- dplyr::tibble(\n f = c(\"runif\", \"rpois\", \"rnorm\"),\n params = list(\n list(n = 10),\n list(n = 5, lambda = 10),\n list(n = 10, mean = -3, sd = 10)\n )\n)\n\nfuture_invoke_map(df$f, df$params, .options = furrr_options(seed = 123))\n\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_map","snippet":"### Name: future_map\n### Title: Apply a function to each element of a vector via futures\n### Aliases: future_map future_map_chr future_map_dbl future_map_int\n### future_map_lgl future_map_raw future_map_dfr future_map_dfc\n### future_walk\n\n### ** Examples\n\nlibrary(magrittr)\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\n1:10 %>%\n future_map(rnorm, n = 10, .options = furrr_options(seed = 123)) %>%\n future_map_dbl(mean)\n\n# If each element of the output is a data frame, use\n# `future_map_dfr()` to row-bind them together:\nmtcars %>%\n split(.$cyl) %>%\n future_map(~ lm(mpg ~ wt, data = .x)) %>%\n future_map_dfr(~ as.data.frame(t(as.matrix(coef(.)))))\n\n\n# You can be explicit about what gets exported to the workers.\n# To see this, use multisession (not multicore as the forked workers\n# still have access to this environment)\n## No test: \nplan(multisession)\n## End(No test)\nx <- 1\ny <- 2\n\n# This will fail, y is not exported (no black magic occurs)\ntry(future_map(1, ~y, .options = furrr_options(globals = \"x\")))\n\n# y is exported\nfuture_map(1, ~y, .options = furrr_options(globals = \"y\"))\n\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_map2","snippet":"### Name: future_map2\n### Title: Map over multiple inputs simultaneously via futures\n### Aliases: future_map2 future_map2_chr future_map2_dbl future_map2_int\n### future_map2_lgl future_map2_raw future_map2_dfr future_map2_dfc\n### future_pmap future_pmap_chr future_pmap_dbl future_pmap_int\n### future_pmap_lgl future_pmap_raw future_pmap_dfr future_pmap_dfc\n### future_walk2 future_pwalk\n\n### ** Examples\n\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\nx <- list(1, 10, 100)\ny <- list(1, 2, 3)\nz <- list(5, 50, 
500)\n\nfuture_map2(x, y, ~ .x + .y)\n\n# Split into pieces, fit model to each piece, then predict\nby_cyl <- split(mtcars, mtcars$cyl)\nmods <- future_map(by_cyl, ~ lm(mpg ~ wt, data = .))\nfuture_map2(mods, by_cyl, predict)\n\nfuture_pmap(list(x, y, z), sum)\n\n# Matching arguments by position\nfuture_pmap(list(x, y, z), function(a, b, c) a / (b + c))\n\n# Vectorizing a function over multiple arguments\ndf <- data.frame(\n x = c(\"apple\", \"banana\", \"cherry\"),\n pattern = c(\"p\", \"n\", \"h\"),\n replacement = c(\"x\", \"f\", \"q\"),\n stringsAsFactors = FALSE\n)\n\nfuture_pmap(df, gsub)\nfuture_pmap_chr(df, gsub)\n\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_map_if","snippet":"### Name: future_map_if\n### Title: Apply a function to each element of a vector conditionally via\n### futures\n### Aliases: future_map_if future_map_at\n\n### ** Examples\n\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\n# Modify the even elements\nfuture_map_if(1:5, ~.x %% 2 == 0L, ~ -1)\n\nfuture_map_at(1:5, c(1, 5), ~ -1)\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_modify","snippet":"### Name: future_modify\n### Title: Modify elements selectively via futures\n### Aliases: future_modify future_modify_at future_modify_if\n\n### ** Examples\n\nlibrary(magrittr)\n## No test: \nplan(multisession, workers = 2)\n## End(No test)\n\n# Convert each col to character, in parallel\nfuture_modify(mtcars, as.character)\n\niris %>%\n future_modify_if(is.factor, as.character) %>%\n str()\n\nmtcars %>%\n future_modify_at(c(1, 4, 5), as.character) %>%\n str()\n\n## Don't show: \n# Close open connections for R CMD Check\nif (!inherits(plan(), \"sequential\")) plan(sequential)\n## End(Don't show)\n\n\n"} {"package":"furrr","topic":"future_options","snippet":"### Name: future_options\n### Title: Deprecated furrr options\n### Aliases: future_options\n### Keywords: internal\n\n### ** Examples\n\ntry(future_options())\n\n\n"} {"package":"mixtox","topic":"BMD","snippet":"### Name: BMD\n### Title: Calculating benchmark dose (BMD) and lower limit of benchmark\n### dose (BMDL)\n### Aliases: BMD\n### Keywords: BMD BMDL\n\n### ** Examples\n\n## example 1\n# calculate the BMD of heavy metal Ni(2+) on the MCF-7 cells \nx <- cytotox$Ni$x\nrspn <- cytotox$Ni$y\nobj <- curveFit(x, rspn, eq = 'Logit', param = c(12, 3), effv = c(0.05, 0.5), rtype = 'quantal')\nBMD(obj, bmr = 0.10, backg = 0, def = 'additional', eq = 'default', sigLev = 0.05, ci = 'CI')\n\n\n"} {"package":"mixtox","topic":"CEx","snippet":"### Name: CEx\n### Title: Effect Calculation for All Nineteen Curves\n### Aliases: CEx\n### Keywords: effect concentration inverse function\n\n### ** Examples\n\n## example 1\n# calculate the responses of hormesis curves at the concentration of 0.1 and 0.02 mol/L\nmodel <- hormesis$sgl$model\nparam <- hormesis$sgl$param\nCEx(model, param, conc = c(0.1, 0.02)) \n\n## example 2\n# calculate the effect caused by four heavy metals and four ionic liquids at the concentration of\n# 0.00001 and 0.00002 mol/L on the MCF-7 cells\nmodel <- cytotox$sgl$model\nparam <- cytotox$sgl$param\nCEx(model, param, conc = c(0.00001, 0.00002)) \n\n## example 3\n# calculate the response ranges\nmodel <- hormesis$sgl$model\nparam <- hormesis$sgl$param\nCEx(model, param, conc = c(0, 
1e20))\n\n\n"} {"package":"mixtox","topic":"ECx","snippet":"### Name: ECx\n### Title: Effect Concentration Calculation for Sigmoidal Models\n### Aliases: ECx\n### Keywords: effect concentration concentration-response curve\n\n### ** Examples\n\n## example 1\n# calculate EC5 and EC50 of seven antibiotics on the photobacteria\nmodel <- antibiotox$sgl$model\nparam <- antibiotox$sgl$param\neffv <- c(0.05, 0.5)\nECx(model, param, effv = c(0.05, 0.50))\n\n## example 2\n# calculate EC5 and EC50 of four heavy metals and four ionic liquids on the MCF-7 cells\nmodel <- cytotox$sgl$model\nparam <- cytotox$sgl$param\nECx(model, param, effv = c(0.05, 0.50), rtype = 'quantal')\n\n\n"} {"package":"mixtox","topic":"NOEC","snippet":"### Name: NOEC\n### Title: NOEC and LOEC Calculation\n### Aliases: NOEC\n### Keywords: NOEC LOEC\n\n### ** Examples\n\n## example 1\n# calculate the NOEC and LOEC of heavy metal Ni(2+) on the MCF-7 cells at the default significance \n# level of 0.05\nx <- cytotox$Ni$x\nrspn <- cytotox$Ni$y\nNOEC(x, rspn)\n\n## example 2\n# calculate the NOEC and LOEC of Neomycin sulfate on the photobacteria at the significance \n# level of 0.01\nx <- antibiotox$NEO$x\nrspn <- antibiotox$NEO$y\nNOEC(x, rspn, sigLev = 0.01)\n\n\n"} {"package":"mixtox","topic":"antibiotox","snippet":"### Name: antibiotox\n### Title: Toxicity of Seven Antibiotics on Photobacteria\n### Aliases: antibiotox\n### Keywords: quantal responses continuous responses photobacteria\n\n### ** Examples\n\n# example 1\n## Retrieve the toxicity information of PAR on photobacteria.\nantibiotox$PAR\n# example 2\n## Retrieve the toxicity information of two eecr mixtures on photobacteria.\nantibiotox$eecr.mix\n\n\n"} {"package":"mixtox","topic":"caPred","snippet":"### Name: caPred\n### Title: Mixture Toxicity Prediction Based on Concentration Addition\n### Aliases: caPred\n### Keywords: concentration addition equal effect concentration ratio\n### uniform design concentration ratio arbitrary concentration ratio\n### uniform design table mixture effect\n\n### ** Examples\n\n## example 1\n# using CA to predict the toxicity of mixtures designed by eecr at the\n# effect concentration of EC05 and EC50\n# eecr mixture design is based on seven antibiotics (seven factors).\nmodel <- antibiotox$sgl$model\nparam <- antibiotox$sgl$param\ncaPred(model, param, mixType = \"eecr\", effv = c(0.05, 0.5))\n\n## example 2\n# using CA to predict the mixtures designed by udcr\n# the udcr mixture design is based on four heavy metals and four ionic liquids (eight factors).\n# five levels (EC05, EC10, EC20, EC30, and EC50 ) are allocated in the uniform table using the\n# pseudo-level technique (Liang et al., 2001)\nmodel <- cytotox$sgl$model\nparam <- cytotox$sgl$param\neffv <- c(0.05, 0.05, 0.10, 0.10, 0.20, 0.20, 0.30, 0.30, 0.50, 0.50)\ncaPred(model, param, mixType = \"udcr\", effv)\n\n## example 3\n# using CA to predict the mixtures designed by acr\n# the acr mixture design is based on five antibiotics (five factors).\n# every component in the mixture shares exactly the same ratio (0.20) \nmodel <- antibiotox$sgl$model[1 : 5]\nparam <- antibiotox$sgl$param[1 : 5, ]\neffv <- c(0.2, 0.2, 0.2, 0.2, 0.2)\ncaPred(model, param, mixType = \"acr\", effv)\n\n\n"} {"package":"mixtox","topic":"curveFit","snippet":"### Name: curveFit\n### Title: Curve Fitting\n### Aliases: curveFit\n### Keywords: curve fitting dose-response curve goodness of fit quantal\n### dose-responses continuous dose-responses hormesis dose-responses\n### response range effect concentration\n\n### ** 
Examples\n\n## example 1\n# Fit hormesis dose-response data.\n# Calculate the concentrations that cause 5% or 50% inhibition.\nx <- hormesis$OmimCl$x\nrspn <- hormesis$OmimCl$y\ncurveFit(x, rspn, eq = 'Biphasic', param = c(-0.34, 0.001, 884, 0.01, 128), \n\t\t\teffv = 0.5, rtype = 'hormesis')\n\nx <- hormesis$HmimCl$x\nrspn <- hormesis$HmimCl$y\ncurveFit(x, rspn, eq = 'Biphasic', param = c(-0.59, 0.001, 160,0.05, 19), \n\t\t\teffv = c(0.05, 0.5), rtype = 'hormesis')\n\nx <- hormesis$ACN$x\nrspn <- hormesis$ACN$y\ncurveFit(x, rspn, eq = 'Brain_Consens', param = c(2.5, 2.8, 0.6, 2.44), \n\t\t\teffv = c(0.05, 0.5), rtype = 'hormesis')\n\nx <- hormesis$Acetone$x\nrspn <- hormesis$Acetone$y\ncurveFit(x, rspn, eq = 'BCV', param = c(1.0, 3.8, 0.6, 2.44), effv = c(0.05, 0.5), \n\t\t\trtype = 'hormesis')\n\n## example 2\n# Fit quantal dose-responses: the inhibition of heavy metal Ni(2+) on the growth of MCF-7 cells.\n# Calculate the concentrations that cause 5% and 50% inhibition. \nx <- cytotox$Ni$x\nrspn <- cytotox$Ni$y\ncurveFit(x, rspn, eq = 'Logit', param = c(12, 3), effv = c(0.05, 0.5), rtype = 'quantal')\n\n## example 3\n# Fit quantal dose-responses: the inhibition effect of Paromomycin Sulfate (PAR) on photobacteria.\n# Calculate the concentrations that cause 5% and 50% inhibition.\nx <- antibiotox$PAR$x\nrspn <- antibiotox$PAR$y\ncurveFit(x, rspn, eq = 'Logit', param = c(26, 4), effv = c(0.05, 0.5))\n\n\n"} {"package":"mixtox","topic":"cytotox","snippet":"### Name: cytotox\n### Title: Cytotoxicity of Heavy Metal Ions and Ionic Liquids on MCF-7\n### Aliases: cytotox\n### Keywords: quantal responses continuous responses cytotoxicity\n\n### ** Examples\n\n# example 1\n## Retrieve the toxicity data of Ni on MCF-7.\ncytotox$Ni\n\n# example 2\n## Retrieve the toxicity information of ten udcr mixtures on MCF-7.\ncytotox$udcr.mix\n\n\n"} {"package":"mixtox","topic":"ecaPred","snippet":"### Name: ecaPred\n### Title: Mixture Effect Predicted by CA at Particular Concentration of a\n### Mixture\n### Aliases: ecaPred\n### Keywords: concentration addition mixture effect\n\n### ** Examples\n\n## example\n# predict the CA predicted response at the concentrations that cause 5%, 10%, 20%, and 50% \n# effect of antibiotic mixtures\n# each mixture contains eight components. Totally, there are 10 mixtures designed by the udcr.\n\nsgl <- antibiotox$sgl\nmix <- antibiotox$udcr.mix\npct <- antibiotox$udcr.pct\necaPred(effv = c(0.05, 0.1, 0.20, 0.5), sgl, mix, pct)\n\n\n"} {"package":"mixtox","topic":"eiaPred","snippet":"### Name: eiaPred\n### Title: Mixture Effect Predicted by IA at Particular Concentration of a\n### Mixture\n### Aliases: eiaPred\n### Keywords: independent action mixture effect\n\n### ** Examples\n\n## example 1\n# predict the IA predicted response (cytotoxicity) at the concentrations that cause 10% and 50% \n# effect of a mixture. \n# each mixture contains eight components. Totally, there are 10 mixtures designed by the udcr.\n\nsgl <- cytotox$sgl\nmix <- cytotox$udcr.mix\npct <- cytotox$udcr.pct\neiaPred(effv = c(0.1, 0.5), sgl, mix, pct)\n\n## example 2\n# predict the IA predicted response at the concentrations that cause 5% and 50% effect \n# of antibiotic mixtures.\n# each mixture contains eight components. 
Totally, there are 2 mixtures designed by the eecr.\n\nsgl <- antibiotox$sgl\nmix <- antibiotox$eecr.mix\npct <- antibiotox$eecr.pct\neiaPred(effv = c(0.05, 0.5), sgl, mix, pct)\n\n## example 3\n# predict the IA predicted response at the concentrations that cause 5%, 10%, 20%, and \n# 50% effect of antibiotic mixtures.\n# each mixture contains eight components. Totally, there are 10 mixtures designed by the udcr.\n\nsgl <- antibiotox$sgl\nmix <- antibiotox$udcr.mix\npct <- antibiotox$udcr.pct\neiaPred(effv = c(0.05, 0.10, 0.20, 0.5), sgl, mix, pct)\n\n\n"} {"package":"mixtox","topic":"figPlot","snippet":"### Name: figPlot\n### Title: Plotting concentration response curve\n### Aliases: figPlot\n### Keywords: concentration response curve\n\n### ** Examples\n\n## example 1\n# \nx <- antibiotox$PAR$x\nexpr <- antibiotox$PAR$y\nobj <- curveFit(x, expr, eq = 'Logit', rtype = 'quantal', param = c(26, 4), effv = c(0.05, 0.5))\nfigPlot(obj)\n\n## example 2\n# \nx <- hormesis$HmimCl$x\nrspn <- hormesis$HmimCl$y\nobj <- curveFit(x, rspn, eq = 'Biphasic', param = c(-0.59, 0.001, 160,0.05, 19),\n effv = c(0.05, 0.5), rtype = 'hormesis')\nfigPlot(obj, logT = TRUE)\n\n\n"} {"package":"mixtox","topic":"gcaHill","snippet":"### Name: gcaHill\n### Title: Mixture Toxicity Prediction Using GCA (Hill_two)\n### Aliases: gcaHill\n### Keywords: generalized concentration addition Hill_two equal effect\n### concentration ratio uniform design concentration ratio arbitrary\n### concentration ratio uniform design table\n\n### ** Examples\n\nmodel <- c(\"Hill_two\", \"Hill_two\", \"Hill_two\", \"Hill_two\")\nparam <- matrix(c(3.94e-5, 0.97, 0, 5.16e-4, 1.50, 0, 3.43e-6, 1.04, 0, 9.18e-6, 0.77, 0), \n\t\t\t\tnrow = 4, ncol = 3, byrow = TRUE)\nrownames(param) <- c('Ni', 'Zn', 'Cu', 'Mn')\ncolnames(param) <- c('Alpha', 'Beta', 'Gamma')\n## example 1\n# using GCA to predict the mixtures designed by equal effect concentration ratio (eecr) at\n# the effect concentration of EC05 and EC50\n# the eecr mixture design is based on four heavy metals (four factors).\ngcaHill(model, param, mixType = \"eecr\", effv = c(0.05, 0.5), rtype = 'continuous')\n\n## example 2\n# using GCA to predict the mixtures designed by uniform design concentration ratio (udcr)\n# the udcr mixture design is based on four heavy metals (four factors).\n# Seven levels (EC05, EC10, EC15, EC20, EC25, EC30, and EC50 ) are allocated in \n# the uniform table\neffv <- c(0.05, 0.10, 0.15, 0.20, 0.25, 0.30, 0.50)\ngcaHill(model, param, mixType = \"udcr\", effv, rtype = 'quantal')\n\n## example 3\n# using GCA to predict the mixtures designed by arbitrary concentration ratio (acr)\n# the acr mixture design is based on four heavy metals (four factors).\n# every component in the mixture shares exactly the same ratio (0.25) \neffv <- c(0.25, 0.25, 0.25, 0.25)\ngcaHill(model, param, mixType = \"acr\", effv)\n\n\n"} {"package":"mixtox","topic":"gcaPred","snippet":"### Name: gcaPred\n### Title: Mixture Toxicity Prediction Using GCA (General)\n### Aliases: gcaPred\n### Keywords: generalized concentration addition equal effect concentration\n### ratio uniform design concentration ratio arbitrary concentration\n### ratio uniform design table\n\n### ** Examples\n\n## example 1\n# using GCA to predict the mixtures designed by equal effect concentration ratio (eecr) at the \n# effect concentration of EC50\n# the eecr mixture design is based on seven antibiotics (seven factors).\nmodel <- antibiotox$sgl$model\nparam <- antibiotox$sgl$param\nrefEffv <- c(0.1, 0.50, 
0.80)\ngcaPred(model, param, mixType = \"eecr\", effv = 0.5, refEffv, rtype = 'quantal')\n\n## example 2\n# using GCA to predict the mixtures designed by uniform design concentration ratio (udcr)\n# the udcr mixture design is based on 2 antibiotics (2 factors) and \n# three levels (EC05, EC20, and EC50 )\nmodel <- antibiotox$sgl$model[1 : 2]\nparam <- antibiotox$sgl$param[1 : 2, ]\neffv <- c(0.05, 0.20, 0.50)\nrefEffv <- c(0.1, 0.80)\ngcaPred(model, param, mixType = \"udcr\", effv, refEffv, rtype = 'quantal')\n\n## example 3\n# using GCA to predict the mixtures designed by arbitrary concentration ratio (acr)\n# the acr mixture design is based on 2 heavy metals (2 factors).\n# every component in the mixture shares exactly the same ratio (0.5) \nmodel <- cytotox$sgl$model[1 : 2]\nparam <- cytotox$sgl$param[1 : 2, ]\neffv <- c(0.5, 0.5)\nrefEffv <- c(0.1, 0.80)\ngcaPred(model, param, mixType = \"acr\", effv, refEffv, rtype = 'quantal')\n\n\n"} {"package":"mixtox","topic":"getCI","snippet":"### Name: getCI\n### Title: Calculating Confidence Intervals\n### Aliases: getCI\n### Keywords: non-simultaneous confidence intervals non-simultaneous\n### prediction intervals\n\n### ** Examples\n\n## example 1\nx <- cytotox$Ni$x\nrspn <- cytotox$Ni$y\nobj <- curveFit(x, rspn, eq = 'Logit', param = c(12, 3), effv = c(0.05, 0.5), rtype = 'quantal')\ngetCI(obj, effv = c(0.05, 0.50))\n\n\n"} {"package":"mixtox","topic":"hormesis","snippet":"### Name: hormesis\n### Title: Non-monotonic Concentration-response Data\n### Aliases: hormesis\n### Keywords: hormesis non-monotonic concentration-response data\n\n### ** Examples\n\n#example 1\n## Retrieve the toxicity data of acetonitrile on firefly luciferase.\nhormesis$ACN\n\n#example 2\n## Retrieve the minx of OmimCl, HmimCl, ACN, and Acetone\nhormesis$sgl$minx\n\n\n"} {"package":"mixtox","topic":"iaPred","snippet":"### Name: iaPred\n### Title: Mixture Toxicity Prediction Based on Independent Action\n### Aliases: iaPred\n### Keywords: independent action equal effect concentration ratio uniform\n### design concentration ratio arbitrary concentration ratio uniform\n### design table\n\n### ** Examples\n\n# data(cytotox)\n\n## example 1\n# using IA to predict the mixtures designed by equal effect concentration ratio (eecr) at the \n# effect concentration of EC05 and EC50\n# the eecr mixture design is based on four heavy metals and four ionic liquids (eight factors).\nmodel <- cytotox$sgl$model\nparam <- cytotox$sgl$param\niaPred(model, param, mixType = \"eecr\", effv = c(0.05, 0.5))\n\n## example 2\n# using IA to predict the mixtures designed by uniform design concentration ratio (udcr)\n# the udcr mixture design is based on seven antibiotics (seven factors).\n# five levels (EC05, EC10, EC20, EC30, and EC50 ) are allocated in the uniform table using the \n# pseudo-level technique (Liang et al., 2001)\nmodel <- antibiotox$sgl$model\nparam <- antibiotox$sgl$param\neffv <- c(0.05, 0.05, 0.10, 0.10, 0.20, 0.20, 0.30, 0.30, 0.50, 0.50)\niaPred(model, param, mixType = \"udcr\", effv)\n\n## example 3\n# using IA to predict the mixtures designed by arbitrary concentration ratio (acr)\n# the acr mixture design is based on four antibiotics (four factors).\n# every component in the mixture shares exactly the same ratio (0.25) \nmodel <- antibiotox$sgl$model[1 : 4]\nparam <- antibiotox$sgl$param[1 : 4, ]\neffv <- c(0.25, 0.25, 0.25, 0.25)\niaPred(model, param, mixType = \"acr\", effv)\n\n\n"} {"package":"mixtox","topic":"jacobian","snippet":"### Name: jacobian\n### Title: Jacobian 
Matrix Calculation\n### Aliases: jacobian\n### Keywords: Jacobian matrix\n\n### ** Examples\n\n## example 1\nx <- cytotox$Ni$x\nrspn <- cytotox$Ni$y\nobj <- curveFit(x, rspn, eq = 'Logit', param = c(12, 3), effv = c(0.05, 0.5), rtype = 'quantal')\njacobian('Logit', x, obj$p)\n\n\n\n"} {"package":"mixtox","topic":"nmECx","snippet":"### Name: nmECx\n### Title: Effect Concentration Calculation for J-shaped Models\n### Aliases: nmECx\n### Keywords: effect concentration non-monotonic curve\n\n### ** Examples\n\n## example 1\n# calculate ECL-10, ECR-10, EC5, and EC50 of the four hormetic curves \nmodel <- hormesis$sgl$model\nparam <- hormesis$sgl$param\nminx <- hormesis$sgl$minx\nnmECx(model, param, effv = c(-0.10, 0.05, 0.50), minx)\n\n\n"} {"package":"mixtox","topic":"qq4res","snippet":"### Name: qq4res\n### Title: Residual Normal QQ Plot\n### Aliases: qq4res\n### Keywords: normal QQ plot\n\n### ** Examples\n\n## example 1\n# \nx <- antibiotox$PAR$x\nexpr <- antibiotox$PAR$y\nobj <- curveFit(x, expr, eq = 'Logit', rtype = 'quantal', param = c(26, 4), effv = c(0.05, 0.5))\nqq4res(obj)\n\n\n"} {"package":"mixtox","topic":"showEq","snippet":"### Name: showEq\n### Title: List Requested Equations\n### Aliases: showEq\n### Keywords: monotonic and non-monotonic equations\n\n### ** Examples\n\n# example 1\n## show Weibull model\nshowEq('Weibull')\n\n# example 2\n## show the name of all sigmoidal models\nshowEq('sigmoid')\n\n\n"} {"package":"mixtox","topic":"staval","snippet":"### Name: staval\n### Title: Starting Values for 13 Sigmoidal and 4 Hormetic Models\n### Aliases: staval\n### Keywords: starting values curve fitting\n\n### ** Examples\n\n# example 1\n## Retrieve the starting values for Hill.\nstaval$Hill\n\n# example 2\n## Retrieve the starting values for Weibull.\nstaval$Weibull\n\n\n"} {"package":"mixtox","topic":"tuneFit","snippet":"### Name: tuneFit\n### Title: Find Optimal Starting values for Curve Fitting\n### Aliases: tuneFit\n### Keywords: curve fitting goodness of fit trial and error effect\n### concentration\n\n### ** Examples\n\n## example 1\n# Fit the non-monotonic concentration-response data\n# we'll get a fit with r2 of 0.740\nx <- hormesis$OmimCl$x\nexpr <- hormesis$OmimCl$y\ny <- rowMeans(expr)\ntuneFit(x, y, eq = 'Biphasic')\n\n## example 2\n# Fit the non-monotonic concentration-response data\n# use r2 (rsq) of 0.9, we'll get a fit with r2 of 0.989\n# calculate the effect concentration that causes 5% inhibition\nx <- hormesis$OmimCl$x\nexpr <- hormesis$OmimCl$y\ny <- rowMeans(expr)\ntuneFit(x, y, eq = 'Biphasic', effv = 0.05, rsq = 0.9)\n\n## example 3\n# Fit the concentration-response data of heavy metal Ni(2+) on MCF-7 cells.\n# Calculate the concentration that causes 5% inhibition on the growth of MCF-7\n\nx <- cytotox$Ni$x\nexpr <- cytotox$Ni$y\ny <- rowMeans(expr)\ntuneFit(x, y, eq = 'Logit', effv = 0.05)\n\n## example 4\n# Fit the concentration-response data of Paromomycin Sulfate (PAR) on photobacteria.\n# Calculate the concentrations that cause 50% inhibition on the growth of photobacteria \n\nx <- antibiotox$PAR$x\nexpr <- antibiotox$PAR$y\ny <- rowMeans(expr)\ntuneFit(x, y, eq = 'Logit', effv = 0.5)\n\n\n"} {"package":"mixtox","topic":"unidTab","snippet":"### Name: unidTab\n### Title: Uniform Design Table\n### Aliases: unidTab\n### Keywords: uniform design good lattice point\n\n### ** Examples\n\n## example 1\n# construct uniform table with 11 runs and 7 factors using the default centered L2-discrepancy \n# algorithm\nunidTab(11, 7)\n\n## example 2\n# construct uniform 
table with 37 runs and 13 factors using the symmetric discrepancy algorithm \nunidTab(lev = 37, fac = 13, algo = \"sd2\")\n\n## example 3\n# construct uniform table with 37 runs and 13 factors using default centered L2-discrepancy \n# algorithm\nunidTab(lev = 37, fac = 13, algo = \"cd2\")\n\n\n"} {"package":"deepgp","topic":"ALC","snippet":"### Name: ALC\n### Title: Active Learning Cohn for Sequential Design\n### Aliases: ALC ALC.gp ALC.dgp2 ALC.dgp3\n\n### ** Examples\n\n# --------------------------------------------------------\n# Example 1: toy step function, runs in less than 5 seconds\n# --------------------------------------------------------\n\nf <- function(x) {\n if (x <= 0.4) return(-1)\n if (x >= 0.6) return(1)\n if (x > 0.4 & x < 0.6) return(10*(x-0.5))\n}\n\nx <- seq(0.05, 0.95, length = 7)\ny <- sapply(x, f)\nx_new <- seq(0, 1, length = 100)\n\n# Fit model and calculate ALC\nfit <- fit_two_layer(x, y, nmcmc = 100, cov = \"exp2\")\nfit <- trim(fit, 50)\nfit <- predict(fit, x_new, cores = 1, store_latent = TRUE)\nalc <- ALC(fit)\n\n## No test: \n# --------------------------------------------------------\n# Example 2: damped sine wave\n# --------------------------------------------------------\n\nf <- function(x) {\n exp(-10*x) * (cos(10*pi*x - 1) + sin(10*pi*x - 1)) * 5 - 0.2\n}\n\n# Training data\nx <- seq(0, 1, length = 30)\ny <- f(x) + rnorm(30, 0, 0.05)\n\n# Testing data\nxx <- seq(0, 1, length = 100)\nyy <- f(xx)\n\nplot(xx, yy, type = \"l\")\npoints(x, y, col = 2)\n\n# Conduct MCMC (can replace fit_two_layer with fit_one_layer/fit_three_layer)\nfit <- fit_two_layer(x, y, D = 1, nmcmc = 2000, cov = \"exp2\")\nplot(fit)\nfit <- trim(fit, 1000, 2)\n\n# Option 1 - calculate ALC from MCMC iterations\nalc <- ALC(fit, xx)\n\n# Option 2 - calculate ALC after predictions\nfit <- predict(fit, xx, cores = 1, store_latent = TRUE)\nalc <- ALC(fit)\n\n# Visualize fit\nplot(fit)\npar(new = TRUE) # overlay ALC\nplot(xx, alc$value, type = 'l', lty = 2, \n axes = FALSE, xlab = '', ylab = '')\n\n# Select next design point\nx_new <- xx[which.max(alc$value)]\n## End(No test)\n\n\n\n"} {"package":"deepgp","topic":"IMSE","snippet":"### Name: IMSE\n### Title: Integrated Mean-Squared (prediction) Error for Sequential Design\n### Aliases: IMSE IMSE.gp IMSE.dgp2 IMSE.dgp3\n\n### ** Examples\n\n# --------------------------------------------------------\n# Example 1: toy step function, runs in less than 5 seconds\n# --------------------------------------------------------\n\nf <- function(x) {\n if (x <= 0.4) return(-1)\n if (x >= 0.6) return(1)\n if (x > 0.4 & x < 0.6) return(10*(x-0.5))\n}\n\nx <- seq(0.05, 0.95, length = 7)\ny <- sapply(x, f)\nx_new <- seq(0, 1, length = 100)\n\n# Fit model and calculate IMSE\nfit <- fit_one_layer(x, y, nmcmc = 100, cov = \"exp2\")\nfit <- trim(fit, 50)\nfit <- predict(fit, x_new, cores = 1, store_latent = TRUE)\nimse <- IMSE(fit)\n\n## No test: \n# --------------------------------------------------------\n# Example 2: Higdon function\n# --------------------------------------------------------\n\nf <- function(x) {\n i <- which(x <= 0.48)\n x[i] <- 2 * sin(pi * x[i] * 4) + 0.4 * cos(pi * x[i] * 16)\n x[-i] <- 2 * x[-i] - 1\n return(x)\n}\n\n# Training data\nx <- seq(0, 1, length = 30)\ny <- f(x) + rnorm(30, 0, 0.05)\n\n# Testing data\nxx <- seq(0, 1, length = 100)\nyy <- f(xx)\n\nplot(xx, yy, type = \"l\")\npoints(x, y, col = 2)\n\n# Conduct MCMC (can replace fit_three_layer with fit_one_layer/fit_two_layer)\nfit <- fit_three_layer(x, y, D = 1, nmcmc = 2000, cov = 
\"exp2\")\nplot(fit)\nfit <- trim(fit, 1000, 2)\n\n# Option 1 - calculate IMSE from only MCMC iterations\nimse <- IMSE(fit, xx)\n\n# Option 2 - calculate IMSE after predictions\nfit <- predict(fit, xx, cores = 1, store_latent = TRUE)\nimse <- IMSE(fit)\n\n# Visualize fit\nplot(fit)\npar(new = TRUE) # overlay IMSE\nplot(xx, imse$value, col = 2, type = 'l', lty = 2, \n axes = FALSE, xlab = '', ylab = '')\n\n# Select next design point\nx_new <- xx[which.min(imse$value)]\n## End(No test)\n\n\n\n"} {"package":"deepgp","topic":"continue","snippet":"### Name: continue\n### Title: Continues MCMC sampling\n### Aliases: continue continue.gp continue.dgp2 continue.dgp3\n### continue.gpvec continue.dgp2vec continue.dgp3vec\n\n### ** Examples\n\n# See \"fit_two_layer\" for an example\n\n\n\n"} {"package":"deepgp","topic":"deepgp-package","snippet":"### Name: deepgp-package\n### Title: Package deepgp\n### Aliases: deepgp-package\n\n### ** Examples\n\n# See \"fit_one_layer\", \"fit_two_layer\", \"fit_three_layer\", \n# \"ALC\", or \"IMSE\" for examples\n# Examples of real-world implementations are available at: \n# https://bitbucket.org/gramacylab/deepgp-ex/\n\n\n\n"} {"package":"deepgp","topic":"fit_one_layer","snippet":"### Name: fit_one_layer\n### Title: MCMC sampling for one layer GP\n### Aliases: fit_one_layer\n\n### ** Examples\n\n# Examples of real-world implementations are available at: \n# https://bitbucket.org/gramacylab/deepgp-ex/\n## No test: \n# G function (https://www.sfu.ca/~ssurjano/gfunc.html)\nf <- function(xx, a = (c(1:length(xx)) - 1) / 2) { \n new1 <- abs(4 * xx - 2) + a\n new2 <- 1 + a\n prod <- prod(new1 / new2)\n return((prod - 1) / 0.86)\n}\n\n# Training data\nd <- 1 \nn <- 20\nx <- matrix(runif(n * d), ncol = d)\ny <- apply(x, 1, f)\n\n# Testing data\nn_test <- 100\nxx <- matrix(runif(n_test * d), ncol = d)\nyy <- apply(xx, 1, f)\n\nplot(xx[order(xx)], yy[order(xx)], type = \"l\")\npoints(x, y, col = 2)\n\n# Example 1: full model (nugget fixed)\nfit <- fit_one_layer(x, y, nmcmc = 2000, true_g = 1e-6)\nplot(fit)\nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit)\n\n# Example 2: full model (nugget estimated, EI calculated)\nfit <- fit_one_layer(x, y, nmcmc = 2000)\nplot(fit) \nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1, EI = TRUE)\nplot(fit)\npar(new = TRUE) # overlay EI\nplot(xx[order(xx)], fit$EI[order(xx)], type = 'l', lty = 2, \n axes = FALSE, xlab = '', ylab = '')\n \n# Example 3: Vecchia approximated model\nfit <- fit_one_layer(x, y, nmcmc = 2000, vecchia = TRUE, m = 10) \nplot(fit)\nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit)\n## End(No test)\n\n\n\n"} {"package":"deepgp","topic":"fit_three_layer","snippet":"### Name: fit_three_layer\n### Title: MCMC sampling for three layer deep GP\n### Aliases: fit_three_layer\n\n### ** Examples\n\n# Examples of real-world implementations are available at: \n# https://bitbucket.org/gramacylab/deepgp-ex/\n## No test: \n# G function (https://www.sfu.ca/~ssurjano/gfunc.html)\nf <- function(xx, a = (c(1:length(xx)) - 1) / 2) { \n new1 <- abs(4 * xx - 2) + a\n new2 <- 1 + a\n prod <- prod(new1 / new2)\n return((prod - 1) / 0.86)\n}\n\n# Training data\nd <- 2\nn <- 30\nx <- matrix(runif(n * d), ncol = d)\ny <- apply(x, 1, f)\n\n# Testing data\nn_test <- 100\nxx <- matrix(runif(n_test * d), ncol = d)\nyy <- apply(xx, 1, f)\n\ni <- interp::interp(xx[, 1], xx[, 2], yy)\nimage(i, col = heat.colors(128))\ncontour(i, add = TRUE)\npoints(x)\n\n# Example 1: full model (nugget 
estimated)\nfit <- fit_three_layer(x, y, nmcmc = 2000)\nplot(fit)\nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit)\n\n# Example 2: Vecchia approximated model (nugget fixed)\n# (Vecchia approximation is faster for larger data sizes)\nfit <- fit_three_layer(x, y, nmcmc = 2000, vecchia = TRUE, \n m = 10, true_g = 1e-6)\nplot(fit) \nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit)\n## End(No test)\n\n\n\n"} {"package":"deepgp","topic":"fit_two_layer","snippet":"### Name: fit_two_layer\n### Title: MCMC sampling for two layer deep GP\n### Aliases: fit_two_layer\n\n### ** Examples\n\n# Examples of real-world implementations are available at: \n# https://bitbucket.org/gramacylab/deepgp-ex/\n## No test: \n# G function (https://www.sfu.ca/~ssurjano/gfunc.html)\nf <- function(xx, a = (c(1:length(xx)) - 1) / 2) { \n new1 <- abs(4 * xx - 2) + a\n new2 <- 1 + a\n prod <- prod(new1 / new2)\n return((prod - 1) / 0.86)\n}\n\n# Training data\nd <- 1 \nn <- 20\nx <- matrix(runif(n * d), ncol = d)\ny <- apply(x, 1, f)\n\n# Testing data\nn_test <- 100\nxx <- matrix(runif(n_test * d), ncol = d)\nyy <- apply(xx, 1, f)\n\nplot(xx[order(xx)], yy[order(xx)], type = \"l\")\npoints(x, y, col = 2)\n\n# Example 1: full model (nugget estimated, using continue)\nfit <- fit_two_layer(x, y, nmcmc = 1000)\nplot(fit)\nfit <- continue(fit, 1000) \nplot(fit) \nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit, hidden = TRUE)\n\n# Example 2: Vecchia approximated model\n# (Vecchia approximation is faster for larger data sizes)\nfit <- fit_two_layer(x, y, nmcmc = 2000, vecchia = TRUE, m = 10)\nplot(fit) \nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit, hidden = TRUE)\n\n# Example 3: Vecchia approximated model (re-approximated after burn-in)\nfit <- fit_two_layer(x, y, nmcmc = 1000, vecchia = TRUE, m = 10)\nfit <- continue(fit, 1000, re_approx = TRUE)\nplot(fit)\nfit <- trim(fit, 1000, 2)\nfit <- predict(fit, xx, cores = 1)\nplot(fit, hidden = TRUE)\n## End(No test)\n\n\n\n"} {"package":"deepgp","topic":"plot","snippet":"### Name: plot\n### Title: Plots object from 'deepgp' package\n### Aliases: plot plot.gp plot.gpvec plot.dgp2 plot.dgp2vec plot.dgp3\n### plot.dgp3vec\n\n### ** Examples\n\n# See \"fit_one_layer\", \"fit_two_layer\", or \"fit_three_layer\"\n# for an example\n\n\n\n"} {"package":"deepgp","topic":"predict","snippet":"### Name: predict\n### Title: Predict posterior mean and variance/covariance\n### Aliases: predict predict.gp predict.dgp2 predict.dgp3 predict.gpvec\n### predict.dgp2vec predict.dgp3vec\n\n### ** Examples\n\n# See \"fit_one_layer\", \"fit_two_layer\", or \"fit_three_layer\"\n# for an example\n\n\n\n"} {"package":"deepgp","topic":"sq_dist","snippet":"### Name: sq_dist\n### Title: Calculates squared pairwise distances\n### Aliases: sq_dist\n\n### ** Examples\n\nx <- seq(0, 1, length = 10)\nd2 <- sq_dist(x)\n\n\n\n"} {"package":"deepgp","topic":"trim","snippet":"### Name: trim\n### Title: Trim/Thin MCMC iterations\n### Aliases: trim trim.gp trim.gpvec trim.dgp2 trim.dgp2vec trim.dgp3\n### trim.dgp3vec\n\n### ** Examples\n\n# See \"fit_one_layer\", \"fit_two_layer\", or \"fit_three_layer\"\n# for an example\n\n\n\n"} {"package":"makePalette","topic":"makePaletteCLARA","snippet":"### Name: makePaletteCLARA\n### Title: Make your color palette with the CLARA algorithm\n### Aliases: makePaletteCLARA\n\n### ** Examples\n\nmakePaletteCLARA(system.file(\"extdata\", \"picture02.jpg\", 
package=\"makePalette\"))\nmakePaletteCLARA(system.file(\"extdata\", \"picture04.png\", package=\"makePalette\"), 6)\nmakePaletteCLARA(system.file(\"extdata\", \"picture05.jpg\", package=\"makePalette\"), 10)\nMyPalette = makePaletteCLARA(system.file(\"extdata\", \"picture05.jpg\", package=\"makePalette\"), 10)\nbarplot(1:10, col=MyPalette)\n\n\n\n"} {"package":"makePalette","topic":"makePaletteKM","snippet":"### Name: makePaletteKM\n### Title: Make your color palette with KMeans algorithm\n### Aliases: makePaletteKM\n\n### ** Examples\n\nmakePaletteKM(system.file(\"extdata\", \"picture02.jpg\", package=\"makePalette\"))\nmakePaletteKM(system.file(\"extdata\", \"picture04.png\", package=\"makePalette\"), 6)\nmakePaletteKM(system.file(\"extdata\", \"picture05.jpg\", package=\"makePalette\"), 10)\nMyPalette = makePaletteKM(system.file(\"extdata\", \"picture05.jpg\", package=\"makePalette\"), 10)\nbarplot(1:10, col=MyPalette)\n\n\n\n"} {"package":"sparseEigen","topic":"spEigen","snippet":"### Name: spEigen\n### Title: Sparse Spectral Decomposition of a Matrix\n### Aliases: spEigen\n\n### ** Examples\n\nlibrary(sparseEigen)\nn <- 100 # samples\nm <- 500 # dimension\nq <- 3 # number of sparse eigenvectors to be estimated\nsp_card <- 0.1*m # sparsity of each eigenvector\n\n# generate covariance matrix with sparse eigenvectors\nV <- matrix(0, m, q)\nV[cbind(seq(1, q*sp_card), rep(1:q, each = sp_card))] <- 1/sqrt(sp_card)\nV <- cbind(V, matrix(rnorm(m*(m-q)), m, m-q))\nV <- qr.Q(qr(V)) # orthogonalize eigenvectors\nlmd <- c(100*seq(from = q, to = 1), rep(1, m-q)) # generate eigenvalues\nR <- V %*% diag(lmd) %*% t(V) # covariance matrix\n\n# generate data\nX <- MASS::mvrnorm(n, rep(0, m), R) # random data with underlying sparse structure\n\n# standard and sparse eigenvectors\nres_standard <- eigen(cov(X))\nres_sparse <- spEigen(cov(X), q)\n\n# show inner product between estimated eigenvectors and originals (the closer to 1 the better)\nabs(diag(t(res_standard$vectors) %*% V[, 1:q])) #for standard estimated eigenvectors\nabs(diag(t(res_sparse$vectors) %*% V[, 1:q])) #for sparse estimated eigenvectors\n\n\n"} {"package":"sparseEigen","topic":"spEigenCov","snippet":"### Name: spEigenCov\n### Title: Covariance Matrix Estimation with Sparse Eigenvectors\n### Aliases: spEigenCov\n\n### ** Examples\n\n## Not run: \n##D library(sparseEigen)\n##D n <- 600 # samples\n##D m <- 500 # dimension\n##D q <- 3 # number of sparse eigenvectors to be estimated\n##D sp_card <- 0.1*m # sparsity of each eigenvector\n##D \n##D # generate covariance matrix with sparse eigenvectors\n##D V <- matrix(0, m, q)\n##D V[cbind(seq(1, q*sp_card), rep(1:q, each = sp_card))] <- 1/sqrt(sp_card)\n##D V <- cbind(V, matrix(rnorm(m*(m-q)), m, m-q))\n##D V <- qr.Q(qr(V)) # orthogonalize eigenvectors\n##D lmd <- c(100*seq(from = q, to = 1), rep(1, m-q)) # generate eigenvalues\n##D R <- V %*% diag(lmd) %*% t(V) # covariance matrix\n##D \n##D # generate data\n##D X <- MASS::mvrnorm(n, rep(0, m), R) # random data with underlying sparse structure\n##D \n##D # standard and sparse estimation\n##D res_standard <- eigen(cov(X))\n##D res_sparse <- spEigenCov(cov(X), q)\n##D \n##D # show inner product between estimated eigenvectors and originals (the closer to 1 the better)\n##D abs(diag(t(res_standard$vectors) %*% V[, 1:q])) #for standard estimated eigenvectors\n##D abs(diag(t(res_sparse$vectors) %*% V[, 1:q])) #for sparse estimated eigenvectors\n##D \n##D # show error between estimated and true covariance\n##D norm(cov(X) - R, type = 'F') #for 
sample covariance matrix\n##D norm(res_sparse$cov - R, type = 'F') #for covariance with sparse eigenvectors\n## End(Not run)\n\n\n"} {"package":"nnls","topic":"nnls","snippet":"### Name: nnls\n### Title: The Lawson-Hanson NNLS implementation of non-negative least\n### squares\n### Aliases: nnls\n### Keywords: optimize\n\n### ** Examples\n\n## simulate a matrix A\n## with 3 columns, each containing an exponential decay \nt <- seq(0, 2, by = .04)\nk <- c(.5, .6, 1)\nA <- matrix(nrow = 51, ncol = 3)\nAcolfunc <- function(k, t) exp(-k*t)\nfor(i in 1:3) A[,i] <- Acolfunc(k[i],t)\n\n## simulate a matrix X\n## with 3 columns, each containing a Gaussian shape \n## the Gaussian shapes are non-negative\nX <- matrix(nrow = 51, ncol = 3)\nwavenum <- seq(18000,28000, by=200)\nlocation <- c(25000, 22000, 20000) \ndelta <- c(3000,3000,3000)\nXcolfunc <- function(wavenum, location, delta)\n exp( - log(2) * (2 * (wavenum - location)/delta)^2)\nfor(i in 1:3) X[,i] <- Xcolfunc(wavenum, location[i], delta[i])\n\n## set seed for reproducibility\nset.seed(3300)\n\n## simulated data is the product of A and X with some\n## spherical Gaussian noise added \nmatdat <- A %*% t(X) + .005 * rnorm(nrow(A) * nrow(X))\n\n## estimate the rows of X using NNLS criteria \nnnls_sol <- function(matdat, A) {\n X <- matrix(0, nrow = 51, ncol = 3)\n for(i in 1:ncol(matdat)) \n X[i,] <- coef(nnls(A,matdat[,i]))\n X\n}\nX_nnls <- nnls_sol(matdat,A) \n\nmatplot(X_nnls,type=\"b\",pch=20)\nabline(0,0, col=grey(.6))\n\n## Not run: \n##D ## can solve the same problem with L-BFGS-B algorithm\n##D ## but need starting values for x \n##D bfgs_sol <- function(matdat, A) {\n##D startval <- rep(0, ncol(A))\n##D fn1 <- function(par1, b, A) sum( ( b - A %*% par1)^2)\n##D X <- matrix(0, nrow = 51, ncol = 3)\n##D for(i in 1:ncol(matdat)) \n##D X[i,] <- optim(startval, fn = fn1, b=matdat[,i], A=A,\n##D lower = rep(0,3), method=\"L-BFGS-B\")$par\n##D X\n##D }\n##D X_bfgs <- bfgs_sol(matdat,A) \n##D \n##D ## the RMS deviation under NNLS is less than under L-BFGS-B \n##D sqrt(sum((X - X_nnls)^2)) < sqrt(sum((X - X_bfgs)^2))\n##D \n##D ## and L-BFGS-B is much slower \n##D system.time(nnls_sol(matdat,A))\n##D system.time(bfgs_sol(matdat,A))\n##D \n##D ## can also solve the same problem by reformulating it as a\n##D ## quadratic program (this requires the quadprog package; if you\n##D ## have quadprog installed, uncomment lines below starting with\n##D ## only 1 \"#\" )\n##D \n##D # library(quadprog)\n##D \n##D # quadprog_sol <- function(matdat, A) {\n##D # X <- matrix(0, nrow = 51, ncol = 3)\n##D # bvec <- rep(0, ncol(A))\n##D # Dmat <- crossprod(A,A)\n##D # Amat <- diag(ncol(A))\n##D # for(i in 1:ncol(matdat)) { \n##D # dvec <- crossprod(A,matdat[,i]) \n##D # X[i,] <- solve.QP(dvec = dvec, bvec = bvec, Dmat=Dmat,\n##D # Amat=Amat)$solution\n##D # }\n##D # X\n##D # }\n##D # X_quadprog <- quadprog_sol(matdat,A) \n##D \n##D ## the RMS deviation under NNLS is about the same as under quadprog \n##D # sqrt(sum((X - X_nnls)^2))\n##D # sqrt(sum((X - X_quadprog)^2))\n##D \n##D ## and quadprog requires about the same amount of time \n##D # system.time(nnls_sol(matdat,A))\n##D # system.time(quadprog_sol(matdat,A))\n##D \n## End(Not run)\n\n\n\n"} {"package":"nnls","topic":"nnnpls","snippet":"### Name: nnnpls\n### Title: An implementation of least squares with non-negative and\n### non-positive constraints\n### Aliases: nnnpls\n### Keywords: optimize\n\n### ** Examples\n\n## simulate a matrix A\n## with 3 columns, each containing an exponential decay \nt <- seq(0, 
2, by = .04)\nk <- c(.5, .6, 1)\nA <- matrix(nrow = 51, ncol = 3)\nAcolfunc <- function(k, t) exp(-k*t)\nfor(i in 1:3) A[,i] <- Acolfunc(k[i],t)\n\n## simulate a matrix X\n## with 3 columns, each containing a Gaussian shape \n## 2 of the Gaussian shapes are non-negative and 1 is non-positive \nX <- matrix(nrow = 51, ncol = 3)\nwavenum <- seq(18000,28000, by=200)\nlocation <- c(25000, 22000, 20000) \ndelta <- c(3000,3000,3000)\nXcolfunc <- function(wavenum, location, delta)\n exp( - log(2) * (2 * (wavenum - location)/delta)^2)\nfor(i in 1:3) X[,i] <- Xcolfunc(wavenum, location[i], delta[i])\nX[,2] <- -X[,2]\n\n## set seed for reproducibility\nset.seed(3300)\n\n## simulated data is the product of A and X with some\n## spherical Gaussian noise added \nmatdat <- A %*% t(X) + .005 * rnorm(nrow(A) * nrow(X))\n\n## estimate the rows of X using NNNPLS criteria \nnnnpls_sol <- function(matdat, A) {\n X <- matrix(0, nrow = 51, ncol = 3)\n for(i in 1:ncol(matdat)) \n X[i,] <- coef(nnnpls(A,matdat[,i],con=c(1,-1,1)))\n X\n}\nX_nnnpls <- nnnpls_sol(matdat,A) \n\n## Not run: \n##D \n##D ## can solve the same problem with L-BFGS-B algorithm\n##D ## but need starting values for x and \n##D ## impose a very low/high bound where none is desired\n##D bfgs_sol <- function(matdat, A) {\n##D startval <- rep(0, ncol(A))\n##D fn1 <- function(par1, b, A) sum( ( b - A %*% par1)^2)\n##D X <- matrix(0, nrow = 51, ncol = 3)\n##D for(i in 1:ncol(matdat)) \n##D X[i,] <- optim(startval, fn = fn1, b=matdat[,i], A=A,\n##D lower=c(0, -1000, 0), upper=c(1000,0,1000),\n##D method=\"L-BFGS-B\")$par\n##D X\n##D }\n##D X_bfgs <- bfgs_sol(matdat,A) \n##D \n##D ## the RMS deviation under NNNPLS is less than under L-BFGS-B \n##D sqrt(sum((X - X_nnnpls)^2)) < sqrt(sum((X - X_bfgs)^2))\n##D \n##D ## and L-BFGS-B is much slower \n##D system.time(nnnpls_sol(matdat,A))\n##D system.time(bfgs_sol(matdat,A))\n##D \n##D ## can also solve the same problem by reformulating it as a\n##D ## quadratic program (this requires the quadprog package; if you\n##D ## have quadprog installed, uncomment lines below starting with\n##D ## only 1 \"#\" )\n##D \n##D # library(quadprog)\n##D \n##D # quadprog_sol <- function(matdat, A) {\n##D # X <- matrix(0, nrow = 51, ncol = 3)\n##D # bvec <- rep(0, ncol(A))\n##D # Dmat <- crossprod(A,A)\n##D # Amat <- diag(c(1,-1,1))\n##D # for(i in 1:ncol(matdat)) { \n##D # dvec <- crossprod(A,matdat[,i]) \n##D # X[i,] <- solve.QP(dvec = dvec, bvec = bvec, Dmat=Dmat,\n##D # Amat=Amat)$solution\n##D # }\n##D # X\n##D # }\n##D # X_quadprog <- quadprog_sol(matdat,A) \n##D \n##D ## the RMS deviation under NNNPLS is about the same as under quadprog \n##D # sqrt(sum((X - X_nnnpls)^2))\n##D # sqrt(sum((X - X_quadprog)^2))\n##D \n##D ## and quadprog requires about the same amount of time \n##D # system.time(nnnpls_sol(matdat,A))\n##D # system.time(quadprog_sol(matdat,A))\n## End(Not run)\n\n\n"} {"package":"bayespm","topic":"ECE","snippet":"### Name: ECE\n### Title: ECE dataset for the PCC process for Poisson with rate parameter\n### unknown\n### Aliases: ECE\n\n### ** Examples\n\n# Loading data\nattach(ECE)\n\n# Plotting data\ngraphpar <- par( oma = c(1,3,2,3) )\nplot( 1:length(defect_counts), defect_counts/inspected_units, type = \"b\", lty = 1,\n xlab = \"Observations\", ylab = \"\", xlim = c(0, 25), ylim = c(1.5, 10.5),\n lwd = 1, pch = 16, axes = FALSE, yaxs = \"i\", main = \"ECE dataset\" )\n\n# Adding points\npoints( 1:length(defect_counts), inspected_units, type = \"b\",\n lty = 2, lwd = 1, pch = 21, col = \"gray55\" 
)\n# Adding legend\nlegend( \"topleft\", legend=c(expression(paste(s[i])), expression(paste(x[i]/s[i])) ), bty = \"n\",\n cex = 0.8, lty = c(2, 1), lwd = 1, col = c (\"gray55\", \"black\") , pch = c(21, 16) )\n# Adding axis with names\naxis(1) ; axis(2) ; axis(4, col.axis = \"gray55\", col = \"gray55\")\nmtext( \"Number of Defects \\n per unit\", side = 2, line = 2.2, cex = 0.9 )\nmtext( \"Inspected units\", side = 4, line = 2, cex = 0.9, col = \"gray55\" )\n# Resetting graphical parameters\npar(graphpar)\n\n\n\n"} {"package":"bayespm","topic":"aPTT","snippet":"### Name: aPTT\n### Title: Dataset for PCC process for Normal with both parameters unknown\n### Aliases: aPTT\n\n### ** Examples\n\n# Loading data\nattach(aPTT)\n\n# Plotting data\nplot( 1:length(aPTT_current), aPTT_current, type = \"b\", lty = 1, xlab = \"\", ylab = \"\",\n ylim = c(27.3, 33.4), xlim = c(-30, 30), lwd = 1, pch = 16, axes = FALSE, yaxs = \"i\",\n main = \"aPTT dataset\" )\n\n# x - axis for historical and current data\npastx <- c( -30, -20, -10, 0 ) ; currentx <- c( 0, 10, 20, 30 )\n# Adding points\npoints( -length(aPTT_historical):(-1), aPTT_historical,\n type = \"b\", lty = 2, xlab = \"\", ylab = \"\", lwd = 1, pch = 21, col = \"gray55\" )\n# Adding axis with names\naxis(2)\nmtext( \"Current Data\", side = 1, at = 15, line = 2.2, cex = 1.1 )\nmtext( \"Historical Data\", side = 1, at = -15, line = 2.2, cex = 1.1, col = \"gray55\" )\nmtext( \"aPTT [sec]\", side = 2, line = 2.2, cex = 1.1 )\naxis( 1, at = currentx, labels = currentx )\naxis( 1, at = pastx, labels = pastx, col.axis = \"gray55\", col = \"gray55\", lty = 2 )\nsegments( 0, 27.5, 0, 33.5, lwd = 1 )\n\n\n\n"} {"package":"bayespm","topic":"betabinom_HM","snippet":"### Name: betabinom_HM\n### Title: The Highest Mass (HM) interval of Beta-Binomial distribution.\n### Aliases: betabinom_HM\n\n### ** Examples\n\nbetabinom_HM(0.95, 10, 20, 180, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"betanbinom_HM","snippet":"### Name: betanbinom_HM\n### Title: The Highest Mass (HM) interval of Beta-Negative Binomial\n### distribution.\n### Aliases: betanbinom_HM\n\n### ** Examples\n\nbetanbinom_HM(0.95, 5, 20, 80, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"binom_PCC","snippet":"### Name: binom_PCC\n### Title: PCC for Binomial data with probability parameter unknown\n### Aliases: binom_PCC\n\n### ** Examples\n\n# 30 Binomial observations introducing an outlier at the 15th observation\nset.seed(10)\nSimData <- rbinom( n = 30, size = 20, prob = 0.6 )\nSimData[15] <- round( SimData[15] + 3*sqrt(20*0.6*0.4) )\nbinom_PCC( SimData, n = rep(20, 30) )\n\n\n"} {"package":"bayespm","topic":"binom_PRC","snippet":"### Name: binom_PRC\n### Title: PRC for Binomial data with probability parameter unknown\n### Aliases: binom_PRC\n\n### ** Examples\n\n\n# the PRC process for the first 30 data points in the third application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\n\n### HISTORICAL DATA (FIRST BATCH)\nHD <- c( 3, 3, 1, 5, 2, 1, 3, 3, 3, 0, 2, 1, 2, 1, 4, 1, 1, 0, 3, 2, 4, 6, 0, 1, 3, 2, 2, 4, 2, 1 )\n\n### Bernoulli trials\nnn <- 50\n\nN0 <- length(HD)\nNN0 <- rep(50, N0)\n\nbinom_PRC(data = HD, n = NN0)\n\n\n# the PRC process for the last 10 data points in the third application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\n### prior parameters before the first batch\n\na0 <- 1/2\nb0 <- 1/2\n\n### posterior parameters after the first batch\n\nap <- sum(HD) + a0\nbp <- sum(NN0) - sum(HD) + 
b0\n\nhl = 4.332 # the decision limit is derived by the function binom_PRC_h\n\n### CURRENT DATA (SECOND BATCH)\nCD <- c(2, 4, 5, 2, 4, 8, 4, 4, 8, 5)\n\nN <- length(CD)\nNN <- rep(50, N)\n\nbinom_PRC(data = CD, n = NN, a0 = ap, b0 = bp, h = hl)\n\n\n\n"} {"package":"bayespm","topic":"binom_PRC_h","snippet":"### Name: binom_PRC_h\n### Title: Derivation of the decision limit for the PRC for Binomial data\n### with probability parameter unknown\n### Aliases: binom_PRC_h\n\n### ** Examples\n\n\nbinom_PRC_h(ARL_0 = NULL, FAP = 0.05, N = 20, n = 10, a0 = 20, b0 = 180)\n\n# derivation of the decision limit of the third application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\narl0 <- 400\nap <- 66.5\nbp <- 1434.5\nkl <- 2\n\n\n# To replicate results from application set 'ARL0tol = .001' and 'it = 1e4'\nbinom_PRC_h(ARL_0 = arl0, ARL0tol = .01, k = kl, n = 50, a0 = ap, b0 = bp, it = 1e3)\n\n\n\n\n\n"} {"package":"bayespm","topic":"compgamma_HD","snippet":"### Name: compgamma_HD\n### Title: The Highest Density (HD) interval of Compound Gamma\n### distribution.\n### Aliases: compgamma_HD\n\n### ** Examples\n\ncompgamma_HD(0.95, 2, 10, 10, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"gamma_PCC","snippet":"### Name: gamma_PCC\n### Title: PCC for Gamma data with rate parameter unknown\n### Aliases: gamma_PCC\n\n### ** Examples\n\n# 30 Gamma observations introducing an outlier of 3*sd at the 15th observation\nset.seed(100)\nout <- rgamma( n = 30, shape = 4, rate = 4 )\nout[15] <- out[15] + 1.5\ngamma_PCC( out, al = 4 )\n\n\n\n"} {"package":"bayespm","topic":"gb2_HD","snippet":"### Name: gb2_HD\n### Title: The Highest Density (HD) interval of Generalized Beta of the\n### second kind distribution.\n### Aliases: gb2_HD\n\n### ** Examples\n\ngb2_HD(0.95, 4, 6, 6, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"invgamma_PCC","snippet":"### Name: invgamma_PCC\n### Title: PCC for Inverse-Gamma data with scale parameter unknown\n### Aliases: invgamma_PCC\n\n### ** Examples\n\n# 30 Inverse-Gamma observations introducing an outlier at the 15th observation\nset.seed(100)\nSimData <- 1/rgamma(n = 30, shape = 3, rate = 2)\nSimData[15] <- SimData[15] + 3\ninvgamma_PCC(SimData, al = 3)\n\n\n"} {"package":"bayespm","topic":"lnorm1_PCC","snippet":"### Name: lnorm1_PCC\n### Title: PCC for LogNormal data with scale parameter unknown\n### Aliases: lnorm1_PCC\n\n### ** Examples\n\nset.seed(9)\nSimData <- rlnorm(n = 30, meanlog = 0, sdlog = 1/2)\nSimData[15] <- SimData[15] + 3*sqrt( exp(1/4)*( exp(1/4)-1 ) )\nplot(SimData)\nlnorm1_PCC(SimData, sdl = 1/2)\n\n\n"} {"package":"bayespm","topic":"lnorm2_PCC","snippet":"### Name: lnorm2_PCC\n### Title: PCC for LogNormal data with shape parameter unknown\n### Aliases: lnorm2_PCC\n\n### ** Examples\n\n# 30 LogNormal observations introducing an outlier at the 15th observation\nset.seed(1)\nSimData <- rlnorm(n = 30, meanlog = 0, sdlog = 1/2)\nSimData[15] <- SimData[15] + 3*sqrt( exp(1/4)*( exp(1/4)-1 ) )\nplot(SimData)\nlnorm2_PCC(SimData, ml = 0)\n\n\n"} {"package":"bayespm","topic":"lnorm3_PCC","snippet":"### Name: lnorm3_PCC\n### Title: PCC for LogNormal data with both parameters unknown\n### Aliases: lnorm3_PCC\n\n### ** Examples\n\n# 30 LogNormal observations introducing an outlier at the 15th observation\nssddll <- 1/2\nset.seed(9)\nSimData <- rlnorm( n = 30, meanlog = 0, sdlog = ssddll)\nSimData[15] <- SimData[15] + 3*sqrt( exp(ssddll^2)*( exp(ssddll^2)-1 ) )\nplot(SimData)\nlnorm3_PCC(SimData)\n\n\n"} 
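{"package":"bayespm","topic":"lnorm_PCC.comparison.sketch","snippet":"### Editor's note: added sketch, not part of the bayespm package docs.\n### It runs the three LogNormal PCC variants documented above on the same\n### simulated series, so the effect of treating the scale, the shape, or both\n### parameters as unknown can be compared directly. All calls and argument\n### names are taken verbatim from the lnorm1_PCC, lnorm2_PCC, and lnorm3_PCC\n### entries above.\n\n### ** Examples\n\n# one simulated LogNormal series with an outlier at the 15th observation\nssddll <- 1/2\nset.seed(9)\nSimData <- rlnorm(n = 30, meanlog = 0, sdlog = ssddll)\nSimData[15] <- SimData[15] + 3*sqrt( exp(ssddll^2)*( exp(ssddll^2)-1 ) )\n\nlnorm1_PCC(SimData, sdl = ssddll) # scale parameter unknown, sdlog fixed\nlnorm2_PCC(SimData, ml = 0) # shape parameter unknown, meanlog fixed\nlnorm3_PCC(SimData) # both parameters unknown\n\n\n"}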
{"package":"bayespm","topic":"lnorm_HD","snippet":"### Name: lnorm_HD\n### Title: The Highest Density (HD) interval of Lognormal distribution.\n### Aliases: lnorm_HD\n\n### ** Examples\n\nlnorm_HD(0.95, 0, 1/2, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"lt_HD","snippet":"### Name: lt_HD\n### Title: The Highest Density (HD) interval of Logt distribution.\n### Aliases: lt_HD\n\n### ** Examples\n\nlt_HD(0.95, 10, 0, 1/2, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"nbinom_HM","snippet":"### Name: nbinom_HM\n### Title: The Highest Mass (HM) interval of Beta-Negative Binomial\n### distribution.\n### Aliases: nbinom_HM\n\n### ** Examples\n\nnbinom_HM(0.95, 4, 0.2, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"nbinom_PCC","snippet":"### Name: nbinom_PCC\n### Title: PCC for Negative Binomial data with probability parameter\n### unknown\n### Aliases: nbinom_PCC\n\n### ** Examples\n\n# 30 Negative Binomial observations introducing an outlier at the 15th observation\nset.seed(5)\nSimData <- rnbinom(n = 30, size = 10, prob = 0.9)\nSimData[15] <- round( SimData[15] + 3*sqrt(10*0.1/(0.9^2)) )\nnbinom_PCC(SimData, rl = 10)\n\n\n"} {"package":"bayespm","topic":"norm1_PCC","snippet":"### Name: norm1_PCC\n### Title: PCC for Normal data with mean unknown\n### Aliases: norm1_PCC\n\n### ** Examples\n\n# 30 Normal observations introducing an outlier of 3*sd at the 15th observation\nset.seed(1234)\nout <- rnorm(30)\nout[15] <- out[15] + 3\nnorm1_PCC(out, sdl = 1)\n\n# Real data application\nattach(aPTT)\nnorm1_PCC(data = aPTT_current, historical_data = aPTT_historical, sdl = 0.57)\n\n\n"} {"package":"bayespm","topic":"norm2_PCC","snippet":"### Name: norm2_PCC\n### Title: PCC for Normal data with variance unknown\n### Aliases: norm2_PCC\n\n### ** Examples\n\n# 30 Normal observations introducing an outlier of 3*sd at the 15th observation\nset.seed(1234)\nout <- rnorm(30)\nout[15] <- out[15] + 3\nnorm2_PCC(out, ml = 0)\n\n# Real data application\nattach(aPTT)\nnorm2_PCC(data = aPTT_current, historical_data = aPTT_historical, ml = 30)\n\n\n"} {"package":"bayespm","topic":"norm3_PCC","snippet":"### Name: norm3_PCC\n### Title: PCC for Normal data with both parameters unknown\n### Aliases: norm3_PCC\n\n### ** Examples\n\n# 30 Normal observations introducing an outlier of 3*sd at the 15th observation\nset.seed(1234)\nout <- rnorm(30)\nout[15] <- out[15] + 3\nnorm3_PCC(out)\n\n# Real data application\nattach(aPTT)\nnorm3_PCC(data = aPTT_current, historical_data = aPTT_historical)\n\n\n"} {"package":"bayespm","topic":"norm_HD","snippet":"### Name: norm_HD\n### Title: The Highest Density (HD) interval of Normal distribution.\n### Aliases: norm_HD\n\n### ** Examples\n\nnorm_HD(0.95, mu = 10, sdv = 1/2, plot = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"norm_mean2_PRC","snippet":"### Name: norm_mean2_PRC\n### Title: PRC for Normal data with unknown parameters (mean)\n### Aliases: norm_mean2_PRC\n\n### ** Examples\n\n\n\n# the PRC process for the first application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\n\n### CD: Current data (New reagent)\n### HD: Historical data (Previous reagent)\n\nCD <- c( 31.0, 30.0, 32.0, 28.0, 33.2, 33.2, 35.1, 35.1, 33.9, 37.9,\n 33.2, 36.5, 33.2, 35.1, 34.5, 36.5, 33.2, 35.1, 37.2, 32.6, 36.5 )\nHD <- c( 31, 30, 33, 30, 33, 30, 31, 32, 32, 30, 33, 31, 34, 31, 34, 34, 36, 30,\n 33, 29, 34, 32, 32, 28, 34, 32, 32, 30, 31, 29, 31, 29, 31, 32,34,34,32 )\n\nN <- length(CD)\nn0 <- length(HD)\n\n### initial prior parameters\n\nM0F <- 
31.8\nLF <- 1/2\nAF <- 2\nBF <- 2.1^2\n\nnorm_mean2_PRC( data = CD, historical_data = HD, alpha_0 = 1/n0, mu0 = M0F,\n l0 = LF, a0 = AF, b0 = BF, h = 3.749, two.sided = TRUE )\n\n\n\n### a real data application to aPTT values\n\n### CURRENT DATA aPTT\nCD <- c( 29.0, 29.1, 28.7, 28.2, 28.0, 29.1, 28.6, 28.7, 28.6, 29.0, 28.4,\n 28.1, 28.8, 29.7, 28.8, 29.8, 28.8, 29.4, 28.4, 28.7, 28.7, 29.5,\n 28.5, 28.4, 28.1, 28.6, 28.2, 29.6, 28.9, 29.1, 29.0, 29.9, 28.6,\n 29.3, 28.2, 28.6, 27.6, 27.3, 28.7, 27.2, 28.4, 28.0, 28.4, 27.8,\n 28.4, 28.4, 27.7, 29.2, 27.5, 27.7)\n\n### HISTORICAL DATA aPTT\nHD <- c( 28.0, 28.9, 27.7, 29.3, 28.9, 29.5, 28.2, 27.5, 28.8, 28.9, 28.7,\n 27.4, 28.6, 28.5, 29.6, 28.7, 21.3, 29.4, 28.1, 28.9, 28.3, 27.6,\n 29.0, 29.2, 27.8, 29.1, 28.9, 29.4, 29.4, 28.9, 28.9, 29.2, 29.4,\n 29.4, 28.1, 28.5, 29.7, 29.3, 28.6, 29.2, 29.3, 29.3, 29.3, 30.0,\n 29.1, 29.1, 26.8, 29.0, 29.3, 28.3)\n\n\nnorm_mean2_PRC( data = CD, historical_data = HD, mu0 = 28.9,\n l0 = 1/4, a0 = 2, b0 = 0.49, two.sided = TRUE )\n\n\n\n\n"} {"package":"bayespm","topic":"norm_mean2_PRC_h","snippet":"### Name: norm_mean2_PRC_h\n### Title: Derivation of the decision limit for the PRC for Normal data\n### with unknown parameters (mean)\n### Aliases: norm_mean2_PRC_h\n\n### ** Examples\n\n\n# Derivation of the decision limit of the first application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\nCD <- c( 31.0, 30.0, 32.0, 28.0, 33.2, 33.2, 35.1, 35.1, 33.9, 37.9,\n 33.2, 36.5, 33.2, 35.1, 34.5, 36.5, 33.2, 35.1, 37.2, 32.6, 36.5 )\nHD <- c( 31, 30, 33, 30, 33, 30, 31, 32, 32, 30, 33, 31, 34, 31, 34, 34, 36, 30,\n 33, 29, 34, 32, 32, 28, 34, 32, 32, 30, 31, 29, 31, 29, 31, 32, 34, 34, 32 )\n\nN <- length(CD)\nn0 <- length(HD)\nPa0 <- 1/n0\nM0F <- 31.8\nLF <- 1/2\nAF <- 2\nBF <- 2.1^2\nM0F ; LF ; AF ; BF\n\n# To replicate results from application set 'it = 1e5'\nnorm_mean2_PRC_h( ARL_0 = NULL, FAP = 0.05, N = N, l0 = LF, a0 = AF,\n historical_data = HD, alpha_0 = Pa0, it = 1e4 )\n\n\n\n\n\n\n\n"} {"package":"bayespm","topic":"pois_PCC","snippet":"### Name: pois_PCC\n### Title: PCC for Poisson data with rate parameter unknown\n### Aliases: pois_PCC\n\n### ** Examples\n\n# 30 Poisson observations introducing an outlier at the 15th observation\nset.seed(1111)\nout <- rpois(n = 30, lambda = 4)\nout[15] <- out[15] + 6\npois_PCC(out)\n\n# Real data application\nattach(ECE)\npois_PCC(data = defect_counts, s = inspected_units)\n\n\n"} {"package":"bayespm","topic":"pois_PRC","snippet":"### Name: pois_PRC\n### Title: PRC for Poisson data with rate parameter unknown\n### Aliases: pois_PRC\n\n### ** Examples\n\n# the PRC process for the second application in\n# \"Design and properties of the Predictive Ratio Cusum (PRC) control charts\"\n\n### CURRENT DATA\nCD <- c(1, 0, 0, 0, 1, 0, 3, 3, 3, 2, 5, 5, 2, 4, 4, 3, 4, 3, 8, 3, 2, 2)\n\n### product exposures per million\n\nsn <- c( 0.206, 0.313, 0.368, 0.678, 0.974, 0.927, 0.814, 0.696, 0.659, 0.775, 0.731,\n 0.710, 0.705, 0.754, 0.682, 0.686, 0.763, 0.833, 0.738, 0.741, 0.843, 0.792 )\n\n# regular process\npois_PRC(data = CD, s = sn)\n\n# FIR process\npois_PRC(data = CD, s = sn, FIR = TRUE)\n\n\n\n"} {"package":"bayespm","topic":"pois_PRC_h","snippet":"### Name: pois_PRC_h\n### Title: Derivation of the decision limit for the PRC for Poisson data\n### with probability parameter unknown\n### Aliases: pois_PRC_h\n\n### ** Examples\n\n\n\n\npois_PRC_h(ARL_0 = 150, c0 = 40, d0 = 10, it = 1e3)\n\n\n\n\n"} 
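{"package":"bayespm","topic":"pois_PRC.limit.sketch","snippet":"### Editor's note: added sketch, not part of the bayespm package docs.\n### It chains pois_PRC_h() and pois_PRC() the way the binom_PRC entries above\n### chain binom_PRC_h() and binom_PRC(). Two assumptions are made here and are\n### not confirmed by the entries above: that pois_PRC_h() returns the derived\n### decision limit as a number, and that pois_PRC() accepts that limit through\n### an 'h' argument as binom_PRC() does.\n\n### ** Examples\n\n### CURRENT DATA and product exposures from the pois_PRC entry above\nCD <- c(1, 0, 0, 0, 1, 0, 3, 3, 3, 2, 5, 5, 2, 4, 4, 3, 4, 3, 8, 3, 2, 2)\nsn <- c( 0.206, 0.313, 0.368, 0.678, 0.974, 0.927, 0.814, 0.696, 0.659, 0.775, 0.731,\n 0.710, 0.705, 0.754, 0.682, 0.686, 0.763, 0.833, 0.738, 0.741, 0.843, 0.792 )\n\n# derive the decision limit, then monitor with it (assumed interface)\nhl <- pois_PRC_h(ARL_0 = 150, c0 = 40, d0 = 10, it = 1e3)\npois_PRC(data = CD, s = sn, h = hl)\n\n\n"}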
{"package":"bayespm","topic":"t_HD","snippet":"### Name: t_HD\n### Title: The Highest Density (HD) interval of Student's t distribution.\n### Aliases: t_HD\n\n### ** Examples\n\nt_HD( 0.95, df = 2, mu = 2, sdv = 3, plot = TRUE )\n\n\n"} {"package":"GenoTriplo","topic":"Clustering","snippet":"### Name: Clustering\n### Title: Clustering function\n### Aliases: Clustering\n\n### ** Examples\n\ndata(GenoTriplo_to_clust)\nploidy=3\nres = Clustering(dataset=GenoTriplo_to_clust,\n nb_clust_possible=ploidy+1,n_iter=5)\n\n\n\n"} {"package":"GenoTriplo","topic":"Run_Clustering","snippet":"### Name: Run_Clustering\n### Title: Launch parallel clustering\n### Aliases: Run_Clustering\n\n### ** Examples\n\n\ndata(GenoTriplo_to_clust)\nres = Run_Clustering(data_clustering=GenoTriplo_to_clust,\n ploidy=3,n_iter=5,n_core=1)\n# or if you want to automatically save the result\n# This will automatically create a folder and save the result in it\n# Run_Clustering(data_clustering=GenoTriplo_to_clust,\n# ploidy=3,n_iter=5,n_core=1,save_n='exemple')\n\n\n\n\n"} {"package":"GenoTriplo","topic":"Run_Genotyping","snippet":"### Name: Run_Genotyping\n### Title: Launch genotyping phase in parallel\n### Aliases: Run_Genotyping\n\n### ** Examples\n\n## No test: \ndata(GenoTriplo_to_clust)\ndata(GenoTriplo_to_geno)\nres = Run_Genotyping(data_clustering=GenoTriplo_to_clust,\n res_clust=GenoTriplo_to_geno,\n ploidy=3)\n## End(No test)\n\n\n\n"} {"package":"prevR","topic":"Noptim","snippet":"### Name: Noptim\n### Title: Suggested optimal value for N\n### Aliases: Noptim\n### Keywords: stat\n\n### ** Examples\n\nNoptim(fdhs)\n\n\n\n"} {"package":"prevR","topic":"TMWorldBorders","snippet":"### Name: TMWorldBorders\n### Title: Dataset \"TM World Borders Dataset 0.3\".\n### Aliases: TMWorldBorders\n### Keywords: datasets spatial\n\n### ** Examples\n\nplot(TMWorldBorders[\"NAME\"])\n\n\n"} {"package":"prevR","topic":"as.data.frame.prevR","snippet":"### Name: as.data.frame.prevR\n### Title: Convert an object of class prevR into a data.frame.\n### Aliases: as.data.frame.prevR as.data.frame\n### Keywords: manip\n\n### ** Examples\n\nstr(fdhs)\nstr(as.data.frame(fdhs))\n## Not run: \n##D r.fdhs <- rings(fdhs, N = c(100, 200, 300))\n##D str(r.fdhs)\n##D str(as.data.frame(r.fdhs, clusters.only = TRUE))\n##D str(as.data.frame(r.fdhs))\n##D str(as.data.frame(r.fdhs, N = 300))\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"as.prevR","snippet":"### Name: as.prevR\n### Title: Create an object of class prevR.\n### Aliases: as.prevR\n### Keywords: manip\n\n### ** Examples\n\ncol <- c(\n id = \"cluster\",\n x = \"x\",\n y = \"y\",\n n = \"n\",\n pos = \"pos\",\n c.type = \"residence\",\n wn = \"weighted.n\",\n wpos = \"weighted.pos\"\n)\ndhs <- as.prevR(fdhs.clusters, col, fdhs.boundary)\n\nstr(dhs)\nprint(dhs)\n\n\n"} {"package":"prevR","topic":"changeproj,prevR-method","snippet":"### Name: changeproj,prevR-method\n### Title: Convert map projection of a object of class prevR.\n### Aliases: changeproj,prevR-method changeproj changeproj-methods\n### Keywords: manip spatial\n\n### ** Examples\n\nprint(fdhs)\nplot(fdhs, axes = TRUE, main = \"Projection: longitude/latitude\")\n\nfdhs2 <- changeproj(\n fdhs,\n \"+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n)\nprint(fdhs2)\nplot(fdhs2, axes = TRUE, main = \"Projection: UTM Zone 30\")\n\n\n\n"} {"package":"prevR","topic":"create.boundary","snippet":"### Name: create.boundary\n### Title: Provide national boundaries of a country.\n### Aliases: create.boundary\n### Keywords: 
manip spatial\n\n### ** Examples\n\n## Not run: \n##D boundary <- create.boundary()\n## End(Not run)\n## Don't show: \npar(ask = TRUE)\n## End(Don't show)\nboundary <- create.boundary(\"Burkina Faso\")\nboundary <- create.boundary(\"Burkina Faso\",\n proj = \"+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n)\nboundary <- create.boundary(countries = c(\"Burkina Faso\", \"Ghana\", \"Benin\"))\n## Don't show: \npar(ask = FALSE)\n## End(Don't show)\n\n\n\n"} {"package":"prevR","topic":"export,prevR-method","snippet":"### Name: export,prevR-method\n### Title: Export an object of class prevR.\n### Aliases: export,prevR-method export-methods export\n### Keywords: manip spatial\n\n### ** Examples\n\n## Not run: \n##D export(fdhs, element = \"boundary\", file = \"area\")\n##D export(fdhs, element = \"clusters\", format = \"shp\", file = \"points\")\n##D \n##D dhs <- rings(fdhs, N = c(100, 300, 500))\n##D export(dhs, element = \"clusters\", format = \"csv\", N = 300, file = \"points\")\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"fdhs","snippet":"### Name: fdhs\n### Title: Fictitious data generated by a DHS simulation.\n### Aliases: fdhs fdhs.boundary fdhs.clusters\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: \n##D str(fdhs)\n##D str(fdhs.clusters)\n##D str(fdhs.boundary)\n##D demo(prevR)\n## End(Not run)\n\n\n"} {"package":"prevR","topic":"import.dhs","snippet":"### Name: import.dhs\n### Title: Import DHS data.\n### Aliases: import.dhs\n### Keywords: manip\n\n### ** Examples\n\n## Not run: \n##D imported_data <- import.dhs(\"data.sav\", \"gps.dbf\")\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"is.prevR","snippet":"### Name: is.prevR\n### Title: Test if an object is of class prevR. This function tests if the\n### class of an object is prevR. 
It could be used to test the slot\n### 'rings' or the slot 'boundary'.\n### Aliases: is.prevR\n### Keywords: class\n\n### ** Examples\n\ncol <- c(\n id = \"cluster\",\n x = \"x\",\n y = \"y\",\n n = \"n\",\n pos = \"pos\",\n c.type = \"residence\",\n wn = \"weighted.n\",\n wpos = \"weighted.pos\"\n)\ndhs <- as.prevR(fdhs.clusters, col, fdhs.boundary)\n\nis.prevR(dhs)\nis.prevR(dhs, \"rings\")\nis.prevR(dhs, \"boundary\")\n\ndhs <- rings(dhs, N = 300)\nis.prevR(dhs, \"rings\")\n\n\n\n"} {"package":"prevR","topic":"kde,prevR-method","snippet":"### Name: kde,prevR-method\n### Title: Kernel density estimation for prevR object.\n### Aliases: kde,prevR-method kde-methods kde\n### Keywords: smooth spatial\n\n### ** Examples\n\n## Not run: \n##D dhs <- rings(fdhs, N = c(100, 200, 300, 400, 500))\n##D \n##D prev.N300 <- kde(dhs, N = 300, nb.cells = 200)\n##D \n##D plot(prev.N300, lty = 0)\n##D \n##D library(ggplot2)\n##D ggplot(prev.N300) +\n##D aes(fill = k.wprev.N300.RInf) +\n##D geom_sf(colour = \"transparent\") +\n##D scale_fill_gradientn(colors = prevR.colors.red()) +\n##D theme_prevR_light()\n##D \n##D # Export k.wprev.N300.RInf surface in ASCII Grid\n##D r <- terra::rast(stars::st_rasterize(prev.N300))\n##D # writeRaster(r[[2]], \"kprev.N300.asc\")\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"krige,ANY,prevR-method","snippet":"### Name: krige,ANY,prevR-method\n### Title: Spatial interpolation (kriging and inverse distance weighting)\n### for objects of class prevR.\n### Aliases: krige,ANY,prevR-method krige,prevR-method krige-methods krige\n### idw,ANY,prevR-method idw-methods idw,prevR-method idw\n### Keywords: smooth spatial\n\n### ** Examples\n\n ## Not run: \n##D dhs <- rings(fdhs, N = c(100,200,300,400,500))\n##D radius.N300 <- krige('r.radius', dhs, N = 300, nb.cells = 50)\n##D prev.krige <- krige(r.wprev ~ 1, dhs, N = c(100, 300, 500))\n##D \n##D plot(prev.krige, lty = 0)\n##D \n##D library(ggplot2)\n##D ggplot(prev.krige) +\n##D aes(fill = r.wprev.N300.RInf) +\n##D geom_sf(colour = \"transparent\") +\n##D scale_fill_gradientn(colors = prevR.colors.red()) +\n##D theme_prevR_light()\n##D \n##D # Export r.wprev.N300.RInf surface in ASCII Grid\n##D r <- terra::rast(stars::st_rasterize(prev.krige))\n##D # writeRaster(r[[2]], \"wprev.N300.asc\")\n##D \n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"make.grid.prevR","snippet":"### Name: make.grid.prevR\n### Title: Create a spatial grid from an object of class prevR.\n### Aliases: make.grid.prevR\n### Keywords: manip spatial\n\n### ** Examples\n\nmake.grid.prevR(fdhs)\nmake.grid.prevR(fdhs, nb.cells = 200)\n\n\n"} {"package":"prevR","topic":"plot,prevR,missing-method","snippet":"### Name: plot,prevR,missing-method\n### Title: Plot object of class prevR.\n### Aliases: plot,prevR,missing-method plot plot-methods plot,prevR-method\n### Keywords: hplot\n\n### ** Examples\n\n## Don't show: \npar(ask = TRUE)\n## End(Don't show)\nplot(fdhs, type = \"position\", main = \"position\", axes = TRUE)\nplot(fdhs, type = \"c.type\", main = \"c.type\")\nplot(fdhs, type = \"count\", main = \"count\", factor.size = 0.1)\nplot(fdhs, type = \"flower\", main = \"flower\")\n## Don't show: \npar(ask = FALSE)\n## End(Don't show)\n\n\n\n"} {"package":"prevR","topic":"prevR-class","snippet":"### Name: prevR-class\n### Title: Objects of class prevR.\n### Aliases: prevR-class\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"prevR\")\n\ncol <- c(\n id = \"cluster\",\n x = \"x\",\n y = \"y\",\n n = \"n\",\n pos = \"pos\",\n c.type = 
\"residence\",\n wn = \"weighted.n\",\n wpos = \"weighted.pos\"\n)\ndhs <- as.prevR(fdhs.clusters, col, fdhs.boundary)\nstr(dhs)\nprint(dhs)\n\n## Not run: \n##D dhs <- rings(fdhs, N = c(100, 300, 500))\n##D str(dhs)\n##D print(dhs)\n## End(Not run)\n\n\n"} {"package":"prevR","topic":"prevR-package","snippet":"### Name: prevR-package\n### Title: Estimating regional trends of a prevalence from a DHS.\n### Aliases: prevR-package\n### Keywords: package\n\n### ** Examples\n\n## Not run: \n##D par(ask = TRUE)\n##D # Creating an object of class prevR\n##D col <- c(\n##D id = \"cluster\",\n##D x = \"x\",\n##D y = \"y\",\n##D n = \"n\",\n##D pos = \"pos\",\n##D c.type = \"residence\",\n##D wn = \"weighted.n\",\n##D wpos = \"weighted.pos\"\n##D )\n##D dhs <- as.prevR(fdhs.clusters, col, fdhs.boundary)\n##D \n##D str(dhs)\n##D print(dhs)\n##D \n##D plot(dhs, main = \"Clusters position\")\n##D plot(dhs, type = \"c.type\", main = \"Clusters by residence\")\n##D plot(dhs, type = \"count\", main = \"Observations by cluster\")\n##D plot(dhs, type = \"flower\", main = \"Positive cases by cluster\")\n##D \n##D # Changing coordinates projection\n##D plot(dhs, axes = TRUE)\n##D dhs <- changeproj(\n##D dhs,\n##D \"+proj=utm +zone=30 +ellps=WGS84 +datum=WGS84 +units=m +no_defs\"\n##D )\n##D print(dhs)\n##D plot(dhs, axes = TRUE)\n##D \n##D # Calculating rings of equal number of observations for different values of N\n##D dhs <- rings(dhs, N = c(100, 200, 300, 400, 500))\n##D print(dhs)\n##D summary(dhs)\n##D \n##D # Prevalence surface for N=300\n##D prev.N300 <- kde(dhs, N = 300, nb.cells = 200)\n##D plot(\n##D prev.N300[\"k.wprev.N300.RInf\"],\n##D pal = prevR.colors.red,\n##D lty = 0,\n##D main = \"Regional trends of prevalence (N=300)\"\n##D )\n##D \n##D # Smoothing ring radii surface (spatial interpolation by kriging)\n##D radius.N300 <- krige(\"r.radius\", dhs, N = 300, nb.cells = 200)\n##D plot(\n##D radius.N300,\n##D pal = prevR.colors.blue,\n##D lty = 0,\n##D main = \"Radius of circle (N=300)\"\n##D )\n##D par(ask = FALSE)\n## End(Not run)\n\n\n"} {"package":"prevR","topic":"prevR.colors","snippet":"### Name: prevR.colors\n### Title: Continuous color palettes.\n### Aliases: prevR.colors prevR.colors.blue prevR.colors.blue.inverse\n### prevR.colors.gray prevR.colors.gray.inverse prevR.colors.green\n### prevR.colors.green.inverse prevR.colors.red prevR.colors.red.inverse\n### prevR.demo.pal prevR.colors.qgis.pal\n### Keywords: color\n\n### ** Examples\n\nprevR.demo.pal(25)\nprevR.colors.red(5)\ncol2rgb(prevR.colors.red(5))\n\n## Not run: \n##D prevR.colors.qgis.pal(\"palette.txt\", seq(0, 25, length.out = 100), \"red\")\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"print,prevR-method","snippet":"### Name: print,prevR-method\n### Title: Summary of a prevR object.\n### Aliases: print,prevR-method print print-methods\n\n### ** Examples\n\nprint(fdhs)\n## Not run: \n##D dhs <- rings(fdhs, N = c(100, 300, 500))\n##D print(dhs)\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"quick.prevR","snippet":"### Name: quick.prevR\n### Title: Quick prevR analysis and plot\n### Aliases: quick.prevR\n### Keywords: plot smooth spatial\n\n### ** Examples\n\n## Not run: \n##D quick.prevR(fdhs)\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"rings,prevR-method","snippet":"### Name: rings,prevR-method\n### Title: Calculation of rings of equal number of observations and/or equal\n### radius.\n### Aliases: rings,prevR-method rings rings-methods\n### Keywords: math spatial\n\n### ** Examples\n\n## Not run: 
\n##D print(fdhs)\n##D dhs <- rings(fdhs, N = c(100, 200, 300, 400, 500))\n##D print(dhs)\n## End(Not run)\n\n\n"} {"package":"prevR","topic":"show,prevR-method","snippet":"### Name: show,prevR-method\n### Title: Summary of a prevR object.\n### Aliases: show,prevR-method show show-methods\n\n### ** Examples\n\nfdhs\n## Not run: \n##D dhs <- rings(fdhs, N = c(100, 300, 500))\n##D dhs\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"summary,prevR-method","snippet":"### Name: summary,prevR-method\n### Title: Detailed summary of the variables of a prevR object\n### Aliases: summary,prevR-method summary-methods summary prevRsummary\n\n### ** Examples\n\nsummary(fdhs)\n## Not run: \n##D dhs <- rings(fdhs, N = c(100, 300, 500))\n##D summary(dhs)\n##D summary(dhs, c(0, 0.25, 0.5, 0.75, 1))\n## End(Not run)\n\n\n\n"} {"package":"prevR","topic":"xyz2dataframe","snippet":"### Name: xyz2dataframe\n### Title: Convert a surface in xyz to a data frame.\n### Aliases: xyz2dataframe\n### Keywords: manip spatial\n\n### ** Examples\n\nx <- matrix(c(2, 4, 6, 8, 10, 2, 4, 6, 8, 10), ncol = 2)\nop <- KernSmooth::bkde2D(x, bandwidth = 1)\nstr(op)\n\nop.df <- xyz2dataframe(op)\nstr(op.df)\n\n\n\n"} {"package":"HydroPortailStats","topic":"Generate","snippet":"### Name: Generate\n### Title: Random numbers generator\n### Aliases: Generate\n\n### ** Examples\n\nGenerate('Normal',c(0,1),10)\nGenerate('GEV',c(100,25,-0.2),10)\nGenerate('GEV',c(100,25,0.2),10)\nGenerate('Poisson',0.75,10)\n\n\n"} {"package":"HydroPortailStats","topic":"GetCdf","snippet":"### Name: GetCdf\n### Title: Cumulative Distribution Function (cdf)\n### Aliases: GetCdf\n\n### ** Examples\n\nGetCdf(0,'Normal',c(0,1))\nGetCdf(200,'GEV',c(100,25,-0.2))\nGetCdf(200,'GEV',c(100,25,0.2))\nGetCdf(3,'Poisson',0.75)\n\n\n"} {"package":"HydroPortailStats","topic":"GetEmpFreq","snippet":"### Name: GetEmpFreq\n### Title: Empirical nonexceedance frequency\n### Aliases: GetEmpFreq\n\n### ** Examples\n\nGetEmpFreq(i=1:10,n=10)\nGetEmpFreq(i=1:10,n=10,formula='Standard')\nGetEmpFreq(i=1:10,n=10,formula='MinusOne')\nGetEmpFreq(i=1:10,n=10,formula='Cunnane')\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_BAY","snippet":"### Name: GetEstimate_BAY\n### Title: Bayesian estimation of a distribution\n### Aliases: GetEstimate_BAY\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nprior1=list(dist='FlatPrior',par=NULL)\nprior2=list(dist='LogNormal',par=c(1,1))\nprior3=list(dist='Normal',par=c(0,0.25))\nprior=list(prior1,prior2,prior3)\npar0=GetEstimate_ROUGH(y,'GEV')$par\nmcmc=GetEstimate_BAY(y,'GEV',prior,par0,batch.length=50,batch.n=50)\ngraphicalpar=par(mfrow=c(2,3))\nplot(mcmc$x[,1],type='l'); plot(mcmc$x[,2],type='l'); plot(mcmc$x[,3],type='l')\nhist(mcmc$x[,1]); hist(mcmc$x[,2]); hist(mcmc$x[,3])\npar(graphicalpar)\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_HYDRO2","snippet":"### Name: GetEstimate_HYDRO2\n### Title: Hydro2 estimate of a distribution\n### Aliases: GetEstimate_HYDRO2\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nGetEstimate_HYDRO2(y,'Normal')\nGetEstimate_HYDRO2(y,'LogNormal')\nGetEstimate_HYDRO2(y,'Gumbel')\nGetEstimate_HYDRO2(y,'GEV')\nGetEstimate_HYDRO2(y,'Poisson')\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_LMOM","snippet":"### Name: GetEstimate_LMOM\n### Title: L-Moment estimate of a distribution\n### Aliases: GetEstimate_LMOM\n\n### ** 
Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nGetEstimate_LMOM(y,'Normal')\nGetEstimate_LMOM(y,'LogNormal')\nGetEstimate_LMOM(y,'Gumbel')\nGetEstimate_LMOM(y,'GEV')\nGetEstimate_LMOM(y,'Poisson')\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_ML","snippet":"### Name: GetEstimate_ML\n### Title: Maximum-likelihood estimate of a distribution\n### Aliases: GetEstimate_ML\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nGetEstimate_ML(y,'Normal')\nGetEstimate_ML(y,'LogNormal')\nGetEstimate_ML(y,'Gumbel')\nGetEstimate_ML(y,'Gumbel',par0=GetEstimate_ROUGH(y,'Gumbel')$par)\nGetEstimate_ML(y,'GEV',par0=GetEstimate_ROUGH(y,'GEV')$par)\nGetEstimate_ML(y,'Poisson')\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_MOM","snippet":"### Name: GetEstimate_MOM\n### Title: Moment estimate of a distribution\n### Aliases: GetEstimate_MOM\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nGetEstimate_MOM(y,'Normal')\nGetEstimate_MOM(y,'LogNormal')\nGetEstimate_MOM(y,'Gumbel')\nGetEstimate_MOM(y,'GEV')\nGetEstimate_MOM(y,'Poisson')\n\n\n"} {"package":"HydroPortailStats","topic":"GetEstimate_ROUGH","snippet":"### Name: GetEstimate_ROUGH\n### Title: Rough estimate of a distribution\n### Aliases: GetEstimate_ROUGH\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nGetEstimate_ROUGH(y,'Normal')\nGetEstimate_ROUGH(y,'LogNormal')\nGetEstimate_ROUGH(y,'Gumbel')\nGetEstimate_ROUGH(y,'GEV')\nGetEstimate_ROUGH(y,'Poisson')\n\n\n"} {"package":"HydroPortailStats","topic":"GetParFeas","snippet":"### Name: GetParFeas\n### Title: Parameter feasibility\n### Aliases: GetParFeas\n\n### ** Examples\n\n# Feasible\nGetParFeas('Normal',c(0,1))\n# Not feasible because second parameter (standard deviation) is negative\nGetParFeas('Normal',c(0,-1))\n\n\n"} {"package":"HydroPortailStats","topic":"GetParName","snippet":"### Name: GetParName\n### Title: Parameter names.\n### Aliases: GetParName\n\n### ** Examples\n\nGetParName('Normal')\nGetParName('GEV')\nGetParName('GEV',lang='en')\n\n\n"} {"package":"HydroPortailStats","topic":"GetParNumber","snippet":"### Name: GetParNumber\n### Title: Number of parameters.\n### Aliases: GetParNumber\n\n### ** Examples\n\nGetParNumber('Normal')\nGetParNumber('GEV')\n\n\n"} {"package":"HydroPortailStats","topic":"GetPdf","snippet":"### Name: GetPdf\n### Title: Probability Density Function (pdf)\n### Aliases: GetPdf\n\n### ** Examples\n\nGetPdf(0,'Normal',c(0,1))\nGetPdf(200,'GEV',c(100,25,-0.2))\nGetPdf(200,'GEV',c(100,25,0.2))\nGetPdf(3,'Poisson',0.75)\n\n\n"} {"package":"HydroPortailStats","topic":"GetQfromT","snippet":"### Name: GetQfromT\n### Title: Get quantile from return period\n### Aliases: GetQfromT\n\n### ** Examples\n\ny=stats::rnorm(50)\nH3=Hydro3_Estimation(y,'Normal')\nGetQfromT(100,H3)\n\n\n"} {"package":"HydroPortailStats","topic":"GetQuantile","snippet":"### Name: GetQuantile\n### Title: Quantile Function\n### Aliases: GetQuantile\n\n### ** Examples\n\nGetQuantile(0.99,'Normal',c(0,1))\nGetQuantile(0.99,'GEV',c(100,25,-0.2))\nGetQuantile(0.99,'GEV',c(100,25,0.2))\nGetQuantile(0.99,'Poisson',0.75)\n\n\n"} {"package":"HydroPortailStats","topic":"GetReducedVariate","snippet":"### Name: GetReducedVariate\n### Title: Reduced variate\n### Aliases: GetReducedVariate\n\n### ** Examples\n\nGetReducedVariate(0.99,'Normal')\nGetReducedVariate(0.99,'Gumbel')\nGetReducedVariate(0.99,'GEV')\nGetReducedVariate(0.99,'Poisson')\n\n\n"} 
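A minimal consistency sketch (not from the HydroPortailStats help files) tying together the functions above: GetQuantile() inverts GetCdf() for a given distribution and parameter vector, so evaluating the cdf at the p-quantile should recover p up to numerical precision.

# Hedged sketch: round-trip between GetQuantile and GetCdf for the GEV
# parameters used in the examples above (location, scale, shape):
par.gev <- c(100, 25, -0.2)
q99 <- GetQuantile(0.99, 'GEV', par.gev)
GetCdf(q99, 'GEV', par.gev) # expected to return approximately 0.99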
{"package":"HydroPortailStats","topic":"GetTfromQ","snippet":"### Name: GetTfromQ\n### Title: Get return period from value\n### Aliases: GetTfromQ\n\n### ** Examples\n\ny=stats::rnorm(50)\nH3=Hydro3_Estimation(y,'Normal')\nGetTfromQ(3,H3)\n\n\n"} {"package":"HydroPortailStats","topic":"GetUncertainty_ML","snippet":"### Name: GetUncertainty_ML\n### Title: Maximum-likelihood estimation of uncertainty\n### Aliases: GetUncertainty_ML\n\n### ** Examples\n\ny=c(9.2,9.5,11.4,9.5,9.4,9.6,10.5,11.1,10.5,10.4)\nestim=GetEstimate_ML(y,'Gumbel',par0=GetEstimate_ROUGH(y,'Gumbel')$par)\nGetUncertainty_ML(y,'Gumbel',par=estim$par)\n\n\n"} {"package":"HydroPortailStats","topic":"Hydro3_Estimation","snippet":"### Name: Hydro3_Estimation\n### Title: Hydro3 estimation\n### Aliases: Hydro3_Estimation\n\n### ** Examples\n\ny=stats::rnorm(50)\nH3=Hydro3_Estimation(y,'Normal')\nH3=Hydro3_Estimation(y,'GEV',Emeth='ML',Umeth='ML')\n\n\n"} {"package":"HydroPortailStats","topic":"Hydro3_Plot","snippet":"### Name: Hydro3_Plot\n### Title: Hydro3 plot\n### Aliases: Hydro3_Plot\n\n### ** Examples\n\ny=stats::rnorm(50)\nH3=Hydro3_Estimation(y,'Normal')\nHydro3_Plot(H3)\n\n\n"} {"package":"HydroPortailStats","topic":"KS","snippet":"### Name: KS\n### Title: Kolmogorov-Smirnov Test\n### Aliases: KS\n\n### ** Examples\n\ny=stats::rnorm(20)\nKS(y,'Normal',c(0,1))\nKS(y,'Normal',c(1,1))\nKS(y,'Gumbel',c(0,1))\n\n\n"} {"package":"HydroPortailStats","topic":"MK","snippet":"### Name: MK\n### Title: Mann-Kendall Test\n### Aliases: MK\n\n### ** Examples\n\ny=stats::rnorm(50)\nMK(y)\ny=y+0.1*(1:length(y))\nMK(y)\n\n\n"} {"package":"HydroPortailStats","topic":"Metropolis_OAAT","snippet":"### Name: Metropolis_OAAT\n### Title: One-At-A-Time Metropolis sampler\n### Aliases: Metropolis_OAAT\n\n### ** Examples\n\n# Bivariate target distribution: beta(0.8,0.4) X exp(1)\nf=function(x){stats::dbeta(x[1],0.8,0.4,log=TRUE)+stats::dexp(x[2],log=TRUE)}\nx0=c(0.5,2)\nsdjump=c(0.5,1)\nmcmc=Metropolis_OAAT(f,x0,1000,sdjump)\ngraphicalpar=par(mfrow=c(1,3))\nplot(mcmc$x);hist(mcmc$x[,1]); hist(mcmc$x[,2])\npar(graphicalpar)\n\n\n"} {"package":"HydroPortailStats","topic":"Metropolis_OAAT_adaptive","snippet":"### Name: Metropolis_OAAT_adaptive\n### Title: Adaptive One-At-A-Time Metropolis sampler\n### Aliases: Metropolis_OAAT_adaptive\n\n### ** Examples\n\n# Bivariate target distribution: beta(0.8,0.4) X exp(1)\nf=function(x){stats::dbeta(x[1],0.8,0.4,log=TRUE)+stats::dexp(x[2],log=TRUE)}\nx0=c(0.5,2)\nsdjump=c(0.5,1)\nmcmc=Metropolis_OAAT_adaptive(f,x0,sdjump)\ngraphicalpar=par(mfrow=c(1,3))\nplot(mcmc$x);hist(mcmc$x[,1]); hist(mcmc$x[,2])\npar(graphicalpar)\n\n\n"} {"package":"HydroPortailStats","topic":"Metropolis_OAAT_jump","snippet":"### Name: Metropolis_OAAT_jump\n### Title: One-At-A-Time Metropolis sampler\n### Aliases: Metropolis_OAAT_jump\n\n### ** Examples\n\n# Bivariate target distribution: beta(2,10) X exp(1)\nf=function(x){stats::dbeta(x[1],2,10,log=TRUE)+stats::dexp(x[2],log=TRUE)}\nx0=c(0.5,0.5)\nfx0=f(x0)\nsdjump=c(0.1,0.1)\nMetropolis_OAAT_jump(f,x0,fx0,sdjump)\n\n\n"} {"package":"HydroPortailStats","topic":"Pettitt","snippet":"### Name: Pettitt\n### Title: Pettitt Test\n### Aliases: Pettitt\n\n### ** Examples\n\ny=stats::rnorm(50)\nPettitt(y)\ny[26:50]=y[26:50]+2\nPettitt(y)\n\n\n"} {"package":"plotfunctions","topic":"addInterval","snippet":"### Name: addInterval\n### Title: Draw intervals or arrows on plots.\n### Aliases: addInterval\n\n### ** Examples\n\nemptyPlot(1000,5, xlab='Time', ylab='Y')\n# add interval indication for 
Time=200 to Time=750:\naddInterval(1, 200, 750, lwd=2, col='red')\n\n# zero-length intervals also should work:\naddInterval(pos=521, lowVals=c(1.35, 1.5, 4.33), highVals=c(1.15,1.5, 4.05),\n horiz=FALSE, length=.1, lwd=4)\n\n# combine with getCoords for consistent positions with different axes:\npar(mfrow=c(2,2))\n# 1st plot:\nemptyPlot(1000,c(-1,5), h0=0)\naddInterval(getCoords(.1,side=2), 200,800, \n col='red', lwd=2)\naddInterval(getCoords(.5,side=1), 1,4, horiz=FALSE,\n col='blue', length=.15, angle=100, lwd=4)\nabline(h=getCoords(.1, side=2), lty=3, col='red', xpd=TRUE)\nabline(v=getCoords(.5, side=1), lty=3, col='blue', xpd=TRUE)\n# 2nd plot:\nemptyPlot(1000,c(-250, 120), h0=0)\naddInterval(getCoords(.1,side=2), 750,1200, \n col='red', lwd=2, minmax=c(0,1000))\nabline(h=getCoords(.1, side=2), lty=3, col='red', xpd=TRUE)\n# 3rd plot:\nemptyPlot(c(-50,50),c(20,120), h0=0)\naddInterval(getCoords(.5,side=1), 80,120, horiz=FALSE,\n col='blue', code=2, length=.15, lwd=4, lend=1)\nabline(v=getCoords(.5, side=1), lty=3, col='blue', xpd=TRUE)\n\n# Alternative boxplots: \nb <- boxplot(count ~ spray, data = InsectSprays, plot=FALSE)$stats\nemptyPlot(c(1,6), range(b[c(1,5),]), h0=0)\naddInterval(1:6, b[1,], b[5,], horiz=FALSE)\n# no end lines:\naddInterval(1:6, b[2,], b[4,], horiz=FALSE, lwd=8, length=0, lend=2)\n# no error with zero-length intervals:\naddInterval(1:6, b[3,], b[3,], horiz=FALSE, lwd=2, length=.1, lend=2)\n\n# reset\npar(mfrow=c(1,1))\n\n\n"} {"package":"plotfunctions","topic":"add_bars","snippet":"### Name: add_bars\n### Title: Adding bars to an existing plot.\n### Aliases: add_bars\n\n### ** Examples\n\n# hypothetical experiment:\nadults = stats::rpois(100, lambda = 5)\nchildren = stats::rpois(100, lambda = 4)\nnewd <- data.frame(Adults = table( factor(adults, levels=0:15) ),\n Children = table( factor(children, levels=0:15) ) )\nnewd <- newd[,c(1,2,4)]\nnames(newd)[1] <- 'value'\n\n# barplot of Adults:\nb <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value, \n border=NA, ylim=c(0,30))\n# overlay Children measures:\nadd_bars(b, newd$Children.Freq, col='red', density=25, xpd=TRUE)\n\n# variants:\nb <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value, \n border=NA, ylim=c(0,30))\nadd_bars(b+.1, newd$Children.Freq, width=.85, col=alpha('red'), \n border=NA, xpd=TRUE)\n\nemptyPlot(c(-30,30), c(0,15), v0=0, ylab='Condition')\nadd_bars(-1*newd$Children.Freq, 0:15, y0=0, col=alpha('blue'), \n border='blue', horiz=TRUE)\nadd_bars(newd$Adults.Freq, 0:15, y0=0, col=alpha('red'), \n border='red', horiz=TRUE)\nmtext(c('Children', 'Adults'), side=3, at=c(-15,15), line=1, cex=1.25, \n font=2)\n\n# adding shadow:\nb <- barplot(newd$Adults.Freq, beside=TRUE, names.arg=newd$value, \n width=.9, \n col='black', border=NA)\nadd_bars(b+.2, newd$Adults.Freq+.2, y0=.2, width=.9, \n col=alpha('black', f=.2), border=NA, xpd=TRUE)\n\n\n\n"} {"package":"plotfunctions","topic":"add_n_points","snippet":"### Name: add_n_points\n### Title: Add groups of points to a plot\n### Aliases: add_n_points\n\n### ** Examples\n\n\ns <- table(cars$speed)\nd <- tapply(cars$dist, list(cars$speed), mean)\n\nemptyPlot(range(as.numeric(names(s))), range(d), \n xlab='dist', ylab='mean speed')\nadd_n_points(as.numeric(names(s)), d, s, pch='*')\n\n# decrease space between groups of points:\nemptyPlot(range(as.numeric(names(s))), range(d), \n xlab='dist', ylab='mean speed')\nadd_n_points(as.numeric(names(s)), d, s, sep=0)\n\n# decrease width of groups of points:\nemptyPlot(range(as.numeric(names(s))), 
range(d), \n xlab='dist', ylab='mean speed')\nadd_n_points(as.numeric(names(s)), d, s, width=0.8)\n\n# horizontal vs vertical:\nemptyPlot(range(d),range(as.numeric(names(s))), \n ylab='dist', xlab='mean speed')\nadd_n_points(d, as.numeric(names(s)), s, horiz=FALSE) \n\n\n\n"} {"package":"plotfunctions","topic":"alpha","snippet":"### Name: alpha\n### Title: Adjusting the transparency of colors.\n### Aliases: alpha\n\n### ** Examples\n\nemptyPlot(100,100, h=50, v=50)\nrect(25,25,75,75, col=alpha('red',f=1))\nrect(35,41,63,81, col=alpha(rgb(0,1,.5),f=.25), \n border=alpha(rgb(0,1,.5), f=.65), lwd=4)\n\nemptyPlot(1,1, axes=FALSE, main='Tunnel of 11 squares')\ncenter <- c(.75, .25)\nmycol <- 'steelblue'\nfor(i in seq(0,1,by=.1)){\n rect(center[1]-center[1]*(1.1-i), center[2]-center[2]*(1.1-i), \n center[1]+(1-center[1])*(1.1-i), center[2]+(1-center[2])*(1.1-i), \n col=alpha(mycol, f=i), border=mycol, lty=1, lwd=.5, xpd=TRUE)\n}\naxis(1, at=center[1]-center[1]*(1.1-seq(0,1,by=.1)), labels=seq(0,1,by=.1))\n\n# see alphaPalette for an elaboration of this example\n\n\n\n"} {"package":"plotfunctions","topic":"alphaPalette","snippet":"### Name: alphaPalette\n### Title: Manipulate the transparency in a palette.\n### Aliases: alphaPalette\n\n### ** Examples\n\n# a palette of 5 white transparent colors:\nalphaPalette('white', f.seq=1:5/5)\n# the same palette:\nalphaPalette('white', f.seq=c(.2,1), n=5)\n# a palette with 10 colors blue, yellow and red, that differ in transparency\nalphaPalette(c('blue', 'yellow', 'red'), f.seq=c(0.1,.8), n=10)\n\nemptyPlot(1,1, axes=FALSE, main='Tunnel of 11 squares')\nmycol <- 'steelblue'\ncenter <- c(.75, .25)\ni = seq(0,1,by=.1)\nfillcol <- alphaPalette(c(mycol, 'black'), f.seq=i)\nlinecol <- alphaPalette(mycol, f.seq=1-i)\nrect(center[1]-center[1]*(1.1-i), center[2]-center[2]*(1.1-i), \n center[1]+(1-center[1])*(1.1-i), center[2]+(1-center[2])*(1.1-i), \n col=fillcol, border=linecol, lty=1, lwd=1, xpd=TRUE)\n\n\n\n"} {"package":"plotfunctions","topic":"check_normaldist","snippet":"### Name: check_normaldist\n### Title: Compare distribution of data with normal distribution.\n### Aliases: check_normaldist\n\n### ** Examples\n\nset.seed(123)\n# normal distribution:\ntest <- rnorm(1000)\ncheck_normaldist(test)\n# t-distribution:\ntest <- rt(1000, df=5)\ncheck_normaldist(test)\n# skewed data, e.g., reaction times:\ntest <- exp(rnorm(1000, mean=.500, sd=.25))\ncheck_normaldist(test)\n# center first:\ncheck_normaldist(scale(test))\n# binomial distribution:\ntest <- rbinom(1000, 1, .3)\ncheck_normaldist(test)\n# count data:\ntest <- rbinom(1000, 100, .3)\ncheck_normaldist(test)\n\n\n"} {"package":"plotfunctions","topic":"color_contour","snippet":"### Name: color_contour\n### Title: Creates a contour plot with colored background.\n### Aliases: color_contour\n\n### ** Examples\n\n\n# Volcano example of R (package datasets)\ncolor_contour(z=volcano)\n# change color and lines:\ncolor_contour(z=volcano, color='terrain', col=alpha(1), lwd=2, lty=5)\n# change x-axis values and zlim:\ncolor_contour(x=seq(500,700, length=nrow(volcano)),\n z=volcano, color='terrain', col=alpha(1), lwd=2, zlim=c(0,200))\n\n# compare with similar functions:\nfilled.contour(volcano, color.palette=terrain.colors)\n\n# without contour lines:\ncolor_contour(z=volcano, color='terrain', lwd=0, drawlabels=FALSE)\n# without background:\ncolor_contour(z=volcano, color=NULL, add.color.legend=FALSE)\n\n\n"} {"package":"plotfunctions","topic":"convertFile","snippet":"### Name: convertFile\n### Title: Replacing 
separators (for example, decimal and thousand\n### separators).\n### Aliases: convertFile\n\n### ** Examples\n\n## Not run: \n##D # normally, the function call would look something like this:\n##D convertFile('example1.csv', symbol1=',', symbol2='.', sep='\\t', \n##D newsymbol1='.', newsymbol2='')\n##D # But as we are not sure that the file example1.csv is available,\n##D # we need to do something a little more complicated to point to \n##D # the file 'example1.csv' that comes with the package:\n##D \n##D # finding one of the example files from the package:\n##D file1 <- system.file('extdata', 'example1.csv', package = 'plotfunctions')\n##D \n##D # example 1: \n##D system.time({\n##D convertFile(file1, symbol1=',', symbol2='.', \n##D newsymbol1='.', newsymbol2='', outputfile='example1_new.csv')\n##D })\n##D # example 2: type 'yes' to overwrite the previous output file, \n##D # or specify a different filename in outputfile.\n##D system.time({\n##D convertFile(file1, symbol1=',', symbol2='.', sep='\\t', \n##D newsymbol1='.', newsymbol2='', columns=1:2, outputfile='example1_new.csv')\n##D })\n##D # Example 1 takes less time, as it does not use read.table, \n##D # but just reads the file as text lines. However, the column \n##D # version could be useful when symbols should be replaced only \n##D # in specific columns.\n##D # Note that Example 2 writes the output with quotes, but this is \n##D # not a problem for read.table:\n##D dat <- read.table('example1_new.csv', header=TRUE, sep='\\t', \n##D stringsAsFactors=FALSE)\n## End(Not run)\n\n\n"} {"package":"plotfunctions","topic":"dotplot_error","snippet":"### Name: dotplot_error\n### Title: Utility function\n### Aliases: dotplot_error\n\n### ** Examples\n\n\n# example InsectSprays from R datasets\navg <- aggregate(count ~ spray, data=InsectSprays, mean)\navg <- merge(avg, \n aggregate(count ~ spray, data=InsectSprays, sd),\n by='spray', all=TRUE)\n\ndotplot_error(avg$count.x, se.val=avg$count.y, labels=avg$spray)\n\n# we could add the type of spray to the averages:\navg$type <- c(1,1,2,2,2,1)\ndotplot_error(avg$count.x, se.val=avg$count.y, groups=avg$type, labels=avg$spray) \n\n\n\n"} {"package":"plotfunctions","topic":"drawDevArrows","snippet":"### Name: drawDevArrows\n### Title: Draw arrows between different plots.\n### Aliases: drawDevArrows\n\n### ** Examples\n\n\n### EXAMPLE 1 ################################\n\n# setup 4 panels:\npar(mfrow=c(2,2))\n\n#------------------\n# PLOT 1: two points\n#------------------\n\nplot(0.5, 0.5, main='1', \n pch=21, lwd=3, col='red', bg='white', cex=1.2)\npoints(.5, .375, pch=22, lwd=3, col='blue', cex=1.2)\n\n# Draw an arrow between the two points:\ndrawDevArrows(start=c(.5,.5), end=c(.5,.375), \n units='coords', arrows='start', length=.1, lty=1)\n# ... which is the same as using arrows:\narrows(x0=.5, x1=.5, y0=.5, y1=.375, code=1, length=.1, lty=1)\n\n# ... 
but these arrows can also be clipped to the device \n# instead of the plot region (see leftbottom corner):\ndrawDevArrows(start=c(.5,.5), end=c(.5,.375), \n units='dev', arrows='start', length=.1, lty=1)\n\n# The function getArrowPos converts coordinates to device coordinates:\nx1 <- getArrowPos(x=0.5, y=0.5, units='coords')\nx2 <- getArrowPos(x=0.5, y=0.375, units='coords')\ndrawDevArrows(x1, x2, col='purple',\n arrows='start', length=.1, lty=2, lwd=2)\n\n\n# Setup 4 arrows with the same starting points, \n# but defined differently:\na1 <- getArrowPos(x=0.5, y=0.375, units='coords')\na2 <- getArrowPos(x=0.5, y=0.21, units='prop')\na3 <- getArrowPos(x=0.55, y=0.36, units='prop', dev='fig')\na4 <- getArrowPos(x=0.5*0.55, y=.5*0.36+.5, units='prop', dev='dev')\n\n# Setup 3 arrows with the same x and y values, \n# which define different starting points in practice:\nb1 <- getArrowPos(x=.5, y=.5, units='prop', dev='plot')\nb2 <- getArrowPos(x=.5, y=.5, units='prop', dev='fig')\nb3 <- getArrowPos(x=.5, y=.5, units='prop', dev='dev')\n\n\n#------------------\n# PLOT 2: different coordinates\n#------------------\n\nplot(c(-2.33, 20), c(.3, .8), type='n', main='2')\npoints(15,.8, pch=21, lwd=3, col='red', bg='white', cex=1.2)\n\n# define end point for b:\nb <- getArrowPos(x=15, y=.8)\n\n# Draw arrow b1:\ndrawDevArrows(start=b1, end=b, arrows='start', length=.1, lty=1)\n\n\n#------------------\n# PLOT 3: upside down axis\n#------------------\n\nemptyPlot(c(25, 1050), c(15,-15), eegAxis=TRUE, h0=0)\n# plot line:\nx <- 0:1000\ny <- 10*cos(x/100)\nlines(x, y, col=4)\n# draw points on the line:\nx <- c(200,400,600,800)\ny <- 10*cos(x/100)\npoints(x,y, pch=18)\n\n# To avoid calling the function drawDevArrows 4 times, we rewrite\n# the x- and y-positions of the 4 coordinates a1, a2, a3, a4 in one list:\na.start <- list(x=c(a1$x, a2$x, a3$x, a4$x), y=c(a1$y, a2$y, a3$y, a4$y))\n# Define end points on the line:\na.end <- getArrowPos(x=x, y=y)\ndrawDevArrows(start=a.start, end=a.end, arrows='none', lty=3)\n\n# Note that these four coordinates are actually referring \n# to the same starting point!\n# So instead we could have written:\ndrawDevArrows(start=a1, end=a.end, arrows='none', col=alpha('red'), lwd=2)\n\n\n#------------------\n# PLOT 4: wrapping up\n#------------------\n\n# Arrows could be constructed when the plot is not yet called, \n# as they are clipped to the device:\ndrawDevArrows(start=c(0,7), end=c(7,0), col='gray', lwd=4, lty=3, arrows='none')\n\n# Add the plot:\nplot(1,1, bg='green')\n\n# Finish b2 and b3: same x and y, but different coordinates\ndrawDevArrows(start=b2, end=b, arrows='start', length=.1, lty=2)\ndrawDevArrows(start=b3, end=b, arrows='start', length=.1, lty=3)\n\n\n\n### EXAMPLE 2 ################################\n\n\n\n# setup 4 plots:\npar(mfrow=c(2,2))\n\nn <- 50\n\n#------------------\n# PLOT 1: empty\n#------------------\n\n\nemptyPlot(c(25, 1050), c(15,-15), axes=FALSE)\nlines(0:1000, 10*cos(0:1000/200), col=4)\nx <- seq(0,1000, length=n)\ny <- 10*cos(x/200)\n\na <- getArrowPos(x=x, y=y)\n\n\n#------------------\n# PLOT 2\n#------------------\n\nemptyPlot(c(25, 1050), c(15,-15), axes=FALSE)\nlines(0:1000, 10*sin(0:1000/200), col=1)\nx <- seq(0,1000, length=n)\ny <- 10*sin(x/200)\n\n\nb <- getArrowPos(x=x, y=y)\n\n\n\n#------------------\n# PLOT 3\n#------------------\n\nemptyPlot(c(25, 1050), c(15,-15), axes=FALSE)\nlines(0:1000, 10*cos(0:1000/200), col=4)\nx <- seq(0,1000, length=n)\ny <- 10*cos(x/200)\n\n\nc <- getArrowPos(x=rev(x), 
y=rev(y))\n\n\n#------------------\n# PLOT 4\n#------------------\n\nemptyPlot(c(25, 1050), c(15,-15), axes=FALSE)\nlines(0:1000, 10*sin(0:1000/200), col=1)\nx <- seq(0,1000, length=n)\ny <- 10*sin(x/200)\n\nd1 <- getArrowPos(x=rev(x), y=rev(y))\nd2 <- getArrowPos(x=x, y=y)\n\n\n#------------------\n# DRAW ARROWS\n#------------------\n\ndrawDevArrows(start=a, end=b, arrows='none', col='gray')\ndrawDevArrows(start=c, end=d1, arrows='none', col='gray')\n\ndrawDevArrows(start=a, end=c, arrows='none', \n col=alphaPalette(c('green', 'blue'), f.seq=c(0,1), n=n))\ndrawDevArrows(start=b, end=d2, arrows='none', \n col=alphaPalette('pink', f.seq=c(1,.1), n=n))\n\n\n"} {"package":"plotfunctions","topic":"emptyPlot","snippet":"### Name: emptyPlot\n### Title: Utility function\n### Aliases: emptyPlot\n\n### ** Examples\n\n# generate some measurements:\nx <- runif(100,0,100)\ny <- rpois(100,lambda=3)\n\n# Setup empty plot window fitting for data:\nemptyPlot(range(x), range(y))\n# To add data, use lines() and points()\npoints(x,y, pch=16, col=alpha('steelblue'))\n\n# Category labels:\nemptyPlot(toupper(letters[1:5]), 1)\n# order matters:\nemptyPlot(sample(toupper(letters[1:5])), 1)\n# actually, they are plotted on x-positions 1:5\npoints(1:5, rnorm(5, mean=.5, sd=.1))\n# also possible for y-axis or both:\nemptyPlot(c(200,700), toupper(letters[1:5]))\nemptyPlot(as.character(8:3), toupper(letters[1:5]))\n# change orientation of labels:\npar(las=1)\nemptyPlot(c(200,700), toupper(letters[1:5]))\npar(las=0) # set back to default\n\n# More options:\nemptyPlot(range(x), range(y),\n main='Data', ylab='Y', xlab='Time')\n# add averages:\nm <- tapply(y, list(round(x/10)*10), mean)\nlines(as.numeric(names(m)), m, type='o', pch=4)\n\n# with vertical and horizontal lines:\nemptyPlot(1, 1, h0=.5, v0=.75)\n# eeg axis (note the axes labels):\nemptyPlot(c(-200,1000), c(-5,5),\n main='EEG', v0=0, h0=0,\n eegAxis=TRUE)\n\n# simplify axes:\nemptyPlot(c(-3.2,1.1), c(53,58),\n xmark=TRUE, ymark=TRUE, las=1)\n# compare with R default:\nemptyPlot(c(-3.2,1.1), c(53,58), las=1)\n# also possible to specify values manually:\nemptyPlot(c(-3.2,1.1), c(53,58),\n xmark=c(-3.2,0, 1.1), ymark=c(55,57), las=1)\n\n# empty window:\nemptyPlot(1,1,axes=FALSE)\n# add box:\nemptyPlot(1,1, bty='o')\n\n\n\n"} {"package":"plotfunctions","topic":"errorBars","snippet":"### Name: errorBars\n### Title: Add error bars to a plot.\n### Aliases: errorBars\n\n### ** Examples\n\n\n# example InsectSprays from R datasets\n\nInsectSprays$type <- ifelse( InsectSprays$spray %in% c('A', 'B', 'F'), 1,2)\navg <- with(InsectSprays, tapply(count, list(spray), mean))\nsds <- with(InsectSprays, tapply(count, list(spray), sd))\n\n\n# barplot:\nb <- barplot(avg, beside=TRUE, main='Insect Sprays', ylim=c(0,20))\nerrorBars(b, avg, sds, xpd=TRUE, length=.05)\n\n# constrain error bars to max and min of plot:\nb <- barplot(avg, beside=TRUE, main='Insect Sprays', ylim=c(0,20))\nerrorBars(b, avg, sds, minmax=c(0,20), xpd=TRUE, length=.05) \n\n# add borders:\nb <- barplot(avg, beside=TRUE, main='Insect Sprays', ylim=c(0,20),\n col=1, border=NA)\nerrorBars(b, avg, sds, minmax=c(0,20), xpd=TRUE, length=.05, border=TRUE) \n\n# change layout:\nb <- barplot(avg, beside=TRUE, main='Insect Sprays', ylim=c(0,20),\n col=1, border=NA)\nerrorBars(b, avg, sds, minmax=c(0,20), xpd=TRUE, border=TRUE, \n length=.05, col='blue', # settings for error bars \n border.length=.1, border.col='yellow', border.lwd=5) # settings border\n\n# line plot with asymmetric fake 
errors:\nemptyPlot(toupper(letters[1:6]), 20, main='Averages', xlab='Spray')\nci.low <- abs(rnorm(6, mean=2))\nci.high <- abs(rnorm(6, mean=4))\n\nerrorBars(1:6, avg, ci.high, ci.l= ci.low, length=.05, lwd=2)\npoints(1:6, avg, pch=21, type='o', lty=3, lwd=2,\n bg='white', xpd=TRUE)\n# also horizontal bars possible:\nerrorBars(10, 1, 1.2, horiz=TRUE, col='red')\n\n\n\n"} {"package":"plotfunctions","topic":"fill_area","snippet":"### Name: fill_area\n### Title: Utility function\n### Aliases: fill_area\n\n### ** Examples\n\n# density of a random sample from normal distribution:\ntest <- density(rnorm(1000))\nemptyPlot(range(test$x), range(test$y))\nfill_area(test$x, test$y)\nfill_area(test$x, test$y, from=.1, col='red')\nfill_area(test$x, test$y, from=.2, col='blue', density=10, lwd=3)\nlines(test$x, test$y, lwd=2)\n\n\n\n"} {"package":"plotfunctions","topic":"findAbsMin","snippet":"### Name: findAbsMin\n### Title: Return the value (or the element with the value) closest to\n### zero.\n### Aliases: findAbsMin\n\n### ** Examples\n\n(test <- seq(-25,25, by=3))\nmin(test[test>0])\nmax(test[test<0])\nmin(abs(test))\nfindAbsMin(test)\n\n\n"} {"package":"plotfunctions","topic":"find_n_neighbors","snippet":"### Name: find_n_neighbors\n### Title: Return n neighbors around given indices.\n### Aliases: find_n_neighbors\n\n### ** Examples\n\nvectorIndices <- 1:1000\nindOutliers <- c(2,10, 473, 359, 717, 519)\nfn3 <- find_n_neighbors(indOutliers, n=3, max=max(vectorIndices))\nfn20 <- find_n_neighbors(indOutliers, n=20, max=max(vectorIndices))\n\n# check fn3:\nprint(fn3)\n\n# Plot:\nemptyPlot(c(-10,1000), c(-1,1), h0=0, v0=indOutliers)\npoints(fn3, rep(.5, length(fn3)), pch='*')\npoints(fn20, rep(-.5, length(fn20)), pch='*')\n\n\n"} {"package":"plotfunctions","topic":"getCoords","snippet":"### Name: getCoords\n### Title: Convert proportions into coordinates of the plot or figure\n### region.\n### Aliases: getCoords\n\n### ** Examples\n\n# set larger plot window, depending on your system:\n# dev.new(,with=8, height=4) # windows, mac\n# quartz(,8,4) # Mac\n# x11(width=8, height=4) # linux\npar(mfrow=c(1,2))\n\n# PLOT 1: y-range is -1 to 1\nemptyPlot(c(0,1),c(-1,1), h0=0, v0=0.5)\n# calculate the x-coordinates for points at proportion\n# -0.2, 0, .25, .5, 1.0, and 1.1 of the plot window:\np1 <- getCoords(pos=c(-0.2,0,.25,.5,1,1.1), side=2)\n# use xpd=TRUE to plot outside plot region:\npoints(rep(0.5,length(p1)), p1, pch=16, xpd=TRUE)\n# add legend outside plot region, in upper-right corner of figure:\nlegend(x=getCoords(1,side=1, input='f'), y=getCoords(1, side=2, input='f'),\n xjust=1, yjust=1,\n legend=c('points'), pch=16, xpd=TRUE)\n# Note: this can easier be achieved with function getFigCoords\n\n# PLOT 2: y-range is 25 to 37\n# we would like to plot the points and legend at same positions\nemptyPlot(c(0,1),c(25,37), h0=0, v0=0.5)\np1 <- getCoords(pos=c(-0.2,0,.25,.5,1,1.1), side=2)\npoints(rep(0.5,length(p1)), p1, pch=16, xpd=TRUE)\n# add legend outside plot region, in upper-left corner of figure:\nlegend(x=getCoords(0,side=1, input='f'), y=getCoords(1, side=2, input='f'),\n xjust=0, yjust=1,\n legend=c('points'), pch=16, xpd=TRUE)\n\n\n\n"} {"package":"plotfunctions","topic":"getDec","snippet":"### Name: getDec\n### Title: Return the number of decimal places.\n### Aliases: getDec\n\n### ** Examples\n\ngetDec(c(10,10.432, 11.01, .000001))\n\n\n"} {"package":"plotfunctions","topic":"getFigCoords","snippet":"### Name: getFigCoords\n### Title: Get the figure region as coordinates of the current plot 
region,\n### or as coordinates of the figure region.\n### Aliases: getFigCoords\n\n### ** Examples\n\n# setup plot region:\nemptyPlot(1,1, bty='o')\nfc <- getFigCoords()\npc <- getFigCoords('p')\narrows(x0=pc[c(1,2,1,2)], x1=fc[c(1,2,1,2)],\n y0=pc[c(3,3,4,4)], y1=fc[c(3,3,4,4)], xpd=TRUE)\n\n# Same plot with different axis:\nemptyPlot(c(250,500),c(331, 336), bty='o')\nfc <- getFigCoords()\npc <- getFigCoords('p')\narrows(x0=pc[c(1,2,1,2)], x1=fc[c(1,2,1,2)],\n y0=pc[c(3,3,4,4)], y1=fc[c(3,3,4,4)], xpd=TRUE)\nhc <- getFigCoords('h')\n\n# other options:\n# 1. center of figure region:\nabline(v=getFigCoords('hf')[1], col='blue', xpd=TRUE)\nabline(h=getFigCoords('hf')[2], col='blue', xpd=TRUE)\n# 2. center of plot region:\nabline(v=getFigCoords('hp')[1], col='red', lty=3)\nabline(h=getFigCoords('hp')[2], col='red', lty=3)\n\n\n\n"} {"package":"plotfunctions","topic":"getProps","snippet":"### Name: getProps\n### Title: Transform coordinates into proportions of the figure or plot\n### region.\n### Aliases: getProps\n\n### ** Examples\n\n# not very easy-to-calculate-with x- and y-axis values\nemptyPlot(c(-2.35, 37.4), c(9,11), v0=0)\n# draw a mirror symmetric image of boxes:\np1 <- c(9.5, 9.5)\np2 <- c(4,9.7)\np3 <- c(20,9)\np1m <- getCoords(1-getProps(p1, side=c(1,2)), side=c(1,2))\np2m <- getCoords(1-getProps(p2, side=c(1,2)), side=c(1,2))\np3m <- getCoords(1-getProps(p3, side=c(1,2)), side=c(1,2))\nxdist <- diff(getCoords(c(0,.1), side=1))\nydist <- diff(getCoords(c(0,.1), side=2))\nrect(xleft=c(p1[1],p2[1], p3[1], p1m[1], p2m[1], p3m[1])-xdist, \n xright=c(p1[1],p2[1], p3[1], p1m[1], p2m[1], p3m[1])+xdist,\n ybottom=c(p1[2],p2[2], p3[2], p1m[2], p2m[2], p3m[2])-ydist, \n ytop=c(p1[2],p2[2], p3[2], p1m[2], p2m[2], p3m[2])+ydist, \n col=rep(c('red', NA, 'lightblue'),2), xpd=TRUE )\n\n\n\n"} {"package":"plotfunctions","topic":"getRange","snippet":"### Name: getRange\n### Title: Function for rounding and/or segmenting a range.\n### Aliases: getRange\n\n### ** Examples\n\nzlim <- c(-2.5, 3.01)\n# does not change anything:\ngetRange(zlim)\n# create a range of 5 numbers: \n# (basically just using seq )\ngetRange(zlim, n.seg=5)\n# rounds the numbers:\ngetRange(zlim, dec=0)\ngetRange(zlim, n.seg=5, dec=0)\n# extreme values are multiples of 5\n# that contain the zlim values:\ngetRange(zlim, step=5)\ngetRange(zlim, step=5, n.seg=5)\n# similar, but not the same:\ngetRange(zlim, n.seg=5, dec=0)\ngetRange(zlim, n.seg=5, step=1)\n# combining:\ngetRange(zlim, n.seg=5, step=1, dec=0)\n\n\n\n"} {"package":"plotfunctions","topic":"getRatioCoords","snippet":"### Name: getRatioCoords\n### Title: Get plot coordinates that preserve a given height-to-width ratio.\n### Aliases: getRatioCoords\n\n### ** Examples\n\ndata(img)\nemptyPlot(100, c(50, 100), h0=0, v0=0)\n# calculate height : width ratio of image:\nim.r <- dim(img$image)[1]/dim(img$image)[2]\np <- getRatioCoords(ratio=im.r, width=20)\n# inspect p:\np\n# No position specified, so centered:\nplot_image(img, type='image', add=TRUE,\n xrange=p$x, yrange=p$y)\n# ... 
or we could provide a position:\np <- getRatioCoords(ratio=im.r, width=20,\n xleft=20, ybottom=60)\nplot_image(img, type='image', add=TRUE,\n xrange=p$x, yrange=p$y)\n\n# Using proportions of plot region:\np <- getRatioCoords(ratio=im.r, height=.5,\n xleft=0, ytop=1, input='prop')\nplot_image(img, type='image', add=TRUE,\n xrange=p$x, yrange=p$y)\n\n# Changing the ratio to square:\np <- getRatioCoords(ratio=1, height=.5,\n xright=1, ybottom=0, input='prop')\nplot_image(img, type='image', add=TRUE,\n xrange=p$x, yrange=p$y)\n# ... and to a long rectangle:\np <- getRatioCoords(ratio=.5, height=1,\n xright=1, ybottom=0, input='prop')\nplot_image(img, type='image', add=TRUE,\n xrange=p$x, yrange=p$y, \n replace.colors=list('#B.+'='#FF000033'),\n border='red')\n\n\n\n"} {"package":"plotfunctions","topic":"get_palette","snippet":"### Name: get_palette\n### Title: Retrieve the color scheme for contour plots.\n### Aliases: get_palette\n\n### ** Examples\n\npal <- get_palette('terrain', nCol=10)\nnames(pal)\nimage(matrix(1:10, ncol=10), col=pal$color, axes=FALSE)\n# user defined color palette:\npal <- get_palette(c('green', 'orange', 'red'))\nimage(matrix(1:10, ncol=10), col=pal$color, axes=FALSE)\n\n\n\n"} {"package":"plotfunctions","topic":"gradientLegend","snippet":"### Name: gradientLegend\n### Title: Add a gradient legend to a plot.\n### Aliases: gradientLegend\n\n### ** Examples\n\n# empty plot:\nemptyPlot(1,1, main='Test plot', axes=FALSE)\nbox()\n# legend on outside of plotregion:\ngradientLegend(valRange=c(-14,14), pos=.5, side=1)\ngradientLegend(valRange=c(-14,14), pos=.5, side=2)\ngradientLegend(valRange=c(-14,14), pos=.5, side=3)\ngradientLegend(valRange=c(-14,14), pos=.5, side=4)\n\n# legend on inside of plotregion:\ngradientLegend(valRange=c(-14,14), pos=.5, side=1, inside=TRUE)\ngradientLegend(valRange=c(-14,14), pos=.5, side=2, inside=TRUE)\ngradientLegend(valRange=c(-14,14), pos=.5, side=3, inside=TRUE)\ngradientLegend(valRange=c(-14,14), pos=.5, side=4, inside=TRUE)\n\n# empty plot:\nemptyPlot(1,1, main='Test plot', axes=FALSE)\nbox()\n# number of segments:\ngradientLegend(valRange=c(-14,14), n.seg=3, pos=.5, side=1)\ngradientLegend(valRange=c(-14,14), n.seg=c(-3,5), pos=.5, side=1, \n inside=TRUE)\n\n# This produces a warning, as there is no space for labels here:\n## Not run: \n##D gradientLegend(valRange=c(-14.235,14.2), pos=.5, \n##D n.seg = c(-7,0), side=4)\n## End(Not run)\n# different solutions:\n# 1. adjust range (make sure also to adjust the range in the plot, \n# for example by changing zlim)\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14,14), n.seg = c(-7,0), side=4)\n# 2. reduce number of decimals:\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14.235,14.2), n.seg = c(-7,0), dec=1, side=4)\n# 3. change labels to inside plot window:\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14.235,14.2), n.seg = c(-7,0), \n dec=1, side=4, inside=TRUE)\n# 4. increase right margin:\noldmar <- par()$mar\npar(mar=c(5.1,3.1,4.1,4.1))\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14.235,14.2), dec=2, \n n.seg = c(-7,0), side=4)\npar(mar=oldmar) # return old values\n# 5. change label position:\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14.235,14.2), dec=2, \n n.seg = c(-7,0), side=4, pos.num=2)\ngradientLegend(valRange=c(-14.235,14.2), dec=2, \n n.seg = c(-7,0), side=4, pos.num=1, pos=.5)\n# 6. 
change legend position and length:\nemptyPlot(1,1, main='Test plot')\ngradientLegend(valRange=c(-14.235,14.2), dec=2, \n n.seg = c(-7,0), side=3, length=.5, pos=.75)\n\n# change border color (and font color too!)\ngradientLegend(valRange=c(-14,14),pos=.75, length=.5,\ncolor=alphaPalette('white', f.seq=seq(0,1, by=.1)), \nborder.col=alpha('gray'))\n\n# when defining custom points, it is still important to specify side:\n\ngradientLegend(valRange=c(-14,14), pos=c(.5,.25,.7,-.05), coords=TRUE, \n border.col='red', side=1)\ngradientLegend(valRange=c(-14,14), pos=c(.5,.25,.7,-.05), coords=TRUE, \n border.col='red', side=2)\n\n\n\n\n"} {"package":"plotfunctions","topic":"group_sort","snippet":"### Name: group_sort\n### Title: Sort split by grouping predictor.\n### Aliases: group_sort\n\n### ** Examples\n\n# example InsectSprays from R datasets\nInsectSprays$Type <- ifelse(InsectSprays$spray %in% c('A','B', 'F'), 1, 2)\n\nind <- group_sort(InsectSprays$count, \n group=list(Spray=InsectSprays$spray, Type=InsectSprays$Type))\nInsectSprays[ind,]\nInsectSprays\n\n\n"} {"package":"plotfunctions","topic":"isColor","snippet":"### Name: isColor\n### Title: Check whether color specifications exists.\n### Aliases: isColor\n\n### ** Examples\n\n# correct color definitions:\nisColor(c('#FF0000FF', '#00FF00FF', '#0000FFFF'))\nisColor(c('red', 'steelblue', 'green3'))\nisColor(c(1,7,28))\n# mixtures are possible too:\nisColor(c('#FF0000FF', 'red', 1, '#FF0000', rgb(.1,0,0)))\n\n# return colors:\n# note that 28 is converted to 4...\nisColor(c(1,7,28), return.colors=TRUE) \nisColor(c('#FF0000CC', 'red', 1, '#FF0000'), return.colors=TRUE)\n\n# 4 incorrect colors, 1 correct:\ntest <- c('#FH0000', 3, '#FF00991', 'lavendel', '#AABBCCFFF')\nisColor(test)\nisColor(test, return.colors=TRUE)\n\n\n\n"} {"package":"plotfunctions","topic":"legend_margin","snippet":"### Name: legend_margin\n### Title: Add legend with respect to figure instead of plot region.\n### Allows to move legend to margin of plot.\n### Aliases: legend_margin\n\n### ** Examples\n\nplot(cars$speed, cars$dist, pch=16)\nlegend_margin('topleft', legend=c('points'), pch=16)\n# compare with default legend:\nlegend('topleft', legend=c('points'), pch=16)\n\n\n"} {"package":"plotfunctions","topic":"list2str","snippet":"### Name: list2str\n### Title: Combine list values as string.\n### Aliases: list2str\n\n### ** Examples\n\ntest <- list(a=c(1,2,3), b='a', c=c(TRUE, FALSE), d='test')\nlist2str(c('a','c', 'd'), test) \n\n\n"} {"package":"plotfunctions","topic":"marginDensityPlot","snippet":"### Name: marginDensityPlot\n### Title: Plot density of distribution in margins of the plot.\n### Aliases: marginDensityPlot\n\n### ** Examples\n\n# density of a random sample from normal distribution:\nval1 <- qnorm(ppoints(500))\nval2 <- qt(ppoints(500), df = 2)\ndens1 <- density(val1)\ndens2 <- density(val2)\n\n# setup plot window:\npar(mfrow=c(1,1), cex=1.1)\n\n# increase margin\noldmar <- par()$mar \npar(mar=oldmar + c(0,0,0,4))\n\n# plot qqnorm\nqqnorm(val2, main='t distribution',\n pch='*', col='steelblue',\n xlim=c(-3,3),\n bty='n')\nqqline(val1)\nabline(h=0, col=alpha('gray'))\nabline(v=0, col=alpha('gray'))\n\n# filled distribution in right margin:\nmarginDensityPlot(dens2, side=4, allDensities=list(dens1, dens2),\n col='steelblue',lwd=2)\n# add lines:\nmarginDensityPlot(dens2, side=4, allDensities=list(dens1, dens2),\n col='steelblue',density=25, lwd=2)\n# compare to normal:\nmarginDensityPlot(dens1, side=4, allDensities=list(dens1, dens2), \n col=NA, border=1)\n# 
Other sides are also possible:\nmarginDensityPlot(dens1, side=3, allDensities=list(dens1, dens2), \n col=NA, border=alpha(1), lwd=2)\nmarginDensityPlot(dens2, side=3, allDensities=list(dens1, dens2), \n col=NA, border=alpha('steelblue'), lwd=3)\n# adjust the starting point with argument 'from' to bottom of plot:\nmarginDensityPlot(dens1, side=3, \n from=getCoords(0, side=2), lwd=2)\nmarginDensityPlot(dens2, side=3, \n col='steelblue', from=getCoords(0, side=2), lwd=2,\n maxDensityValue=2*max(dens2$y))\n\nlegend(getFigCoords('p')[2], getFigCoords('p')[3],\n yjust=0, legend=c('t distribution', 'Gaussian'),\n fill=c('steelblue', 'black'),\n cex=.75, xpd=TRUE, bty='n')\n\n\n\n"} {"package":"plotfunctions","topic":"move_n_point","snippet":"### Name: move_n_point\n### Title: Move a vector n elements forward or backward.\n### Aliases: move_n_point\n\n### ** Examples\n\n(x <- -10:30)\nprev <- move_n_point(x)\nchange <- x - prev\npost5 <- move_n_point(x, n=-5)\n\nemptyPlot(length(x), range(x))\nlines(x)\nlines(prev, col='red')\nlines(post5, col='blue')\n\n\n\n"} {"package":"plotfunctions","topic":"orderBoxplot","snippet":"### Name: orderBoxplot\n### Title: Order boxplot stats following a given ordering.\n### Aliases: orderBoxplot\n\n### ** Examples\n\nhead(ToothGrowth)\n# sort on basis of mean length:\nbp <- boxplot(len ~ dose:supp, data = ToothGrowth, plot=FALSE)\nidx <- sortGroups(len ~ dose:supp, data = ToothGrowth)\nbp2 <- orderBoxplot(bp, idx)\n# compare:\nbp$names\nbp2$names\n\n\n"} {"package":"plotfunctions","topic":"plot_error","snippet":"### Name: plot_error\n### Title: Utility function\n### Aliases: plot_error\n\n### ** Examples\n\n\n# generate some data:\nx <- -10:20\ny <- 0.3*(x - 3)^2 + rnorm(length(x))\ns <- 0.2*abs(100-y + rnorm(length(x)))\n\n# Plot line and standard deviation:\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s)\n# Change layout:\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, shade=TRUE, lty=3, lwd=3)\n\n# Use of se.fit2 for asymmetrical error bars:\ncu <- y + .65*s\ncl <- y - s\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, shade=TRUE)\nplot_error(x, y, se.fit=cu, se.fit2=cl, col='red', shade=TRUE)\n\n# Some layout options:\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, lty=3, lwd=1, ci.lty=1, ci.lwd=3)\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, shade=TRUE, lty=3, lwd=3)\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, shade=TRUE, lty=1, lwd=3, ci.lwd=3, border='red')\nemptyPlot(range(x), range(y), h0=0)\nplot_error(x, y, s, shade=TRUE, lty=1, lwd=3, density=10, ci.lwd=3)\n\n\n\n"} {"package":"plotfunctions","topic":"plot_image","snippet":"### Name: plot_image\n### Title: Add images to plots.\n### Aliases: plot_image\n\n### ** Examples\n\n\n# see Volcano example at help(image)\n# create image object:\nmyimg <- list(image=volcano-min(volcano), col=terrain.colors(max(volcano)-min(volcano)))\n# create empty plot window:\nemptyPlot(1,1, main='Volcano images')\n# add image topleft corner:\nplot_image(img=myimg, xrange=c(0,.25), yrange=c(.75,1), add=TRUE)\n# add transparent image as overlay:\nmyimg$col <- alpha(myimg$col, f=.25)\nplot_image(img=myimg, add=TRUE, fill.plotregion=TRUE, bty='n')\n# add image:\nmyimg$col <- topo.colors(max(myimg$image))\nplot_image(img=myimg, xrange=c(0.125,.375), yrange=c(.5,.875), add=TRUE)\n# add some points and lines:\npoints(runif(10,0,1), runif(10,0,1), type='o')\n\n# keep ratio:\nemptyPlot(1,1, main='Volcano images')\n# I would like to add an image in the following 
field:\nrect(xleft=0, xright=.5, ybottom=0, ytop=.3, col='gray', border=NA)\n# add image with keep.ratio=TRUE\nplot_image(img=myimg, xrange=c(0,.5), yrange=c(0,.3), \n add=TRUE, keep.ratio=TRUE, border=NA)\n# as y-side is longest, this side will be fitted in \n# the rectangle and the x position adjusted with adj:\nplot_image(img=myimg, xrange=c(0,.5), yrange=c(0,.3), \n add=TRUE, keep.ratio=TRUE, border=2, adj=0.5)\nplot_image(img=myimg, xrange=c(0,.5), yrange=c(0,.3), \n add=TRUE, keep.ratio=TRUE, border=3, adj=1)\n\n# keep.ratio and border:\nplot_image(img=myimg, xrange=c(0,1), yrange=c(0,1), \n keep.ratio=TRUE, adj=0.5)\nplot_image(img=myimg, xrange=c(0,.5), yrange=c(0,1), \n keep.ratio=TRUE, adj=0.5)\nemptyPlot(1,1, axes=FALSE)\nplot_image(img=myimg, xrange=c(0,1), yrange=c(0,1), \n add=TRUE, keep.ratio=TRUE, adj=0.5)\n\n\n\n"} {"package":"plotfunctions","topic":"plot_signifArea","snippet":"### Name: plot_signifArea\n### Title: Creates a colored surface plot from data frame input.\n### Aliases: plot_signifArea\n\n### ** Examples\n\n# From the package graphics, see help(image):\nx <- 10*(1:nrow(volcano))\ny <- 10*(1:ncol(volcano))\ntmp <- data.frame(value = (as.vector(volcano) - 120), \n x = 10*rep(1:nrow(volcano), ncol(volcano)), \n y = 10*rep(1:ncol(volcano), each=nrow(volcano)),\n CI = rep(20, nrow(volcano)*ncol(volcano)))\nplotsurface(tmp, view=c('x', 'y'), predictor='value', main='Maunga Whau Volcano')\nplot_signifArea(tmp, view=c('x', 'y'), predictor='value', valCI='CI')\n\n# change color:\nplotsurface(tmp, view=c('x', 'y'), predictor='value', main='Maunga Whau Volcano')\nplot_signifArea(tmp, view=c('x', 'y'), predictor='value', valCI='CI', \n col='red')\n# or completely remove 'nonsignificant' area:\nplot_signifArea(tmp, view=c('x', 'y'), predictor='value', valCI='CI', \n col='white', alpha=1)\n\n\n\n"} {"package":"plotfunctions","topic":"plotsurface","snippet":"### Name: plotsurface\n### Title: Creates a colored surface plot from data frame input.\n### Aliases: plotsurface\n\n### ** Examples\n\n\n# From the package graphics, see help(image):\nx <- 10*(1:nrow(volcano))\ny <- 10*(1:ncol(volcano))\nimage(x, y, volcano, col = terrain.colors(100), axes = FALSE)\ncontour(x, y, volcano, levels = seq(90, 200, by = 5),\n add = TRUE, col = 'peru')\naxis(1, at = seq(100, 800, by = 100))\naxis(2, at = seq(100, 600, by = 100))\nbox()\ntitle(main = 'Maunga Whau Volcano', font.main = 4)\n\n# now with plot surface:\n# first convert to data frame\ntmp <- data.frame(value = as.vector(volcano), \n x = 10*rep(1:nrow(volcano), ncol(volcano)), \n y = 10*rep(1:ncol(volcano), each=nrow(volcano)))\nplotsurface(tmp, view=c('x', 'y'), predictor='value', \n main='Maunga Whau Volcano')\n\n# or with gray scale colors:\nplotsurface(tmp, view=c('x', 'y'), predictor='value', \n main='Maunga Whau Volcano', color='gray')\n\n# change color range:\nplotsurface(tmp, view=c('x', 'y'), predictor='value', \n main='Maunga Whau Volcano', zlim=c(0,200))\n\n# remove color and color legend:\nplotsurface(tmp, view=c('x', 'y'), predictor='value', \n main='Maunga Whau Volcano', \n color=NULL, col=1, add.color.legend=FALSE)\n\n\n\n"} {"package":"plotfunctions","topic":"se","snippet":"### Name: se\n### Title: Calculate standard error of the mean.\n### Aliases: se\n\n### ** Examples\n\n# load example data:\ndata(chickwts)\nstr(chickwts)\n\n# first calculate means per feeding type:\navg <- with(chickwts, tapply(weight, list(feed), mean))\npar(cex=1.25)\nb <- barplot(avg, beside=TRUE, names.arg=FALSE, ylim=c(0,450))\ntext(b, 
rep(0, length(b)), labels=names(avg), srt=90, adj=-.25)\n\n# calculate mean collapsing over feeding types:\nabline(h=mean(avg), lwd=1.5, col='red1')\n# add SE reflecting variation between feeding types:\nabline(h=mean(avg)+c(-1,1)*se(avg), lty=2, col='red1')\ntext(getCoords(.5), mean(avg)+se(avg), \n labels=expression('mean' %+-% '1SE'), pos=3, col='red1')\n\n# Note that SE makes more sense for experiments with \n# different groups or participants.\n\n\n\n"} {"package":"plotfunctions","topic":"sortBoxplot","snippet":"### Name: sortBoxplot\n### Title: Produce box-and-whisker plot(s) ordered by function such as mean\n### or median.\n### Aliases: sortBoxplot\n\n### ** Examples\n\nhead(ToothGrowth)\n# sort on basis of mean length:\nsortBoxplot(len ~ dose:supp, data = ToothGrowth)\n# sort on basis of median length:\nsortBoxplot(len ~ dose:supp, data = ToothGrowth, decreasing=FALSE)\n# on the basis of variation (sd):\nsortBoxplot(len ~ dose:supp, data = ToothGrowth, FUN='sd', col=alpha(2))\n\n\n"} {"package":"plotfunctions","topic":"sortGroups","snippet":"### Name: sortGroups\n### Title: Sort groups based on a function such as mean value or deviation.\n### Aliases: sortGroups\n\n### ** Examples\n\nhead(ToothGrowth)\n# sort on basis of mean length:\nsortGroups(len ~ dose:supp, data = ToothGrowth)\nlabels = levels(interaction(ToothGrowth$dose, ToothGrowth$supp))\nlabels[sortGroups(len ~ dose:supp, data = ToothGrowth)]\n\n\n"} {"package":"tmvtnorm","topic":"dtmvnorm.marginal","snippet":"### Name: dtmvnorm.marginal\n### Title: One-dimensional marginal density functions from a Truncated\n### Multivariate Normal distribution\n### Aliases: dtmvnorm.marginal\n### Keywords: distribution multivariate\n\n### ** Examples\n\n#############################################\n#\n# Example 1: truncated bivariate normal\n#\n#############################################\n\n# parameters of the bivariate normal distribution\nsigma = matrix(c(1 , 0.95,\n 0.95, 1 ), 2, 2)\nmu = c(0,0)\n\n# sample from multivariate normal distribution\nX = rmvnorm(5000, mu, sigma)\n\n# truncation in x2 with x2 <= 0\nX.trunc = X[X[,2]<0,]\n\n# plot the realisations before and after truncation\npar(mfrow=c(2,2))\nplot(X, col=\"gray\", xlab=expression(x[1]), ylab=expression(x[2]), \n main=\"realisations from a\\n truncated bivariate normal distribution\")\npoints(X.trunc)\nabline(h=0, lty=2, col=\"gray\")\n#legend(\"topleft\", col=c(\"gray\", \"black\")\n\n# marginal density for x1 from realisations\nplot(density(X.trunc[,1]), main=expression(\"marginal density for \"*x[1]))\n\n# one-dimensional marginal density for x1 using the formula\nx <- seq(-5, 5, by=0.01)\nfx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, \n lower=c(-Inf,-Inf), upper=c(Inf,0))\nlines(x, fx, lwd=2, col=\"red\")\n\n# marginal density for x2\nplot(density(X.trunc[,2]), main=expression(\"marginal density for \"*x[2]))\n\n# one-dimensional marginal density for x2 using the formula\nx <- seq(-5, 5, by=0.01)\nfx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, \n lower=c(-Inf,-Inf), upper=c(Inf,0))\nlines(x, fx, lwd=2, col=\"blue\")\n\n#############################################\n#\n# Example 2 : truncated trivariate normal\n#\n#############################################\n\n# parameters of the trivariate normal distribution\nsigma = outer(1:3,1:3,pmin)\nmu = c(0,0,0)\n\n# sample from multivariate normal distribution\nX = rmvnorm(2000, mu, sigma)\n\n# truncation in x2 and x3 : x2 <= 0, x3 <= 0\nX.trunc = X[X[,2]<=0 & X[,3]<=0,]\n\npar(mfrow=c(2,3))\nplot(X, 
col=\"gray\", xlab=expression(x[1]), ylab=expression(x[2]), \n main=\"realisations from a\\n truncated trivariate normal distribution\")\npoints(X.trunc, col=\"black\")\nabline(h=0, lty=2, col=\"gray\")\n\nplot(X[,2:3], col=\"gray\", xlab=expression(x[2]), ylab=expression(x[3]), \n main=\"realisations from a\\n truncated trivariate normal distribution\")\npoints(X.trunc[,2:3], col=\"black\")\nabline(h=0, lty=2, col=\"gray\")\nabline(v=0, lty=2, col=\"gray\")\n\nplot(X[,c(1,3)], col=\"gray\", xlab=expression(x[1]), ylab=expression(x[3]), \n main=\"realisations from a\\n truncated trivariate normal distribution\")\npoints(X.trunc[,c(1,3)], col=\"black\")\nabline(h=0, lty=2, col=\"gray\")\n\n# one-dimensional marginal density for x1 from realisations and formula\nplot(density(X.trunc[,1]), main=expression(\"marginal density for \"*x[1]))\nx <- seq(-5, 5, by=0.01)\nfx <- dtmvnorm.marginal(x, n=1, mean=mu, sigma=sigma, \n lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))\nlines(x, fx, lwd=2, col=\"red\")\n\n# one-dimensional marginal density for x2 from realisations and formula\nplot(density(X.trunc[,2]), main=expression(\"marginal density for \"*x[2]))\nx <- seq(-5, 5, by=0.01)\nfx <- dtmvnorm.marginal(x, n=2, mean=mu, sigma=sigma, \n lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))\nlines(x, fx, lwd=2, col=\"red\")\n\n# one-dimensional marginal density for x3 from realisations and formula\nplot(density(X.trunc[,3]), main=expression(\"marginal density for \"*x[3]))\nx <- seq(-5, 5, by=0.01)\nfx <- dtmvnorm.marginal(x, n=3, mean=mu, sigma=sigma, \n lower=c(-Inf,-Inf,-Inf), upper=c(Inf,0,0))\nlines(x, fx, lwd=2, col=\"red\")\n\n\n"} {"package":"tmvtnorm","topic":"dtmvnorm.marginal2","snippet":"### Name: dtmvnorm.marginal2\n### Title: Bivariate marginal density functions from a Truncated\n### Multivariate Normal distribution\n### Aliases: dtmvnorm.marginal2\n### Keywords: distribution multivariate\n\n### ** Examples\n\n \n lower = c(-0.5, -1, -1)\n upper = c( 2.2, 2, 2)\n \n mean = c(0,0,0)\n sigma = matrix(c(2.0, -0.6, 0.7, \n -0.6, 1.0, -0.2, \n 0.7, -0.2, 1.0), 3, 3)\n \n # generate random samples from untruncated and truncated distribution\n Y = rmvnorm(10000, mean=mean, sigma=sigma)\n X = rtmvnorm(500, mean=mean, sigma=sigma, lower=lower, upper=upper, \n algorithm=\"gibbs\")\n \n # compute bivariate marginal density of x1 and x2\n xq <- seq(lower[1], upper[1], by=0.1)\n xr <- seq(lower[2], upper[2], by=0.1)\n \n grid <- matrix(NA, length(xq), length(xr))\n for (i in 1:length(xq))\n {\n for (j in 1:length(xr))\n {\n grid[i,j] = dtmvnorm.marginal2(xq=xq[i], xr=xr[j], \n q=1, r=2, sigma=sigma, lower=lower, upper=upper)\n }\n }\n \n plot(Y[,1], Y[,2], xlim=c(-4, 4), ylim=c(-4, 4), \n main=expression(\"bivariate marginal density (\"*x[1]*\",\"*x[2]*\")\"), \n xlab=expression(x[1]), ylab=expression(x[2]), col=\"gray80\")\n points(X[,1], X[,2], col=\"black\")\n \n lines(x=c(lower[1], upper[1], upper[1], lower[1], lower[1]), \n y=c(lower[2],lower[2],upper[2],upper[2],lower[2]), \n lty=2, col=\"red\")\n contour(xq, xr, grid, add=TRUE, nlevels = 8, col=\"red\", lwd=2)\n \n # scatterplot matrices for untruncated and truncated points\n require(lattice)\n splom(Y)\n splom(X)\n\n\n"} {"package":"tmvtnorm","topic":"dtmvt","snippet":"### Name: dtmvt\n### Title: Truncated Multivariate Student t Density\n### Aliases: dtmvt\n### Keywords: distribution multivariate\n\n### ** Examples\n\n# Example\n\nx1 <- seq(-2, 3, by=0.1)\nx2 <- seq(-2, 3, by=0.1)\n\nmean <- c(0,0)\nsigma <- matrix(c(1, -0.5, -0.5, 1), 2, 2)\nlower <- 
c(-1,-1)\n\n\ndensity <- function(x)\n{\n\tz=dtmvt(x, mean=mean, sigma=sigma, lower=lower)\n\tz\n}\n\nfgrid <- function(x, y, f)\n{\n\tz <- matrix(nrow=length(x), ncol=length(y))\n\tfor(m in 1:length(x)){\n\t\tfor(n in 1:length(y)){\n\t\t\tz[m,n] <- f(c(x[m], y[n]))\n\t\t}\n\t}\n\tz\n}\n\n# compute multivariate-t density d for grid\nd <- fgrid(x1, x2, function(x) dtmvt(x, mean=mean, sigma=sigma, lower=lower))\n\n# compute multivariate normal density d for grid\nd2 <- fgrid(x1, x2, function(x) dtmvnorm(x, mean=mean, sigma=sigma, lower=lower))\n\n# plot density as contourplot\ncontour(x1, x2, d, nlevels=5, main=\"Truncated Multivariate t Density\", \n\t\txlab=expression(x[1]), ylab=expression(x[2]))\n\ncontour(x1, x2, d2, nlevels=5, add=TRUE, col=\"red\")\nabline(v=-1, lty=3, lwd=2)\nabline(h=-1, lty=3, lwd=2)\n\n\n"} {"package":"tmvtnorm","topic":"gmm.tmvnorm","snippet":"### Name: gmm.tmvnorm\n### Title: GMM Estimation for the Truncated Multivariate Normal\n### Distribution\n### Aliases: gmm.tmvnorm\n\n### ** Examples\n\n## Not run: \n##D set.seed(1.234)\n##D \n##D # the actual parameters\n##D lower <- c(-1, -2)\n##D upper <- c(3, Inf)\n##D mu <- c(0, 0)\n##D sigma <- matrix(c(1, 0.8,\n##D 0.8, 2), 2, 2)\n##D \n##D # generate random samples \n##D X <- rtmvnorm(n=500, mu, sigma, lower, upper)\n##D \n##D # estimate mean vector and covariance matrix sigma from random samples X\n##D # with default start values\n##D gmm.fit1 <- gmm.tmvnorm(X, lower=lower, upper=upper)\n##D \n##D # diagnostic output of the estimated parameters\n##D summary(gmm.fit1)\n##D vcov(gmm.fit1)\n##D \n##D # confidence intervals\n##D confint(gmm.fit1)\n##D \n##D # choosing a different start value\n##D gmm.fit2 <- gmm.tmvnorm(X, lower=lower, upper=upper, \n##D start=list(mu=c(0.1, 0.1), \n##D sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2)))\n##D summary(gmm.fit2)\n##D \n##D # GMM estimation with Lee (1983) moment conditions\n##D gmm.fit3 <- gmm.tmvnorm(X, lower=lower, upper=upper, method=\"Lee\")\n##D summary(gmm.fit3)\n##D confint(gmm.fit3)\n##D \n##D # MLE estimation for comparison\n##D mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper)\n##D confint(mle.fit1)\n## End(Not run)\n\n\n"} {"package":"tmvtnorm","topic":"mle.tmvnorm","snippet":"### Name: mle.tmvnorm\n### Title: Maximum Likelihood Estimation for the Truncated Multivariate\n### Normal Distribution\n### Aliases: mle.tmvnorm\n\n### ** Examples\n\n## Not run: \n##D set.seed(1.2345)\n##D \n##D # the actual parameters\n##D lower <- c(-1,-1)\n##D upper <- c(1, 2)\n##D mu <- c(0, 0)\n##D sigma <- matrix(c(1, 0.7,\n##D 0.7, 2), 2, 2)\n##D \n##D # generate random samples \n##D X <- rtmvnorm(n=500, mu, sigma, lower, upper)\n##D method <- \"BFGS\"\n##D \n##D # estimate mean vector and covariance matrix sigma from random samples X\n##D # with default start values\n##D mle.fit1 <- mle.tmvnorm(X, lower=lower, upper=upper)\n##D \n##D # diagnostic output of the estimated parameters\n##D summary(mle.fit1)\n##D logLik(mle.fit1)\n##D vcov(mle.fit1)\n##D \n##D # profiling the log likelihood and confidence intervals\n##D mle.profile1 <- profile(mle.fit1, X, method=\"BFGS\", trace=TRUE)\n##D confint(mle.profile1)\n##D \n##D par(mfrow=c(3,2))\n##D plot(mle.profile1)\n##D \n##D # choosing a different start value\n##D mle.fit2 <- mle.tmvnorm(X, lower=lower, upper=upper, \n##D start=list(mu=c(0.1, 0.1), \n##D sigma=matrix(c(1, 0.4, 0.4, 1.8),2,2)))\n##D summary(mle.fit2)\n## End(Not run)\n\n\n"} {"package":"tmvtnorm","topic":"mtmvnorm","snippet":"### Name: mtmvnorm\n### Title: Computation of Mean 
Vector and Covariance Matrix For Truncated\n### Multivariate Normal Distribution\n### Aliases: mtmvnorm moments\n### Keywords: distribution multivariate\n\n### ** Examples\n\n mu <- c(0.5, 0.5, 0.5)\n sigma <- matrix(c( 1, 0.6, 0.3,\n 0.6, 1, 0.2,\n 0.3, 0.2, 2), 3, 3)\n \n a <- c(-Inf, -Inf, -Inf)\n b <- c(1, 1, 1)\n\n # compute first and second moments\n mtmvnorm(mu, sigma, lower=a, upper=b)\n \n # compare with simulated results\n X <- rtmvnorm(n=1000, mean=mu, sigma=sigma, lower=a, upper=b)\n colMeans(X)\n cov(X)\n\n\n"} {"package":"tmvtnorm","topic":"ptmvnorm","snippet":"### Name: ptmvnorm\n### Title: Truncated Multivariate Normal Distribution\n### Aliases: ptmvnorm\n### Keywords: distribution multivariate\n\n### ** Examples\n\n sigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2)\n Fx <- ptmvnorm(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), \n sigma=sigma, lower=c(-1,-1), upper=c(1,1))\n\n\n"} {"package":"tmvtnorm","topic":"ptmvnorm.marginal","snippet":"### Name: ptmvtnorm.marginal\n### Title: One-dimensional marginal CDF function for a Truncated\n### Multivariate Normal and Student t distribution\n### Aliases: ptmvnorm.marginal ptmvt.marginal\n### Keywords: distribution multivariate\n\n### ** Examples\n\n## Example 1: Truncated multi-normal\nlower <- c(-1,-1,-1)\nupper <- c(1,1,1)\nmean <- c(0,0,0)\nsigma <- matrix(c( 1, 0.8, 0.2, \n 0.8, 1, 0.1,\n 0.2, 0.1, 1), 3, 3)\n\nX <- rtmvnorm(n=1000, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper)\n\nx <- seq(-1, 1, by=0.01)\nFx <- ptmvnorm.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) \n\nplot(ecdf(X[,1]), main=\"marginal CDF for truncated multi-normal\")\nlines(x, Fx, type=\"l\", col=\"blue\")\n\n## Example 2: Truncated multi-t\nX <- rtmvt(n=1000, mean=c(0,0,0), sigma=sigma, df=2, lower=lower, upper=upper)\n\nx <- seq(-1, 1, by=0.01)\nFx <- ptmvt.marginal(xn=x, n=1, mean=c(0,0,0), sigma=sigma, lower=lower, upper=upper) \n\nplot(ecdf(X[,1]), main=\"marginal CDF for truncated multi-t\")\nlines(x, Fx, type=\"l\", col=\"blue\")\n\n\n"} {"package":"tmvtnorm","topic":"ptmvt","snippet":"### Name: ptmvt\n### Title: Truncated Multivariate Student t Distribution\n### Aliases: ptmvt\n### Keywords: math multivariate\n\n### ** Examples\n\nsigma <- matrix(c(5, 0.8, 0.8, 1), 2, 2)\nFx <- ptmvt(lowerx=c(-1,-1), upperx=c(0.5,0), mean=c(0,0), sigma=sigma, df=3, \n lower=c(-1,-1), upper=c(1,1))\n\n\n"} {"package":"tmvtnorm","topic":"qtmvnorm.marginal","snippet":"### Name: qtmvnorm-marginal\n### Title: Quantiles of the Truncated Multivariate Normal Distribution in\n### one dimension\n### Aliases: qtmvnorm.marginal\n### Keywords: distribution multivariate\n\n### ** Examples\n\n# finite dimensional distribution of the Geometric Brownian Motion log-returns \n# with truncation\n\n# volatility p.a.\nsigma=0.4\n\n# risk free rate\nr = 0.05\n\n# n=3 points in time\nT <- c(0.5, 0.7, 1)\n\n# covariance matrix of Geometric Brownian Motion returns\nSigma = sigma^2*outer(T,T,pmin)\n\n# mean vector of the Geometric Brownian Motion returns\nmu = (r - sigma^2/2) * T\n\n# lower truncation vector a (a<=x<=b)\na = rep(-Inf, 3)\n\n# upper truncation vector b (a<=x<=b)\nb = c(0, 0, Inf)\n\n# quantile of the t_1 returns\nqtmvnorm.marginal(p=0.95, interval = c(-10, 10), tail = \"lower.tail\", n=1, \n mean = mu, sigma = Sigma, lower=a, upper=b)\n\n\n"} {"package":"tmvtnorm","topic":"rtmvnorm","snippet":"### Name: rtmvnorm\n### Title: Sampling Random Numbers From The Truncated Multivariate Normal\n### Distribution\n### Aliases: rtmvnorm 
rtmvnorm.sparseMatrix\n### Keywords: distribution multivariate\n\n### ** Examples\n\n################################################################################\n#\n# Example 1: \n# rejection sampling in 2 dimensions \n#\n################################################################################\n\nsigma <- matrix(c(4,2,2,3), ncol=2)\nx <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0))\nplot(x, main=\"samples from truncated bivariate normal distribution\",\n xlim=c(-6,6), ylim=c(-6,6), \n xlab=expression(x[1]), ylab=expression(x[2]))\nabline(v=1, lty=3, lwd=2, col=\"gray\")\nabline(h=0, lty=3, lwd=2, col=\"gray\")\n\n################################################################################\n#\n# Example 2: \n# Gibbs sampler for 4 dimensions\n#\n################################################################################\n\nC <- matrix(0.8, 4, 4)\ndiag(C) <- rep(1, 4)\nlower <- rep(-4, 4)\nupper <- rep(-1, 4)\n\n# acceptance rate alpha\nalpha <- pmvnorm(lower=lower, upper=upper, mean=rep(0,4), sigma=C)\nalpha\n\n# Gibbs sampler\nX1 <- rtmvnorm(n=20000, mean = rep(0,4), sigma=C, lower=lower, upper=upper, \n algorithm=\"gibbs\", burn.in.samples=100)\n# Rejection sampling\nX2 <- rtmvnorm(n=5000, mean = rep(0,4), sigma=C, lower=lower, upper=upper)\n\ncolMeans(X1)\ncolMeans(X2)\n\nplot(density(X1[,1], from=lower[1], to=upper[1]), col=\"red\", lwd=2, \n main=\"Kernel density estimates from random samples \n generated by Gibbs vs. Rejection sampling\")\nlines(density(X2[,1], from=lower[1], to=upper[1]), col=\"blue\", lwd=2)\nlegend(\"topleft\",legend=c(\"Gibbs Sampling\",\"Rejection Sampling\"), \n col=c(\"red\",\"blue\"), lwd=2, bty=\"n\")\n\n################################################################################\n#\n# Example 3: \n# Autocorrelation plot for Gibbs sampler\n# with and without thinning\n#\n################################################################################\n\nsigma <- matrix(c(4,2,2,3), ncol=2)\nX1 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), \n algorithm=\"rejection\")\nacf(X1)\n# no autocorrelation among random points\n\nX2 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), \n algorithm=\"gibbs\")\nacf(X2)\n# exhibits autocorrelation among random points\n\nX3 <- rtmvnorm(n=10000, mean=c(1,2), sigma=sigma, upper=c(1,0), \n algorithm=\"gibbs\", thinning=2)\nacf(X3)\n# reduced autocorrelation among random points\n\nplot(density(X1[,1], to=1))\nlines(density(X2[,1], to=1), col=\"blue\")\nlines(density(X3[,1], to=1), col=\"red\")\n\n################################################################################\n#\n# Example 4: Univariate case\n#\n################################################################################\n\nX <- rtmvnorm(100, mean=0, sigma=1, lower=-1, upper=1)\n\n################################################################################\n#\n# Example 5: Linear Constraints\n#\n################################################################################\n\nmean <- c(0, 0)\nsigma <- matrix(c(10, 0,\n 0, 1), 2, 2)\n\n# Linear Constraints\n#\n# a1 <= x1 + x2 <= b1\n# a2 <= x1 - x2 <= b2\n#\n# [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1]\n# [ a2 ] [ 1 -1 ] [ x2 ] [b2]\na <- c(-2, -2)\nb <- c( 2, 2)\nD <- matrix(c(1, 1,\n 1, -1), 2, 2) \n\nX <- rtmvnorm(n=10000, mean, sigma, lower=a, upper=b, D=D, algorithm=\"gibbsR\")\nplot(X, main=\"Gibbs sampling for multivariate normal \n with linear constraints according to Geweke (1991)\")\n\n# mark linear constraints as lines\nfor (i in 1:nrow(D)) {\n 
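# constraint i is tight along D[i,1]*x1 + D[i,2]*x2 = const, i.e. the line\n # x2 = const/D[i,2] - (D[i,1]/D[i,2])*x1; abline() below uses exactly this\n # intercept const/D[i,2] and slope -D[i,1]/D[i,2] for const = a[i] and b[i]:\n 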
abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col=\"red\")\n abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col=\"red\")\n}\n \n################################################################################\n#\n# Example 6: Using precision matrix H rather than sigma\n#\n################################################################################\n\nlower <- c(-1, -1)\nupper <- c(1, 1)\nmean <- c(0.5, 0.5)\nsigma <- matrix(c(1, 0.8, 0.8, 1), 2, 2)\nH <- solve(sigma)\nD <- matrix(c(1, 1, 1, -1), 2, 2)\nX <- rtmvnorm(n=1000, mean=mean, H=H, lower=lower, upper=upper, D=D, algorithm=\"gibbs\")\nplot(X, main=\"Gibbs sampling with precision matrix and linear constraints\")\n\n################################################################################\n#\n# Example 7: Using sparse precision matrix H in high dimensions\n#\n################################################################################\n\n## Not run: \n##D d <- 1000\n##D I_d <- sparseMatrix(i=1:d, j=1:d, x=1)\n##D W <- sparseMatrix(i=c(1:d, 1:(d-1)), j=c(1:d, (2:d)), x=0.5)\n##D H <- t(I_d - 0.5 * W) %*% (I_d - 0.5 * W)\n##D \n##D lower <- rep(0, d)\n##D upper <- rep(2, d)\n##D \n##D # Gibbs sampler generates n=100 draws in d=1000 dimensions\n##D X <- rtmvnorm.sparseMatrix(n=100, mean = rep(0,d), H=H, lower=lower, upper=upper,\n##D burn.in.samples=100)\n##D colMeans(X) \n##D cov(X)\n## End(Not run)\n\n\n"} {"package":"tmvtnorm","topic":"rtmvnorm2","snippet":"### Name: rtmvnorm2\n### Title: Sampling Random Numbers From The Truncated Multivariate Normal\n### Distribution With Linear Constraints\n### Aliases: rtmvnorm2\n### Keywords: distribution multivariate\n\n### ** Examples\n\n## Not run: \n##D ################################################################################\n##D #\n##D # Example 5a: Number of linear constraints r > dimension d\n##D #\n##D ################################################################################\n##D \n##D # general linear restrictions a <= Dx <= b with x (d x 1); D (r x d); a,b (r x 1)\n##D \n##D # Dimension d=2, r=3 linear constraints\n##D #\n##D # a1 <= x1 + x2 <= b1\n##D # a2 <= x1 - x2 <= b2\n##D # a3 <= 0.5x1 - x2 <= b3\n##D #\n##D # [ a1 ] <= [ 1 1 ] [ x1 ] <= [b1]\n##D # [ a2 ] [ 1 -1 ] [ x2 ] [b2]\n##D # [ a3 ] [ 0.5 -1 ] [b3]\n##D \n##D D <- matrix(\n##D c( 1, 1,\n##D 1, -1,\n##D 0.5, -1), 3, 2, byrow=TRUE)\n##D a <- c(0, 0, 0)\n##D b <- c(1, 1, 1)\n##D \n##D # mark linear constraints as lines\n##D plot(NA, xlim=c(-0.5, 1.5), ylim=c(-1,1))\n##D for (i in 1:3) {\n##D abline(a=a[i]/D[i, 2], b=-D[i,1]/D[i, 2], col=\"red\")\n##D abline(a=b[i]/D[i, 2], b=-D[i,1]/D[i, 2], col=\"red\")\n##D }\n##D \n##D ### Gibbs sampling for general linear constraints a <= Dx <= b\n##D mean <- c(0, 0)\n##D sigma <- matrix(c(1.0, 0.2, \n##D 0.2, 1.0), 2, 2)\n##D x0 <- c(0.5, 0.2) # Gibbs sampler start value \n##D X <- rtmvnorm2(n=1000, mean, sigma, lower=a, upper=b, D, start.value=x0)\n##D \n##D # show random points within simplex\n##D points(X, pch=20, col=\"black\")\n## End(Not run)\n\n\n"} {"package":"tmvtnorm","topic":"rtmvt","snippet":"### Name: rtmvt\n### Title: Sampling Random Numbers From The Truncated Multivariate Student\n### t Distribution\n### Aliases: rtmvt\n### Keywords: distribution multivariate\n\n### ** Examples\n\n###########################################################\n#\n# Example 1\n#\n###########################################################\t\n\n# Draw from multi-t distribution without truncation\nX1 <- rtmvt(n=10000, mean=rep(0, 2), df=2)\nX2 <- rtmvt(n=10000, mean=rep(0, 2), df=2, 
lower=c(-1,-1), upper=c(1,1))\n\n###########################################################\n#\n# Example 2\n#\n###########################################################\t\n\ndf = 2\nmu = c(1,1,1)\nsigma = matrix(c( 1, 0.5, 0.5,\n 0.5, 1, 0.5,\n 0.5, 0.5, 1), 3, 3)\nlower = c(-2,-2,-2)\nupper = c(2, 2, 2)\n\n# Rejection sampling\nX1 <- rtmvt(n=10000, mu, sigma, df, lower, upper)\n\n# Gibbs sampling without thinning\nX2 <- rtmvt(n=10000, mu, sigma, df, lower, upper, \n algorithm=\"gibbs\")\n\n# Gibbs sampling with thinning\nX3 <- rtmvt(n=10000, mu, sigma, df, lower, upper, \n algorithm=\"gibbs\", thinning=2)\t\n \nplot(density(X1[,1], from=lower[1], to=upper[1]), col=\"red\", lwd=2,\n main=\"Gibbs vs. Rejection\")\nlines(density(X2[,1], from=lower[1], to=upper[1]), col=\"blue\", lwd=2)\nlegend(\"topleft\",legend=c(\"Rejection Sampling\",\"Gibbs Sampling\"), \n col=c(\"red\",\"blue\"), lwd=2)\n\nacf(X1) # no autocorrelation in Rejection sampling\nacf(X2) # strong autocorrelation of Gibbs samples\nacf(X3) # reduced autocorrelation of Gibbs samples after thinning\t\n\n\n"} {"package":"tmvtnorm","topic":"dtmvnorm","snippet":"### Name: tmvnorm\n### Title: Truncated Multivariate Normal Density\n### Aliases: dtmvnorm\n### Keywords: distribution multivariate\n\n### ** Examples\n\ndtmvnorm(x=c(0,0), mean=c(1,1), upper=c(0,0))\n\n###########################################\n#\n# Example 1: \n# truncated multivariate normal density \n#\n############################################\n\nx1<-seq(-2, 3, by=0.1)\nx2<-seq(-2, 3, by=0.1)\n\ndensity<-function(x)\n{\n sigma=matrix(c(1, -0.5, -0.5, 1), 2, 2)\n z=dtmvnorm(x, mean=c(0,0), sigma=sigma, lower=c(-1,-1))\n z\n}\n\nfgrid <- function(x, y, f)\n{\n z <- matrix(nrow=length(x), ncol=length(y))\n for(m in 1:length(x)){\n for(n in 1:length(y)){\n z[m,n] <- f(c(x[m], y[n]))\n }\n }\n z\n}\n\n# compute density d for grid\nd=fgrid(x1, x2, density)\n\n# plot density as contourplot\ncontour(x1, x2, d, nlevels=5, main=\"Truncated Multivariate Normal Density\", \n xlab=expression(x[1]), ylab=expression(x[2]))\nabline(v=-1, lty=3, lwd=2)\nabline(h=-1, lty=3, lwd=2)\n\n###########################################\n#\n# Example 2: \n# generation of random numbers\n# from a truncated multivariate normal distribution \n#\n############################################\n\nsigma <- matrix(c(4,2,2,3), ncol=2)\nx <- rtmvnorm(n=500, mean=c(1,2), sigma=sigma, upper=c(1,0))\nplot(x, main=\"samples from truncated bivariate normal distribution\",\n xlim=c(-6,6), ylim=c(-6,6), \n xlab=expression(x[1]), ylab=expression(x[2]))\nabline(v=1, lty=3, lwd=2, col=\"gray\")\nabline(h=0, lty=3, lwd=2, col=\"gray\")\n\n\n"} {"package":"gdata","topic":"Args","snippet":"### Name: Args\n### Title: Describe Function Arguments\n### Aliases: Args\n### Keywords: programming utilities documentation\n\n### ** Examples\n\nArgs(glm)\nArgs(scan)\nArgs(legend, sort=TRUE)\n\n\n"} {"package":"gdata","topic":"ConvertMedUnits","snippet":"### Name: ConvertMedUnits\n### Title: Convert medical measurements between International Standard (SI)\n### and US 'Conventional' Units.\n### Aliases: ConvertMedUnits\n### Keywords: manip\n\n### ** Examples\n\ndata(MedUnits)\n\n# Show available conversions\nMedUnits$Measurement\n\n# Convert SI Glucose measurement to 'Conventional' units\nGlucoseSI <- c(5, 5.4, 5, 5.1, 5.6, 5.1, 4.9, 5.2, 5.5) # in SI Units\nGlucoseUS <- ConvertMedUnits(GlucoseSI, \"Glucose\", to=\"US\")\ncbind(GlucoseSI, GlucoseUS)\n\n## Not run: \n##D # See what happens when there is more than one 
match\n##D ConvertMedUnits(27.5, \"Creatin\", to=\"US\")\n## End(Not run)\n\n# To solve the problem do:\nConvertMedUnits(27.5, \"Creatinine\", to=\"US\", exact=TRUE)\n\n\n"} {"package":"gdata","topic":"MedUnits","snippet":"### Name: MedUnits\n### Title: Table of conversions between International Standard (SI) and US\n### 'Conventional' Units for common medical measurements.\n### Aliases: MedUnits\n### Keywords: datasets\n\n### ** Examples\n\ndata(MedUnits)\n# Show available conversions\nMedUnits$Measurement\n\n# Utility function\nmatchUnits <- function(X) MedUnits[grep(X, MedUnits$Measurement),]\n\n# Convert SI Glucose measurement to 'Conventional' units\nGlucoseSI = c(5, 5.4, 5, 5.1, 5.6, 5.1, 4.9, 5.2, 5.5) # in SI Units\nGlucoseUS = GlucoseSI / matchUnits(\"Glucose\")$Conversion\ncbind(GlucoseSI, GlucoseUS)\n\n# Also consider using ConvertMedUnits()\nConvertMedUnits(GlucoseSI, \"Glucose\", to=\"US\")\n\n\n"} {"package":"gdata","topic":"ans","snippet":"### Name: ans\n### Title: Value of Last Evaluated Expression\n### Aliases: ans\n### Keywords: programming\n\n### ** Examples\n\n2+2 # Trivial calculation\nans() # See the answer again\n\ngamma(1:15) # Some intensive calculation\nfac14 <- ans() # store the results into a variable\n\nrnorm(20) # Generate some standard normal values\nans()^2 # Convert to Chi-square(1) values\nstem(ans()) # Now show a stem-and-leaf table\n\n\n"} {"package":"gdata","topic":"bindData","snippet":"### Name: bindData\n### Title: Bind two data frames into a multivariate data frame\n### Aliases: bindData\n### Keywords: manip misc\n\n### ** Examples\n\nn1 <- 6\nn2 <- 12\nn3 <- 4\n## Single trait 1\nnum <- c(5:n1, 10:13)\n(tmp1 <- data.frame(y1=rnorm(n=n1),\n f1=factor(rep(c(\"A\", \"B\"), n1/2)),\n ch=letters[num],\n fa=factor(letters[num]),\n nu=(num) + 0.5,\n id=factor(num), stringsAsFactors=FALSE))\n\n## Single trait 2 with repeated records, some subjects also in tmp1\nnum <- 4:9\n(tmp2 <- data.frame(y2=rnorm(n=n2),\n f2=factor(rep(c(\"C\", \"D\"), n2/2)),\n ch=letters[rep(num, times=2)],\n fa=factor(letters[rep(c(num), times=2)]),\n nu=c((num) + 0.5, (num) + 0.25),\n id=factor(rep(num, times=2)), stringsAsFactors=FALSE))\n\n## Single trait 3 with completely distinct set of subjects\nnum <- 1:4\n(tmp3 <- data.frame(y3=rnorm(n=n3),\n f3=factor(rep(c(\"E\", \"F\"), n3/2)),\n ch=letters[num],\n fa=factor(letters[num]),\n nu=(num) + 0.5,\n id=factor(num), stringsAsFactors=FALSE))\n\n## Combine all datasets\n(tmp12 <- bindData(x=tmp1, y=tmp2, common=c(\"id\", \"nu\", \"ch\", \"fa\")))\n(tmp123 <- bindData(x=tmp12, y=tmp3, common=c(\"id\", \"nu\", \"ch\", \"fa\")))\n\n## Sort by subject\ntmp123[order(tmp123$ch), ]\n\n\n"} {"package":"gdata","topic":"case","snippet":"### Name: case\n### Title: Map elements of a vector according to the provided 'cases'\n### Aliases: case\n### Keywords: manip\n\n### ** Examples\n\n## default = NA\ncase(c(1,1,4,3), \"a\"=1, \"b\"=2, \"c\"=3)\n\n## default = \"foo\"\ncase(c(1,1,4,3), \"a\"=1, \"b\"=2, \"c\"=3, default=\"foo\")\n\n\n"} {"package":"gdata","topic":"cbindX","snippet":"### Name: cbindX\n### Title: Column-bind objects with different number of rows\n### Aliases: cbindX\n### Keywords: misc\n\n### ** Examples\n\ndf1 <- data.frame(a=1:3, b=c(\"A\", \"B\", \"C\"))\ndf2 <- data.frame(c=as.character(1:5), a=5:1)\n\nma1 <- matrix(as.character(1:4), nrow=2, ncol=2)\nma2 <- matrix(1:6, nrow=3, ncol=2)\n\ncbindX(df1, df2)\ncbindX(ma1, ma2)\ncbindX(df1, ma1)\ncbindX(df1, df2, ma1, ma2)\ncbindX(ma1, ma2, df1, df2)\n\n\n"} 
{"package":"gdata","topic":"centerText","snippet":"### Name: centerText\n### Title: Center Text Strings\n### Aliases: centerText\n### Keywords: manip character\n\n### ** Examples\n\ncat(centerText(\"One Line Test\"), \"\\n\\n\")\n\nmText <-c(\"This\", \"is an example\",\n \" of a multiline text \",\n \"with \",\n \" leading\",\n \" and trailing \",\n \"spaces.\")\ncat(\"\\n\", centerText(mText), \"\\n\", sep=\"\\n\")\n\n\n"} {"package":"gdata","topic":"combine","snippet":"### Name: combine\n### Title: Combine R Objects With a Column Labeling the Source\n### Aliases: combine\n### Keywords: array manip\n\n### ** Examples\n\na <- matrix(rnorm(12),ncol=4,nrow=3)\nb <- 1:4\ncombine(a,b)\n\ncombine(x=a,b)\ncombine(x=a,y=b)\ncombine(a,b,names=c(\"one\",\"two\"))\n\nc <- 1:6\ncombine(b,c)\n\n\n"} {"package":"gdata","topic":"drop.levels","snippet":"### Name: drop.levels\n### Title: Drop unused factor levels\n### Aliases: drop.levels\n### Keywords: manip\n\n### ** Examples\n\nf <- factor(c(\"A\", \"B\", \"C\", \"D\"))[1:3]\ndrop.levels(f)\n\nl <- list(f=f, i=1:3, c=c(\"A\", \"B\", \"D\"))\ndrop.levels(l)\n\ndf <- as.data.frame(l)\nstr(df)\nstr(drop.levels(df))\n\n\n"} {"package":"gdata","topic":"duplicated2","snippet":"### Name: duplicated2\n### Title: Determine Duplicate Elements\n### Aliases: duplicated2\n### Keywords: logic manip\n\n### ** Examples\n\niris[duplicated(iris), ] # 2nd duplicated value\niris[duplicated(iris, fromLast=TRUE), ] # 1st duplicated value\niris[duplicated2(iris), ] # both duplicated values\n\n\n"} {"package":"gdata","topic":"env","snippet":"### Name: env\n### Title: Describe All Loaded Environments\n### Aliases: env\n### Keywords: data environment utilities\n\n### ** Examples\n\n## Not run: \n##D env()\n## End(Not run)\n\n\n"} {"package":"gdata","topic":"first","snippet":"### Name: first\n### Title: Return first or last element of an object\n### Aliases: first last first<- last<-\n### Keywords: manip\n\n### ** Examples\n\n## Vector\nv <- 1:10\nfirst(v)\nlast(v)\n\nfirst(v) <- 9\nv\n\nlast(v) <- 20\nv\n\n## List\nl <- list(a=1, b=2, c=3)\nfirst(l)\nlast(l)\n\nfirst(l) <- \"apple\"\nlast(l) <- \"banana\"\nl\n\n## Data frame\ndf <- data.frame(a=1:2, b=3:4, c=5:6)\nfirst(df)\nlast(df)\n\nfirst(df) <- factor(c(\"red\",\"green\"))\nlast(df) <- list(c(20,30)) # note the enclosing list!\ndf\n\n## Matrix\nm <- as.matrix(df)\nfirst(m)\nlast(m)\n\nfirst(m) <- \"z\"\nlast(m) <- \"q\"\nm\n\n\n"} {"package":"gdata","topic":"frameApply","snippet":"### Name: frameApply\n### Title: Subset analysis on data frames\n### Aliases: frameApply\n### Keywords: manip\n\n### ** Examples\n\ndata(ELISA, package=\"gtools\")\n\n# Default is slightly unintuitive, but commonly useful:\nframeApply(ELISA, by = c(\"PlateDay\", \"Read\"))\n\n# Wouldn't actually recommend this model! 
Just a demo:\nframeApply(ELISA, on = c(\"Signal\", \"Concentration\"), by = c(\"PlateDay\", \"Read\"),\n fun = function(dat) coef(lm(Signal ~ Concentration, data = dat)))\n\nframeApply(ELISA, on = \"Signal\", by = \"Concentration\",\n fun = function(dat) {\n x <- dat[[1]]\n out <- c(Mean = mean(x, na.rm=TRUE),\n SD = sd(x, na.rm=TRUE),\n N = sum(!is.na(x)))},\n subset = !is.na(Concentration))\n\n\n"} {"package":"gdata","topic":"getDateTimeParts","snippet":"### Name: getYear\n### Title: Get date/time parts from date and time objects\n### Aliases: getDateTimeParts getYear getYear.default getYear.Date\n### getYear.POSIXct getYear.POSIXlt getMonth getMonth.default\n### getMonth.Date getMonth.POSIXct getMonth.POSIXlt getDay getDay.default\n### getDay.Date getDay.POSIXct getDay.POSIXlt getHour getHour.default\n### getMin getMin.default getSec getSec.default\n### Keywords: manip misc\n\n### ** Examples\n\n## Date\ntmp <- Sys.Date()\ntmp\ngetYear(tmp)\ngetMonth(tmp)\ngetDay(tmp)\n\n## POSIXct\ntmp <- as.POSIXct(tmp)\ngetYear(tmp)\ngetMonth(tmp)\ngetDay(tmp)\n\n## POSIXlt\ntmp <- as.POSIXlt(tmp)\ngetYear(tmp)\ngetMonth(tmp)\ngetDay(tmp)\n\n\n"} {"package":"gdata","topic":"humanReadable","snippet":"### Name: humanReadable\n### Title: Print Byte Size in Human Readable Format\n### Aliases: humanReadable\n### Keywords: misc\n\n### ** Examples\n\n# Simple example: maximum addressable size of a 32-bit pointer\nhumanReadable(2^32-1)\nhumanReadable(2^32-1, standard=\"IEC\")\nhumanReadable(2^32-1, standard=\"SI\")\nhumanReadable(2^32-1, standard=\"Unix\")\n\nhumanReadable(2^32-1, unit=\"MiB\")\nhumanReadable(2^32-1, standard=\"IEC\", unit=\"MiB\")\nhumanReadable(2^32-1, standard=\"SI\", unit=\"MB\")\nhumanReadable(2^32-1, standard=\"Unix\", unit=\"M\")\n\n# Vector of sizes\nmatrix(humanReadable(c(60810, 124141, 124, 13412513), width=4))\nmatrix(humanReadable(c(60810, 124141, 124, 13412513), width=4, unit=\"KiB\"))\n\n# Specify digits rather than width\nmatrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL, digits=2))\n\n# Change the justification\nmatrix(humanReadable(c(60810, 124141, 124, 13412513), width=NULL,\n justify=c(\"right\", \"right\")))\n\n\n"} {"package":"gdata","topic":"interleave","snippet":"### Name: interleave\n### Title: Interleave Rows of Data Frames or Matrices\n### Aliases: interleave\n### Keywords: category array\n\n### ** Examples\n\n# Simple example\na <- matrix(1:10,ncol=2,byrow=TRUE)\nb <- matrix(letters[1:10],ncol=2,byrow=TRUE)\nc <- matrix(LETTERS[1:10],ncol=2,byrow=TRUE)\ninterleave(a,b,c)\n\n# Create a 2-way table of means, standard errors, and nobs\ng1 <- sample(letters[1:5], 1000, replace=TRUE)\ng2 <- sample(LETTERS[1:3], 1000, replace=TRUE)\ndat <- rnorm(1000)\n\nstderr <- function(x) sqrt(var(x,na.rm=TRUE) / nobs(x))\n\nmeans <- tapply(dat, list(g1, g2), mean)\nstderrs <- tapply(dat, list(g1, g2), stderr)\nns <- tapply(dat, list(g1, g2), nobs)\nblanks <- matrix(\" \", nrow=5, ncol=3)\n\ntab <- interleave(\"Mean\"=round(means,2),\n \"Std Err\"=round(stderrs,2),\n \"N\"=ns, \" \"=blanks, sep=\" \")\nprint(tab, quote=FALSE)\n\n# Using drop to control coercion to lower dimensions\nm1 <- matrix(1:4)\nm2 <- matrix(5:8)\n\ninterleave(m1, m2, drop=TRUE) # this will be coerced to a vector\ninterleave(m1, m2, drop=FALSE) # this will remain a matrix\n\n\n"} {"package":"gdata","topic":"is.what","snippet":"### Name: is.what\n### Title: Run Multiple is.* Tests on a Given Object\n### Aliases: is.what\n### Keywords: classes NA programming error utilities\n\n### ** 
Examples\n\nis.what(pi)\nis.what(NA, verbose=TRUE)\nis.what(lm(1~1))\nis.what(is.what)\n\n\n"} {"package":"gdata","topic":"keep","snippet":"### Name: keep\n### Title: Remove All Objects, Except Those Specified\n### Aliases: keep\n### Keywords: data environment utilities\n\n### ** Examples\n\ndata(trees, CO2)\nkeep(trees)\n# To remove all objects except trees, run:\n# keep(trees, sure=TRUE)\n\n\n"} {"package":"gdata","topic":"right","snippet":"### Name: left\n### Title: Return the leftmost or rightmost columns of a matrix or data\n### frame\n### Aliases: right left right.data.frame left.data.frame right.matrix\n### left.matrix\n### Keywords: manip\n\n### ** Examples\n\nm <- matrix(1:100, ncol=10)\ncolnames(m) <- paste(\"Col\",1:10, sep=\"_\")\n\nleft(m)\nright(m)\n\n# When no column names are present, they are added by default\ncolnames(m) <- NULL\n\nleft(m)\ncolnames(left(m))\n\nright(m)\ncolnames(right(m))\n\n# Prevent addition of column numbers\nleft(m, add.col.nums = FALSE)\ncolnames(left(m, add.col.nums = FALSE))\n\nright(m, add.col.nums = FALSE) # columns are labeled 1:6\ncolnames(right(m, add.col.nums = FALSE)) # instead of 5:10\n\n# Works for data frames too!\nd <- data.frame(m)\nleft(d)\nright(d)\n\n# Use negative n to specify number of columns to omit\nleft(d, -3)\nright(d, -3)\n\n\n"} {"package":"gdata","topic":"ll","snippet":"### Name: ll\n### Title: Describe Objects or Elements\n### Aliases: ll\n### Keywords: data attribute classes list environment print utilities\n\n### ** Examples\n\nll()\nll(all=TRUE)\nll(\"package:base\")\nll(\"package:base\", class=\"function\", invert=TRUE)\n\nll(infert)\nmodel <- glm(case~spontaneous+induced, family=binomial, data=infert)\nll(model, dim=TRUE)\nll(model, sort=TRUE)\nll(model$family)\n\n\n"} {"package":"gdata","topic":"ls.funs","snippet":"### Name: ls.funs\n### Title: List function objects\n### Aliases: ls.funs\n### Keywords: misc environment\n\n### ** Examples\n\n## List functions defined in the global environment:\nls.funs()\n\n## List functions available in the base package:\nls.funs(\"package:base\")\n\n\n"} {"package":"gdata","topic":"mapLevels","snippet":"### Name: mapLevels\n### Title: Mapping levels\n### Aliases: mapLevels mapLevels.default mapLevels.factor\n### mapLevels.character mapLevels.list mapLevels.data.frame\n### print.levelsMap print.listLevelsMap is.levelsMap is.listLevelsMap\n### as.levelsMap as.listLevelsMap .checkLevelsMap .checkListLevelsMap\n### \"[.levelsMap\" \"[.listLevelsMap\" c.levelsMap c.listLevelsMap\n### unique.levelsMap sort.levelsMap mapLevels<- mapLevels<-.default\n### mapLevels<-.factor mapLevels<-.character mapLevels<-.list\n### mapLevels<-.data.frame\n### Keywords: misc manip\n\n### ** Examples\n\n## Integer levelsMap\n(f <- factor(sample(letters, size=20, replace=TRUE)))\n(mapInt <- mapLevels(f))\n\n## Integer to factor\n(int <- as.integer(f))\n(mapLevels(int) <- mapInt)\nall.equal(int, f)\n\n## Remap levels of a factor\n(fac <- factor(as.integer(f)))\n(mapLevels(fac) <- mapInt) # the same as levels(fac) <- mapInt\nall.equal(fac, f)\n\n## Character levelsMap\nf1 <- factor(letters[1:10])\nf2 <- factor(letters[5:14])\n\n## Internal codes are the same, but levels are not\nas.integer(f1)\nas.integer(f2)\n\n## Get character levelsMaps and combine them\nmapCha1 <- mapLevels(f1, codes=FALSE)\nmapCha2 <- mapLevels(f2, codes=FALSE)\n(mapCha <- c(mapCha1, mapCha2))\n\n## Remap factors\nmapLevels(f1) <- mapCha # the same as levels(f1) <- mapCha\nmapLevels(f2) <- mapCha # the same as levels(f2) <- mapCha\n\n## 
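After remapping with the combined map, both factors carry the same level set.\n## 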
Internal codes are now \"consistent\" among factors\nas.integer(f1)\nas.integer(f2)\n\n## Remap characters to get factors\nf1 <- as.character(f1); f2 <- as.character(f2)\nmapLevels(f1) <- mapCha\nmapLevels(f2) <- mapCha\n\n## Internal codes are now \"consistent\" among factors\nas.integer(f1)\nas.integer(f2)\n\n\n"} {"package":"gdata","topic":"matchcols","snippet":"### Name: matchcols\n### Title: Select column names matching certain criteria\n### Aliases: matchcols\n### Keywords: manip\n\n### ** Examples\n\n# Create a matrix with many named columns\nx <- matrix(ncol=30, nrow=5)\ncolnames(x) <- c(\"AffyID\",\"Overall Group Means: Control\",\n \"Overall Group Means: Moderate\",\n \"Overall Group Means: Marked\",\n \"Overall Group Means: Severe\",\n \"Overall Group StdDev: Control\",\n \"Overall Group StdDev: Moderate\",\n \"Overall Group StdDev: Marked\",\n \"Overall Group StdDev: Severe\",\n \"Overall Group CV: Control\",\n \"Overall Group CV: Moderate\",\n \"Overall Group CV: Marked\",\n \"Overall Group CV: Severe\",\n \"Overall Model P-value\",\n \"Overall Model: (Intercept): Estimate\",\n \"Overall Model: Moderate: Estimate\",\n \"Overall Model: Marked: Estimate\",\n \"Overall Model: Severe: Estimate\",\n \"Overall Model: (Intercept): Std. Error\",\n \"Overall Model: Moderate: Std. Error\",\n \"Overall Model: Marked: Std. Error\",\n \"Overall Model: Severe: Std. Error\",\n \"Overall Model: (Intercept): t value\",\n \"Overall Model: Moderate: t value\",\n \"Overall Model: Marked: t value\",\n \"Overall Model: Severe: t value\",\n \"Overall Model: (Intercept): Pr(>|t|)\",\n \"Overall Model: Moderate: Pr(>|t|)\",\n \"Overall Model: Marked: Pr(>|t|)\",\n \"Overall Model: Severe: Pr(>|t|)\")\n\n# Get the columns which give estimates or p-values\n# only for marked and severe groups\nmatchcols(x, with=c(\"Pr\", \"Std. 
Error\"),\n without=c(\"Intercept\",\"Moderate\"),\n method=\"or\")\n\n# Get just the column which give the p-value for the intercept\nmatchcols(x, with=c(\"Intercept\", \"Pr\"))\n\n\n"} {"package":"gdata","topic":"mv","snippet":"### Name: mv\n### Title: Rename an Object\n### Aliases: mv\n### Keywords: environment data\n\n### ** Examples\n\na <- 1:10\na\nmv(\"a\", \"b\")\nb\nexists(\"a\")\n\n\n"} {"package":"gdata","topic":"nPairs","snippet":"### Name: nPairs\n### Title: Number of variable pairs\n### Aliases: nPairs\n### Keywords: misc\n\n### ** Examples\n\n# Test data\ntest <- data.frame(V1=c(1, 2, 3, 4, 5),\n V2=c(NA, 2, 3, 4, 5),\n V3=c(1, NA, NA, NA, NA),\n V4=c(1, 2, 3, NA, NA))\n\n# Number of variable pairs\nnPairs(x=test)\n\n# Without names\nnPairs(x=test, names=FALSE)\n\n# Longer names\ncolnames(test) <- c(\"Variable1\", \"Variable2\", \"Variable3\", \"Variable4\")\nnPairs(x=test)\n\n# Margin\nnPairs(x=test, margin=TRUE)\n\n# Summary\nsummary(object=nPairs(x=test))\n\n\n"} {"package":"gdata","topic":"nobs","snippet":"### Name: nobs\n### Title: Compute the Number of Non-Missing Observations\n### Aliases: nobs n_obs nobs.data.frame nobs.default nobs.lm\n### Keywords: attribute\n\n### ** Examples\n\nx <- c(1, 2, 3, 5, NA, 6, 7, 1, NA)\nlength(x)\nnobs(x)\n\ndf <- data.frame(x=rnorm(100), y=rnorm(100))\ndf[1,1] <- NA\ndf[1,2] <- NA\ndf[2,1] <- NA\nnobs(df)\n\nfit <- lm(y~x, data=df)\nnobs(fit)\nn_obs(fit)\n\n# Comparison\n# gdata\nnobs(x)\nnobs(df)\n# stats\nlength(na.omit(x))\nsapply(df, function(x) length(na.omit(x)))\n\n\n"} {"package":"gdata","topic":"object_size","snippet":"### Name: object_size\n### Title: Report the Space Allocated for Objects\n### Aliases: object_size c.object_sizes as.object_sizes is.object_sizes\n### format.object_sizes print.object_sizes object.size\n### Keywords: utilities\n\n### ** Examples\n\nobject_size(letters)\nobject_size(ls)\n\n# Find the 10 largest objects in the base package\nallObj <- sapply(ls(\"package:base\"), function(x)\n object_size(get(x, envir=baseenv())))\n(bigObj <- as.object_sizes(rev(sort(allObj))[1:10]))\nprint(bigObj, humanReadable=TRUE)\n\nas.object_sizes(14567567)\n\noldopt <- options(humanReadable=TRUE)\n(z <- object_size(letters,\n c(letters, letters),\n rep(letters, 100),\n rep(letters, 10000)))\nis.object_sizes(z)\nas.object_sizes(14567567)\noptions(oldopt)\n\n# Comparison\n# gdata\nprint(object_size(loadNamespace), humanReadable=TRUE)\nprint(bigObj, humanReadable=TRUE)\n# utils\nprint(utils::object.size(loadNamespace), units=\"auto\")\nsapply(bigObj, utils:::format.object_size, units=\"auto\")\n# ll\nll(\"package:base\")[order(-ll(\"package:base\")$KB)[1:10],]\n\n\n"} {"package":"gdata","topic":"rename.vars","snippet":"### Name: rename.vars\n### Title: Remove or rename variables in a data frame\n### Aliases: rename.vars remove.vars\n### Keywords: manip\n\n### ** Examples\n\ndata <- data.frame(x=1:10,y=1:10,z=1:10)\nnames(data)\ndata <- rename.vars(data, c(\"x\",\"y\",\"z\"), c(\"first\",\"second\",\"third\"))\nnames(data)\n\ndata <- remove.vars(data, \"second\")\nnames(data)\n\n\n"} {"package":"gdata","topic":"reorder.factor","snippet":"### Name: reorder.factor\n### Title: Reorder the Levels of a Factor\n### Aliases: reorder.factor\n### Keywords: manip\n\n### ** Examples\n\n## Don't show: \n set.seed(123456)\n## End(Don't show)\n# Create a 4 level example factor\ntrt <- factor(sample(c(\"PLACEBO\", \"300 MG\", \"600 MG\", \"1200 MG\"),\n 100, replace=TRUE))\nsummary(trt)\n# Note that the levels are not in a meaningful 
order.\n\n# Change the order to something useful\n# - default \"mixedsort\" ordering\ntrt2 <- reorder(trt)\nsummary(trt2)\n# - using indexes:\ntrt3 <- reorder(trt, new.order=c(4, 2, 3, 1))\nsummary(trt3)\n# - using label names:\ntrt4 <- reorder(trt, new.order=c(\"PLACEBO\", \"300 MG\", \"600 MG\", \"1200 MG\"))\nsummary(trt4)\n# - using frequency\ntrt5 <- reorder(trt, X=rnorm(100), FUN=mean)\nsummary(trt5)\n\n# Drop out the '300 MG' level\ntrt6 <- reorder(trt, new.order=c(\"PLACEBO\", \"600 MG\", \"1200 MG\"))\nsummary(trt6)\n\n\n"} {"package":"gdata","topic":"resample","snippet":"### Name: resample\n### Title: Consistent Random Samples and Permutations\n### Aliases: resample\n### Keywords: misc\n\n### ** Examples\n\n## Sample behavior differs if first argument is scalar vs vector\nsample(c(10))\nsample(c(10, 10))\n\n## Resample has the consistent behavior for both cases\nresample(c(10))\nresample(c(10, 10))\n\n\n"} {"package":"gdata","topic":"startsWith","snippet":"### Name: startsWith\n### Title: Does String Start or End With Another String?\n### Aliases: startsWith starts_with\n### Keywords: character\n\n### ** Examples\n\n## Simple example\nstartsWith(\"Testing\", \"Test\")\n\n## Vector examples\ns <- c(\"Testing\", \" Testing\", \"testing\", \"Texting\")\nnames(s) <- s\n\nstartsWith(s, \"Test\") # \" Testing\", \"testing\", and \"Texting\" do not match\nstartsWith(s, \"Test\", trim=TRUE) # Now \" Testing\" matches\nstartsWith(s, \"Test\", ignore.case=TRUE) # Now \"testing\" matches\n\n# Comparison\n# gdata\nstartsWith(s, \"Test\", trim=TRUE)\nstartsWith(s, \"Test\", ignore.case=TRUE)\n# base\nstartsWith(trimws(s), \"Test\")\nstartsWith(tolower(s), tolower(\"Test\"))\n\n\n"} {"package":"gdata","topic":"trim","snippet":"### Name: trim\n### Title: Remove leading and trailing spaces from character strings\n### Aliases: trim\n### Keywords: manip character\n\n### ** Examples\n\ns <- \" this is an example string \"\ntrim(s)\n\nf <- factor(c(s, s, \" A\", \" B \", \" C \", \"D \"))\nlevels(f)\n\ntrim(f)\nlevels(trim(f))\n\ntrim(f, recode.factor=FALSE)\nlevels(trim(f, recode.factor=FALSE))\n\nl <- list(s=rep(s, times=6), f=f, i=1:6)\ntrim(l)\n\ndf <- as.data.frame(l)\ntrim(df)\n\n\n"} {"package":"gdata","topic":"trimSum","snippet":"### Name: trimSum\n### Title: Trim a vector such that the last/first value represents the sum\n### of trimmed values\n### Aliases: trimSum\n### Keywords: manip\n\n### ** Examples\n\nx <- 1:10\ntrimSum(x, n=5)\ntrimSum(x, n=5, right=FALSE)\n\nx[9] <- NA\ntrimSum(x, n=5)\ntrimSum(x, n=5, na.rm=TRUE)\n\n\n"} {"package":"gdata","topic":"isUnknown","snippet":"### Name: unknownToNA\n### Title: Change unknown values to NA and vice versa\n### Aliases: isUnknown isUnknown.default isUnknown.POSIXlt isUnknown.list\n### isUnknown.data.frame isUnknown.matrix unknownToNA unknownToNA.default\n### unknownToNA.factor unknownToNA.list unknownToNA.data.frame\n### NAToUnknown NAToUnknown.default NAToUnknown.factor NAToUnknown.list\n### NAToUnknown.data.frame\n### Keywords: manip NA\n\n### ** Examples\n\nxInt <- c(0, 1, 0, 5, 6, 7, 8, 9, NA)\nisUnknown(x=xInt, unknown=0)\nisUnknown(x=xInt, unknown=c(0, NA))\n(xInt <- unknownToNA(x=xInt, unknown=0))\n(xInt <- NAToUnknown(x=xInt, unknown=0))\n\nxFac <- factor(c(\"0\", 1, 2, 3, NA, \"NA\"))\nisUnknown(x=xFac, unknown=0)\nisUnknown(x=xFac, unknown=c(0, NA))\nisUnknown(x=xFac, unknown=c(0, \"NA\"))\nisUnknown(x=xFac, unknown=c(0, \"NA\", NA))\n(xFac <- unknownToNA(x=xFac, unknown=\"NA\"))\n(xFac <- NAToUnknown(x=xFac, 
unknown=\"NA\"))\n\nxList <- list(xFac=xFac, xInt=xInt)\nisUnknown(xList, unknown=c(\"NA\", 0))\nisUnknown(xList, unknown=list(\"NA\", 0))\ntmp <- c(0, \"NA\")\nnames(tmp) <- c(\".default\", \"xFac\")\nisUnknown(xList, unknown=tmp)\ntmp <- list(.default=0, xFac=\"NA\")\nisUnknown(xList, unknown=tmp)\n\n(xList <- unknownToNA(xList, unknown=tmp))\n(xList <- NAToUnknown(xList, unknown=999))\n\n\n"} {"package":"gdata","topic":"unmatrix","snippet":"### Name: unmatrix\n### Title: Convert a matrix into a vector, with appropriate names\n### Aliases: unmatrix\n### Keywords: manip\n\n### ** Examples\n\n# Simple example\nm <- matrix(letters[1:10], ncol=5)\nm\nunmatrix(m)\n\n# Unroll model output\nx <- rnorm(100)\ny <- rnorm(100, mean=3+5*x, sd=0.25)\nm <- coef(summary(lm(y ~ x)))\nunmatrix(m)\n\n\n"} {"package":"gdata","topic":"update.list","snippet":"### Name: update.list\n### Title: Update the elements of a list, or rows of a data frame\n### Aliases: update.list update.data.frame\n### Keywords: data manip\n\n### ** Examples\n\n# Update list\nold <- list(a=1,b=\"red\",c=1.37)\nnew <- list(b=\"green\",c=2.4)\n\nupdate(old, new)\nupdate.list(old,new) # equivalent\n\nolder <- list(a=0, b=\"orange\", 4, 5, 6)\nnewer <- list(b=\"purple\", 7, 8, 9)\nupdate(older, newer) # ignores unnamed elements of newer\nupdate(older, newer, unnamed=TRUE) # appends unnamed elements of newer\n\n# Update data frame\nold <- data.frame(letter=letters[1:5], number=1:5)\nnew <- data.frame(letter=letters[c(5, 1, 7)], number=c(-5, -1, -7))\n\nupdate(old, new, by=\"letter\") # default is append=TRUE\nupdate(old, new, by=\"letter\", append=FALSE)\nupdate(old, new, by=\"letter\", verbose=FALSE)\n\n\n"} {"package":"gdata","topic":"upperTriangle","snippet":"### Name: upperTriangle\n### Title: Extract or replace the upper/lower triangular portion of a\n### matrix\n### Aliases: upperTriangle upperTriangle<- lowerTriangle lowerTriangle<-\n### Keywords: array\n\n### ** Examples\n\nx <- matrix(1:25, nrow=5, ncol=5)\nx\nupperTriangle(x)\nupperTriangle(x, diag=TRUE)\nupperTriangle(x, diag=TRUE, byrow=TRUE)\n\nlowerTriangle(x)\nlowerTriangle(x, diag=TRUE)\nlowerTriangle(x, diag=TRUE, byrow=TRUE)\n\nupperTriangle(x) <- NA\nx\n\nupperTriangle(x, diag=TRUE) <- 1:15\nx\n\nlowerTriangle(x) <- NA\nx\n\nlowerTriangle(x, diag=TRUE) <- 1:15\nx\n\n## Copy lower triangle into upper triangle to make\n## the matrix (diagonally) symmetric\nx <- matrix(LETTERS[1:25], nrow=5, ncol=5, byrow=TRUE)\nx\nlowerTriangle(x) = upperTriangle(x, byrow=TRUE)\nx\n\n\n"} {"package":"gdata","topic":"wideByFactor","snippet":"### Name: wideByFactor\n### Title: Create multivariate data by a given factor\n### Aliases: wideByFactor\n### Keywords: manip misc\n\n### ** Examples\n\nn <- 10\nf <- 2\ntmp <- data.frame(y1=rnorm(n=n),\n y2=rnorm(n=n),\n f1=factor(rep(letters[1:f], n/2)),\n f2=factor(c(rep(\"M\", n/2), rep(\"F\", n/2))),\n c1=1:n,\n c2=2*(1:n))\n\nwideByFactor(x=tmp, factor=\"f1\", common=c(\"c1\", \"c2\", \"f2\"))\nwideByFactor(x=tmp, factor=\"f1\", common=c(\"c1\", \"c2\"))\n\n\n"} {"package":"gdata","topic":"write.fwf","snippet":"### Name: write.fwf\n### Title: Write object to file in fixed width format\n### Aliases: write.fwf\n### Keywords: print file\n\n### ** Examples\n\n## Some data\nnum <- round(c(733070.345678, 1214213.78765456, 553823.798765678,\n 1085022.8876545678, 571063.88765456, 606718.3876545678,\n 1053686.6, 971024.187656, 631193.398765456, 879431.1),\n digits=3)\n\ntestData <- data.frame(num1=c(1:10, NA),\n num2=c(NA, seq(from=1, to=5.5, by=0.5)),\n 
num3=c(NA, num),\n int1=c(as.integer(1:4), NA, as.integer(4:9)),\n fac1=factor(c(NA, letters[1:9], \"hjh\")),\n fac2=factor(c(letters[6:15], NA)),\n cha1=c(letters[17:26], NA),\n cha2=c(NA, \"longer\", letters[25:17]),\n stringsAsFactors=FALSE)\nlevels(testData$fac1) <- c(levels(testData$fac1), \"unusedLevel\")\ntestData$Date <- as.Date(\"1900-1-1\")\ntestData$Date[2] <- NA\ntestData$POSIXt <- as.POSIXct(strptime(\"1900-1-1 01:01:01\",\n format=\"%Y-%m-%d %H:%M:%S\"))\ntestData$POSIXt[5] <- NA\n\n## Default\nwrite.fwf(x=testData)\n\n## NA should be -\nwrite.fwf(x=testData, na=\"-\")\n## NA should be -NA-\nwrite.fwf(x=testData, na=\"-NA-\")\n\n## Some other separator than space\nwrite.fwf(x=testData[, 1:4], sep=\"-mySep-\")\n\n## Force wider columns\nwrite.fwf(x=testData[, 1:5], width=20)\n\n## Show effect of 'scientific' option\ntestData$num3 <- testData$num3 * 1e8\nwrite.fwf(testData, scientific=TRUE)\nwrite.fwf(testData, scientific=FALSE)\ntestData$num3 <- testData$num3 / 1e8\n\n## Write to file and report format and fixed width information\nfile <- tempfile()\nformatInfo <- write.fwf(x=testData, file=file, formatInfo=TRUE)\nformatInfo\n\n## Read exported data back to R (note +1 due to separator)\n## - without header\nread.fwf(file=file, widths=formatInfo$width + 1, header=FALSE, skip=1,\n strip.white=TRUE)\n\n## - with header, via post-import modification\ntmp <- read.fwf(file=file, widths=formatInfo$width + 1, skip=1,\n strip.white=TRUE)\ncolnames(tmp) <- read.table(file=file, nrow=1, as.is=TRUE)\ntmp\n\n## - with header, persuading read.fwf to accept header properly\n## (thanks to Marc Schwartz)\nread.fwf(file=file, widths=formatInfo$width + 1, strip.white=TRUE,\n skip=1, col.names=read.table(file=file, nrow=1, as.is=TRUE))\n\n## - with header, using quotes\nwrite.fwf(x=testData, file=file, quote=TRUE)\nread.table(file=file, header=TRUE, strip.white=TRUE)\n\n## Tidy up\nunlink(file)\n\n\n"} {"package":"genero","topic":"genero","snippet":"### Name: genero\n### Title: Estimate gender from names in Spanish and Portuguese\n### Aliases: genero\n\n### ** Examples\n\ngenero(c(\"Juan\", \"Pablo\", \"Camila\", \"Mariana\"))\n\n\n\n\n"} {"package":"genero","topic":"names_gender_es","snippet":"### Name: names_gender_es\n### Title: Names with gender in Spanish\n### Aliases: names_gender_es\n### Keywords: datasets\n\n### ** Examples\n\nnames_gender_es\n\n\n"} {"package":"genero","topic":"names_gender_pt","snippet":"### Name: names_gender_pt\n### Title: Names with gender in Portuguese\n### Aliases: names_gender_pt\n### Keywords: datasets\n\n### ** Examples\n\nnames_gender_pt\n\n\n"} {"package":"gaselect","topic":"genAlg","snippet":"### Name: genAlg\n### Title: Genetic algorithm for variable subset selection\n### Aliases: genAlg\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluatorSRCV <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\nevaluatorRDCV <- evaluatorPLS(numReplications = 2, innerSegments = 5, outerSegments = 3,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresultSRCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorSRCV, seed = 123)\nresultRDCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorRDCV, seed = 123)\n\nsubsets(resultSRCV, 1:5)\nsubsets(resultRDCV, 1:5)\n\n\n"} 
{"package":"gaselect","topic":"genAlgControl","snippet":"### Name: genAlgControl\n### Title: Set control arguments for the genetic algorithm\n### Aliases: genAlgControl\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluatorSRCV <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\nevaluatorRDCV <- evaluatorPLS(numReplications = 2, innerSegments = 5, outerSegments = 3,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresultSRCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorSRCV, seed = 123)\nresultRDCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorRDCV, seed = 123)\n\nsubsets(resultSRCV, 1:5)\nsubsets(resultRDCV, 1:5)\n\n\n"} {"package":"gaselect","topic":"evaluatorFit","snippet":"### Name: evaluatorFit\n### Title: Fit Evaluator\n### Aliases: evaluatorFit\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 200, numGenerations = 30, minVariables = 5,\n maxVariables = 12, verbosity = 1)\nevaluator <- evaluatorFit(statistic = \"BIC\", numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nsubsets(result, 1:5)\n\n\n"} {"package":"gaselect","topic":"evaluatorLM","snippet":"### Name: evaluatorLM\n### Title: LM Evaluator\n### Aliases: evaluatorLM\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 200, numGenerations = 30, minVariables = 5,\n maxVariables = 12, verbosity = 1)\nevaluator <- evaluatorLM(statistic = \"BIC\", numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nsubsets(result, 1:5)\n\n\n"} {"package":"gaselect","topic":"evaluatorPLS","snippet":"### Name: evaluatorPLS\n### Title: PLS Evaluator\n### Aliases: evaluatorPLS\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluatorSRCV <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\nevaluatorRDCV <- evaluatorPLS(numReplications = 2, innerSegments = 5, outerSegments = 3,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresultSRCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorSRCV, seed = 123)\nresultRDCV <- genAlg(y, X, control = ctrl, evaluator = evaluatorRDCV, seed = 123)\n\nsubsets(resultSRCV, 1:5)\nsubsets(resultRDCV, 1:5)\n\n\n"} {"package":"gaselect","topic":"evaluatorUserFunction","snippet":"### Name: evaluatorUserFunction\n### Title: User Defined Evaluator\n### Aliases: evaluatorUserFunction\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 10, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\n# Use the BIC of a linear model to evaluate the fitness of a variable subset\nevalFUN <- function(y, X) {\n\t\treturn(BIC(lm(y ~ X)));\n}\n\n# 
Dummy function that returns the residuals standard deviation and not the SEP\nsepFUN <- function(genAlg) {\n return(apply(genAlg@subsets, 2, function(subset) {\n\t\tm <- lm(genAlg@response ~ genAlg@covariates[, subset]);\n\t\treturn(sd(m$residuals));\n\t}));\n}\n\nevaluator <- evaluatorUserFunction(FUN = evalFUN, sepFUN = sepFUN)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nsubsets(result, 1:5)\n\n\n"} {"package":"gaselect","topic":"fitness","snippet":"### Name: fitness\n### Title: Get the fitness of a variable subset\n### Aliases: fitness\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluator <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nfitness(result) # Get fitness of the found subsets\n\nh <- fitnessEvolution(result) # Get average fitness as well as the fitness of the\n # best chromosome for each generation (at raw scale!)\n\nplot(h[, \"mean\"], type = \"l\", col = 1, ylim = c(-7, -1))\nlines(h[, \"mean\"] - h[, \"std.dev\"], type = \"l\", col = \"gray30\", lty = 2)\nlines(h[, \"mean\"] + h[, \"std.dev\"], type = \"l\", col = \"gray30\", lty = 2)\nlines(h[, \"best\"], type = \"l\", col = 2)\n\n\n"} {"package":"gaselect","topic":"fitnessEvolution","snippet":"### Name: fitnessEvolution\n### Title: Get the evolution of the fitness\n### Aliases: fitnessEvolution\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 100, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluator <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nfitness(result) # Get fitness of the found subsets\n\nh <- fitnessEvolution(result) # Get average fitness as well as the fitness of the\n # best chromosome for each generation (at raw scale!)\n\nplot(h[, \"mean\"], type = \"l\", col = 1, ylim = c(-7, -1))\nlines(h[, \"mean\"] - h[, \"std.dev\"], type = \"l\", col = \"gray30\", lty = 2)\nlines(h[, \"mean\"] + h[, \"std.dev\"], type = \"l\", col = \"gray30\", lty = 2)\nlines(h[, \"best\"], type = \"l\", col = 2)\n\n\n"} {"package":"gaselect","topic":"subsets","snippet":"### Name: subsets\n### Title: Get the found variable subset(s)\n### Aliases: subsets\n\n### ** Examples\n\nctrl <- genAlgControl(populationSize = 200, numGenerations = 15, minVariables = 5,\n maxVariables = 12, verbosity = 1)\n\nevaluator <- evaluatorPLS(numReplications = 2, innerSegments = 7, testSetSize = 0.4,\n numThreads = 1)\n\n# Generate demo-data\nset.seed(12345)\nX <- matrix(rnorm(10000, sd = 1:5), ncol = 50, byrow = TRUE)\ny <- drop(-1.2 + rowSums(X[, seq(1, 43, length = 8)]) + rnorm(nrow(X), 1.5));\n\nresult <- genAlg(y, X, control = ctrl, evaluator = evaluator, seed = 123)\n\nsubsets(result, 
names = TRUE, indices = 1:5) # best 5 variable subsets as a list of names\nresult@subsets[ , 1:5] # best 5 variable subsets as a logical matrix with the subsets in the columns\n\n\n"} {"package":"pxweb","topic":"http_was_redirected","snippet":"### Name: http_was_redirected\n### Title: http_was_redirected\n### Aliases: http_was_redirected\n### Keywords: internal\n\n### ** Examples\n\n## Not run: \n##D r <- httr::GET(\"http://httpbin.org/redirect/2\")\n##D pxweb:::http_was_redirected(r)\n## End(Not run)\n\n\n"} {"package":"pxweb","topic":"pxweb","snippet":"### Name: pxweb\n### Title: S3 constructor for 'pxweb' api object.\n### Aliases: pxweb is.pxweb print.pxweb\n### Keywords: internal\n\n### ** Examples\n\n## Not run: \n##D pxapi_1 <- pxweb(url =\"https://api.scb.se/OV0104/v1/doris/sv/ssd/START/ME/ME0104/ME0104C/ME0104T24\")\n##D pxapi_2 <- pxweb(url =\"https://api.scb.se/OV0104/v1/doris/sv\")\n## End(Not run)\n\n\n\n"} {"package":"pxweb","topic":"pxweb_api_catalogue","snippet":"### Name: pxweb_api_catalogue\n### Title: Get the PXWEB API catalogue\n### Aliases: pxweb_api_catalogue pxweb_api_catalogue_from_json\n### pxweb_api_catalogue_from_github pxweb_api_catalogue_path\n### Keywords: internal\n\n### ** Examples\n\npxweb_api_catalogue()\n\n\n\n"} {"package":"pxweb","topic":"pxweb_data_comments","snippet":"### Name: pxweb_data_comments\n### Title: Construct a 'pxweb_data_comments' object.\n### Aliases: pxweb_data_comments pxweb_data_comments.pxweb_data\n### Keywords: internal\n\n### ** Examples\n\n## Not run: \n##D url <- \"https://api.scb.se/OV0104/v1/doris/en/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n##D json_query <- \n##D file.path(system.file(package = \"pxweb\"), \"extdata\", \"examples\", \"json_query_example.json\")\n##D pxd <- pxweb_get(url = url, query = json_query)\n##D pxdcs <- pxweb_data_comments(x = pxd)\n##D pxdc_df <- as.data.frame(pxdcs, stringsAsFactors = TRUE)\n## End(Not run)\n\n\n"} {"package":"pxweb","topic":"pxweb_explorer","snippet":"### Name: pxweb_explorer\n### Title: Create a 'pxweb_explorer' object.\n### Aliases: pxweb_explorer pxweb_explorer.NULL pxweb_explorer.character\n### pxweb_explorer.pxweb pxweb_explorer.pxweb_api_catalogue_entry\n### assert_pxweb_explorer print.pxweb_explorer print_bar\n### pxe_print_choices\n### Keywords: internal\n\n### ** Examples\n\n## The functions below are internal generic functions\n## x <- pxweb_explorer()\n## url <- \"api.scb.se\"\n## x <- pxweb_explorer(x = url)\n## url <- \"https://api.scb.se/OV0104/v1/doris/en/ssd/BE/BE0101/BE0101A/\"\n## x <- pxweb_explorer(x = url)\n## url <- \"https://api.scb.se/OV0104/v1/doris/en/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n## x <- pxweb_explorer(x = url)\n\n\n\n"} {"package":"pxweb","topic":"pxweb_get","snippet":"### Name: pxweb_get\n### Title: Do a GET call to PXWEB API\n### Aliases: pxweb_get\n\n### ** Examples\n\n## Not run: \n##D url <- \"https://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n##D px_meta_data <- pxweb_get(url)\n##D \n##D url <- \"https://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101\"\n##D px_levels <- pxweb_get(url)\n##D \n##D url <- \"https://api.scb.se/OV0104/v1/doris/sv\"\n##D px_levels <- pxweb_get(url)\n##D \n##D url <- \"https://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n##D query <- file.path(system.file(package = \"pxweb\"),\n##D \"extdata\", \"examples\", \"json_query_example.json\")\n##D px_data <- pxweb_get(url = url, query = query)\n##D \n##D # Convert to data.frame\n##D as.data.frame(px_data, column.name.type = 
\"text\", variable.value.type = \"text\")\n##D \n##D # Get raw data\n##D as.matrix(px_data, column.name.type = \"code\", variable.value.type = \"code\")\n##D \n##D # Get data comments\n##D pxweb_data_comments(px_data)\n##D \n##D # Get jsonstat data\n##D jstat <- query <- file.path(system.file(package = \"pxweb\"),\n##D \"extdata\", \"examples\", \"json-stat_query_example.json\")\n##D jstat_data <- pxweb_get(url = url, query = query)\n##D \n##D # Get very large datasets (multiple downloads needed)\n##D big_query <- file.path(system.file(package = \"pxweb\"),\n##D \"extdata\", \"examples\", \"json_big_query_example.json\")\n##D px_data <- pxweb_get(url = url, query = big_query)\n## End(Not run)\n\n\n\n"} {"package":"pxweb","topic":"pxweb_get_data","snippet":"### Name: pxweb_get_data\n### Title: Do a GET call to PXWEB API and return a data.frame\n### Aliases: pxweb_get_data\n\n### ** Examples\n\n## Not run: \n##D url <- \"https://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n##D query <- file.path(system.file(package = \"pxweb\"),\n##D \"extdata\", \"examples\", \"json_query_example.json\")\n##D df <- pxweb_get_data(url = url, query = query)\n## End(Not run)\n\n\n\n"} {"package":"pxweb","topic":"pxweb_interactive","snippet":"### Name: pxweb_interactive\n### Title: Find and download data interactively from a PXWEB API\n### Aliases: pxweb_interactive interactive_pxweb\n\n### ** Examples\n\npxweb_api_catalogue() # List apis\n\n## The examples below can only be run in interactive mode\n## x <- pxweb_interactive()\n## x <- pxweb_interactive(x = \"api.scb.se\")\n## x <- pxweb_interactive(x = \"https://api.scb.se/OV0104/v1/doris/en/ssd/BE/BE0101/\")\n## x <- pxweb_interactive(x = \"https://api.scb.se/OV0104/v1/doris/en/ssd/BE/BE0101/BE0101A/\")\n\n\n\n"} {"package":"pxweb","topic":"pxweb_query","snippet":"### Name: pxweb_query\n### Title: Create a PXWEB query\n### Aliases: pxweb_query pxweb_query.character pxweb_query.json\n### pxweb_query.pxweb_query pxweb_query.list pxweb_query.response\n### pxweb_query.pxweb_explorer\n### Keywords: internal\n\n### ** Examples\n\ndims <- list(Alue = c(\"*\"),\n \"Asuntokunnan koko\" = c(\"*\"),\n Talotyyppi = c(\"S\"),\n Vuosi = c(\"*\"))\npxq1 <- pxweb_query(dims)\n\njson_query <- file.path(system.file(package = \"pxweb\"), \n \"extdata\", \"examples\", \"json_query_example.json\")\npxq2 <- pxweb_query(json_query)\n\n\n\n\n"} {"package":"pxweb","topic":"pxweb_query_as_json","snippet":"### Name: pxweb_query_as_json\n### Title: Convert a 'pxweb_query' object to a 'json' string\n### Aliases: pxweb_query_as_json\n\n### ** Examples\n\njson_query <- file.path(system.file(package = \"pxweb\"), \n \"extdata\", \"examples\", \"json_query_example.json\")\npxq <- pxweb_query(json_query)\njson <- pxweb_query_as_json(pxq, pretty = TRUE)\n\n\n\n"} {"package":"pxweb","topic":"pxweb_test_api","snippet":"### Name: pxweb_test_api\n### Title: Test a full or a part of a PXWEB api.\n### Aliases: pxweb_test_api\n\n### ** Examples\n\n## Not run: \n##D url <- \"https://bank.stat.gl/api/v1/en/Greenland/BE/BE01\"\n##D res <- pxweb_test_api(url)\n##D res <- pxweb_test_api(url, test_type=\"touch\")\n## End(Not run)\n\n\n"} {"package":"pxweb","topic":"pxweb_validate_query_with_metadata","snippet":"### Name: pxweb_validate_query_with_metadata\n### Title: Validate a 'pxweb_query' with a 'pxweb_metadata' object\n### Aliases: pxweb_validate_query_with_metadata\n\n### ** Examples\n\n## Not run: \n##D url <- 
\"https://api.scb.se/OV0104/v1/doris/sv/ssd/BE/BE0101/BE0101A/BefolkningNy\"\n##D json_query <- file.path(system.file(package = \"pxweb\"), \n##D \"extdata\", \"examples\", \"json_query_example.json\")\n##D pxq <- pxweb_query(json_query)\n##D pxweb_validate_query_with_metadata(pxq, pxweb_get(url))\n## End(Not run)\n\n\n\n"} {"package":"STREAK","topic":"receptorAbundanceEstimation","snippet":"### Name: receptorAbundanceEstimation\n### Title: Receptor abundance estimation for single cell RNA-sequencing\n### (scRNA-seq) data using gene set scoring and thresholding.\n### Aliases: receptorAbundanceEstimation\n\n### ** Examples\n\ndata(\"train.malt.rna.mat\")\ndata(\"train.malt.adt.mat\")\nreceptor.geneset.matrix.out <- receptorGeneSetConstruction(train.rnaseq =\n train.malt.rna.mat[1:100,1:80],\n train.citeseq =\n train.malt.adt.mat[1:100,1:2],\n rank.range.end = 70,\n min.consec.diff = 0.01,\n rep.consec.diff = 2,\n manual.rank = NULL,\n seed.rsvd = 1)\ndim(receptor.geneset.matrix.out)\nhead(receptor.geneset.matrix.out)\ndata(\"target.malt.rna.mat\")\nreceptor.abundance.estimates.out <- receptorAbundanceEstimation(target.rnaseq =\n target.malt.rna.mat[1:200,1:80],\n receptor.geneset.matrix =\n receptor.geneset.matrix.out,\n num.genes = 10, rank.range.end = 70,\n min.consec.diff = 0.01,\n rep.consec.diff = 2,\n manual.rank = NULL, seed.rsvd = 1,\n max.num.clusters = 4, seed.ckmeans = 2)\ndim(receptor.abundance.estimates.out)\nhead(receptor.abundance.estimates.out)\n\n\n"} {"package":"STREAK","topic":"receptorGeneSetConstruction","snippet":"### Name: receptorGeneSetConstruction\n### Title: Gene sets weights membership matrix construction for receptor\n### abundance estimation.\n### Aliases: receptorGeneSetConstruction\n\n### ** Examples\n\ndata(\"train.malt.rna.mat\")\ndata(\"train.malt.adt.mat\")\nreceptor.geneset.matrix.out <- receptorGeneSetConstruction(train.rnaseq =\n train.malt.rna.mat[1:100,1:80],\n train.citeseq =\n train.malt.adt.mat[1:100,1:2],\n rank.range.end = 70,\n min.consec.diff = 0.01,\n rep.consec.diff = 2,\n manual.rank = NULL, seed.rsvd = 1)\ndim(receptor.geneset.matrix.out)\nhead(receptor.geneset.matrix.out)\n\n\n"} {"package":"lowpassFilter","topic":"getConvolution","snippet":"### Name: helpFunctionsFilter\n### Title: Convolved piecewise constant signals\n### Aliases: getConvolution getSignalJump getConvolutionJump getSignalPeak\n### getConvolutionPeak\n### Keywords: nonparametric\n\n### ** Examples\n\n# creating and plotting a signal with a single jump at 0 from 0 to 1\ntime <- seq(-2, 13, 0.01)\nsignal <- getSignalJump(time, 0, 0, 1)\nplot(time, signal, type = \"l\")\n\n# setting up the filter\nfilter <- lowpassFilter(param = list(pole = 4, cutoff = 0.1))\n\n# convolution with the truncated filter\nconvolution <- getConvolutionJump(time, 0, 0, 1, filter)\nlines(time, convolution, col = \"red\")\n\n# without truncating the filter, looks almost equal\nconvolution <- getConvolutionJump(time, 0, 0, 1, filter, truncated = FALSE)\nlines(time, convolution, col = \"blue\")\n\n\n# creating and plotting a signal with a single peak with jumps\n# at 0 and at 3 from 0 to 1 to 0\ntime <- seq(-2, 16, 0.01)\nsignal <- getSignalPeak(time, 0, 3, 1, 0, 0)\nplot(time, signal, type = \"l\")\n\n# convolution with the truncated filter\nconvolution <- getConvolutionPeak(time, 0, 3, 1, 0, 0, filter)\nlines(time, convolution, col = \"red\")\n\n# without truncating the filter, looks almost equal\nconvolution <- getConvolutionPeak(time, 0, 3, 1, 0, 0, filter, truncated = FALSE)\nlines(time, 
convolution, col = \"blue\")\n\n\n# doing the same with getConvolution\n# signal can also be an object of class stepblock instead,\n# e.g. constructed by stepR::stepblock\nsignal <- data.frame(value = c(0, 1, 0), leftEnd = c(-2, 0, 3), rightEnd = c(0, 3, 16))\n\nconvolution <- getConvolution(time, signal, filter)\nlines(time, convolution, col = \"red\")\n\nconvolution <- getConvolution(time, signal, filter, truncated = FALSE)\nlines(time, convolution, col = \"blue\")\n\n\n# more complicated signal\ntime <- seq(-2, 21, 0.01)\nsignal <- data.frame(value = c(0, 10, 0, 50, 0), leftEnd = c(-2, 0, 3, 6, 8),\n rightEnd = c(0, 3, 6, 8, 21))\n\nconvolution <- getConvolution(time, signal, filter)\nplot(time, convolution, col = \"red\", type = \"l\")\n\nconvolution <- getConvolution(time, signal, filter, truncated = FALSE)\nlines(time, convolution, col = \"blue\")\n\n\n"} {"package":"lowpassFilter","topic":"lowpassFilter-package","snippet":"### Name: lowpassFilter-package\n### Title: Lowpass Filtering\n### Aliases: lowpassFilter-package\n### Keywords: package ts nonparametric\n\n### ** Examples\n\n# creates a lowpass filter\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4, cutoff = 0.1), sr = 1e4)\ntime <- 1:4000 / filter$sr\n\n# creates a piecewise constant signal with a single peak\nstepfun <- getSignalPeak(time, cp1 = 0.2, cp2 = 0.2 + 3 / filter$sr, \n value = 20, leftValue = 40, rightValue = 40)\n\n# computes the convolution of the signal with the kernel of the lowpass filter\nsignal <- getConvolutionPeak(time, cp1 = 0.2, cp2 = 0.2 + 3 / filter$sr, \n value = 20, leftValue = 40, rightValue = 40,\n filter = filter)\n\n# generates random numbers that are filtered \ndata <- randomGenerationMA(n = 4000, filter = filter, signal = signal, noise = 1.4)\n\n# generated data\nplot(time, data, pch = 16)\n\n# zoom into the single peak\nplot(time, data, pch = 16, xlim = c(0.199, 0.202), ylim = c(19, 45))\nlines(time, stepfun, col = \"blue\", type = \"s\", lwd = 2)\nlines(time, signal, col = \"red\", lwd = 2)\n\n# use of data randomGeneration instead\ndata <- randomGeneration(n = 4000, filter = filter, signal = signal, noise = 1.4)\n\n# similar result\nplot(time, data, pch = 16, xlim = c(0.199, 0.202), ylim = c(19, 45))\nlines(time, stepfun, col = \"blue\", type = \"s\", lwd = 2)\nlines(time, signal, col = \"red\", lwd = 2)\n\n\n"} {"package":"lowpassFilter","topic":"lowpassFilter","snippet":"### Name: lowpassFilter\n### Title: Lowpass filtering\n### Aliases: lowpassFilter print.lowpassFilter\n### Keywords: ts\n\n### ** Examples\n\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4L, cutoff = 1e3 / 1e4),\n sr = 1e4)\n\n# filter kernel, truncated version\nplot(filter$kernfun, xlim = c(0, 20 / filter$sr))\nt <- seq(0, 20 / filter$sr, 0.01 / filter$sr)\n# truncated version looks very similar\nlines(t, filter$truncatedKernfun(t), col = \"red\")\n\n# filter$len (== 11) is chosen automatically\n# this ensures that filter$acf < 1e-3 for this lag and at all larger lags\nplot(filter$acfun, xlim = c(0, 20 / filter$sr), ylim = c(-0.003, 0.003))\nabline(h = 0.001, lty = \"22\")\nabline(h = -0.001, lty = \"22\")\n\nabline(v = (filter$len - 1L) / filter$sr, col = \"grey\")\nabline(v = filter$len / filter$sr, col = \"red\")\n\n# filter with sr == 1\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4L, cutoff = 1e3 / 1e4))\n\n# filter kernel and its truncated version\nplot(filter$kernfun, xlim = c(0, 20 / filter$sr))\nt <- seq(0, 20 / filter$sr, 0.01 / filter$sr)\n# truncated 
version looks very similar\nlines(t, filter$truncatedKernfun(t), col = \"red\")\n# digitised filter\npoints((0:filter$len + 0.5) / filter$sr, filter$kern, col = \"red\", pch = 16)\n\n# without a shift\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4L, cutoff = 1e3 / 1e4),\n shift = 0)\n# filter$kern starts with zero\npoints(0:filter$len / filter$sr, filter$kern, col = \"blue\", pch = 16)\n\n# much shorter filter\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4L, cutoff = 1e3 / 1e4),\n len = 4L)\npoints((0:filter$len + 0.5) / filter$sr, filter$kern, col = \"darkgreen\", pch = 16)\n\n\n"} {"package":"lowpassFilter","topic":"randomGeneration","snippet":"### Name: randomGeneration\n### Title: Random number generation\n### Aliases: randomGeneration randomGenerationMA\n### Keywords: nonparametric\n\n### ** Examples\n\nfilter <- lowpassFilter(type = \"bessel\", param = list(pole = 4, cutoff = 0.1), sr = 1e4)\ntime <- 1:4000 / filter$sr\nstepfun <- getSignalPeak(time, cp1 = 0.2, cp2 = 0.2 + 3 / filter$sr, \n value = 20, leftValue = 40, rightValue = 40)\nsignal <- getConvolutionPeak(time, cp1 = 0.2, cp2 = 0.2 + 3 / filter$sr, \n value = 20, leftValue = 40, rightValue = 40, filter = filter)\ndata <- randomGenerationMA(n = 4000, filter = filter, signal = signal, noise = 1.4)\n\n# generated data\nplot(time, data, pch = 16)\n\n# zoom into the single peak\nplot(time, data, pch = 16, xlim = c(0.199, 0.202), ylim = c(19, 45))\nlines(time, stepfun, col = \"blue\", type = \"s\", lwd = 2)\nlines(time, signal, col = \"red\", lwd = 2)\n\n# use of randomGeneration instead\ndata <- randomGeneration(n = 4000, filter = filter, signal = signal, noise = 1.4)\n\n# similar result\nplot(time, data, pch = 16, xlim = c(0.199, 0.202), ylim = c(19, 45))\nlines(time, stepfun, col = \"blue\", type = \"s\", lwd = 2)\nlines(time, signal, col = \"red\", lwd = 2)\n\n## heterogeneous noise\n# manual creation of an object of class 'stepblock'\n# instead the function stepblock in the package stepR can be used\nnoise <- data.frame(leftEnd = c(0, 0.2, 0.2 + 3 / filter$sr),\n rightEnd = c(0.2, 0.2 + 3 / filter$sr, 0.4),\n value = c(1, 30, 1))\nattr(noise, \"x0\") <- 0\nclass(noise) <- c(\"stepblock\", class(noise))\n\ndata <- randomGeneration(n = 4000, filter = filter, signal = signal, noise = noise)\n\nplot(time, data, pch = 16, xlim = c(0.199, 0.202), ylim = c(19, 45))\nlines(time, stepfun, col = \"blue\", type = \"s\", lwd = 2)\nlines(time, signal, col = \"red\", lwd = 2)\n\n\n"} {"package":"oncomsm","topic":"check_data","snippet":"### Name: check_data\n### Title: Check a visits data set for correct format\n### Aliases: check_data\n\n### ** Examples\n\ntbl <- data.frame(group_id = \"A\", subject_id = \"A1\", t = 0, state = \"stable\")\nmdl <- create_srpmodel(A = define_srp_prior())\ncheck_data(tbl, mdl)\n\n\n"} {"package":"oncomsm","topic":"compute_pfs","snippet":"### Name: compute_pfs\n### Title: Compute progression-free-survival rate given sample\n### Aliases: compute_pfs\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\nsmpl <- sample_prior(mdl, nsim = 500, seed = 34L)\ndplyr::filter(\n compute_pfs(mdl, t = seq(0, 12), parameter_sample = smpl),\n iter == 1\n)\n\n\n\n"} {"package":"oncomsm","topic":"parameter_sample_to_tibble","snippet":"### Name: parameter_sample_to_tibble\n### Title: Convert parameter sample to data table\n### Aliases: parameter_sample_to_tibble\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\nsmpl <- sample_prior(mdl, seed = 
3647L)\nparameter_sample_to_tibble(mdl, smpl)\n\n\n\n"} {"package":"oncomsm","topic":"plot.srpmodel","snippet":"### Name: plot.srpmodel\n### Title: Summary plot of model prior\n### Aliases: plot.srpmodel\n\n### ** Examples\n\n## Not run: \n##D mdl <- create_srpmodel(A = define_srp_prior())\n##D plot(mdl)\n## End(Not run)\n\n\n"} {"package":"oncomsm","topic":"plot_mstate","snippet":"### Name: plot_mstate\n### Title: Swimmer plot of multi-state data\n### Aliases: plot_mstate\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\ntbl_visits <- sample_predictive(mdl, n_per_group = 5L, nsim = 1, seed = 468L)\ntbl_mstate <- visits_to_mstate(tbl_visits, mdl)\nplot_mstate(tbl_mstate, mdl)\n\n\n\n"} {"package":"oncomsm","topic":"plot_pfs","snippet":"### Name: plot_pfs\n### Title: Plot progression-free-survival function\n### Aliases: plot_pfs\n\n### ** Examples\n\n## Not run: \n##D mdl <- create_srpmodel(A = define_srp_prior())\n##D plot_pfs(mdl)\n## End(Not run)\n\n\n"} {"package":"oncomsm","topic":"plot_response_probability","snippet":"### Name: plot_response_probability\n### Title: Plot the response probability distributions\n### Aliases: plot_response_probability\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\nplot_response_probability(mdl)\n\n\n\n"} {"package":"oncomsm","topic":"plot_transition_times","snippet":"### Name: plot_transition_times\n### Title: Plot the transition times of a model\n### Aliases: plot_transition_times\n\n### ** Examples\n\n## Not run: \n##D mdl <- create_srpmodel(A = define_srp_prior())\n##D plot_transition_times(mdl)\n## End(Not run)\n\n\n"} {"package":"oncomsm","topic":"print.srpmodel","snippet":"### Name: print.srpmodel\n### Title: Print an srpmodel\n### Aliases: print.srpmodel format.srpmodel\n\n### ** Examples\n\nprint(create_srpmodel(A = define_srp_prior()))\nformat(create_srpmodel(A = define_srp_prior()))\n\n\n"} {"package":"oncomsm","topic":"sample_posterior","snippet":"### Name: sample_posterior\n### Title: Sample parameters from a model\n### Aliases: sample_posterior sample_prior\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\ntbl <- tibble::tibble(\n subject_id = c(\"A1\", \"A1\"),\n group_id = c(\"A\", \"A\"),\n t = c(0, 1.5),\n state = c(\"stable\", \"response\")\n)\nsample_posterior(mdl, tbl, seed = 42L)\n\nsample_prior(mdl, seed = 42L)\n\n\n\n"} {"package":"oncomsm","topic":"impute","snippet":"### Name: impute\n### Title: Sample visits from predictive distribution\n### Aliases: impute sample_predictive\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\ntbl <- tibble::tibble(\n subject_id = c(\"A1\", \"A1\"),\n group_id = c(\"A\", \"A\"),\n t = c(0, 1.5),\n state = c(\"stable\", \"stable\")\n)\nimpute(mdl, tbl, 1L, seed = 38L)\n\nsample_predictive(mdl, 1L, 20L, seed = 38L)\n\n\n\n"} {"package":"oncomsm","topic":"simulate_decision_rule","snippet":"### Name: simulate_decision_rule\n### Title: Simulate results under a custom decision rule\n### Aliases: simulate_decision_rule\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\nrule <- function(model, data) {\n tibble::tibble(decision = sample(c(0,1), 1))\n}\nsimulate_decision_rule(mdl, 5, rule, nsim = 3)\n\n\n\n"} {"package":"oncomsm","topic":"srpmodel","snippet":"### Name: srpmodel\n### Title: A stable-response-progression model\n### Aliases: srpmodel srp-model define_srp_prior create_srpmodel\n\n### ** Examples\n\n# a model with prior 25% response rate and variance equivalent to\n# 10 data points (i.e. 
a Beta(2.5, 7.5) distribution).\ngrp <- define_srp_prior(p_mean = 0.25, p_n = 10)\nattr(grp, \"recruitment_rate\")\n\n# a model with two groups and different priors on the respective response\n# probabilities\nmdl <- create_srpmodel(\n A = define_srp_prior(),\n B = define_srp_prior(p_mean = 0.33, p_n = 10)\n)\nmdl$median_t\n\n\n\n"} {"package":"oncomsm","topic":"visits_to_mstate","snippet":"### Name: visits_to_mstate\n### Title: Convert cross-sectional visit data to multi-state format\n### Aliases: visits_to_mstate\n\n### ** Examples\n\nmdl <- create_srpmodel(A = define_srp_prior())\ntbl_visits <- sample_predictive(mdl, n_per_group = 5L, nsim = 1, seed = 468L)\nvisits_to_mstate(tbl_visits, mdl)\n\n\n\n"} {"package":"idarps","topic":"boxplot_w_points","snippet":"### Name: boxplot_w_points\n### Title: boxplot_w_points\n### Aliases: boxplot_w_points\n\n### ** Examples\n\nx <- rnorm(20, mean = 5)\ny <- rnorm(20, mean = 10)\nz <- rnorm(20, mean = 15)\nboxplot_w_points(x, main = \"test\")\nboxplot_w_points(x, y, names = c(\"x\", \"y\"), las = 1, main = \"Data\")\nboxplot_w_points(x, y, z, names = c(\"x\", \"y\", \"z\"), horizontal = TRUE, las = 1, main = \"Data\")\nboxplot_w_points(x, y, z, names = c(\"x\", \"y\", \"z\"), horizontal = FALSE, las = 1, main = \"Data\")\n\n\n"} {"package":"idarps","topic":"hist_compare_to_normal","snippet":"### Name: hist_compare_to_normal\n### Title: hist_compare_to_normal\n### Aliases: hist_compare_to_normal\n\n### ** Examples\n\nn <- 1000\nx <- rnorm(n = n)\nhist_compare_to_normal(x)\nx2 <- rexp(n, rate = 25)\nhist_compare_to_normal(x2, legend_position = \"topright\")\n\n\n"} {"package":"mmeln","topic":"dmnorm","snippet":"### Name: dmnorm\n### Title: Multivariate Normal Density Function\n### Aliases: dmnorm\n### Keywords: density multivariate normal\n\n### ** Examples\n\n\ndmnorm(1:3,1:3,diag(3))\n\n\n\n"} {"package":"mmeln","topic":"estim","snippet":"### Name: estim\n### Title: Maximum Likelihood estimation of the model parameters\n### Aliases: estim estim.mmeln estimmmelnCS1 estimmmelnIND1 I.CS1 I.IND1\n### IE.CS1 IE.IND1 Xinv covNA.wt estimloc.disp.CS1 estimloc.disp.IND1\n### logit pfQ.intermediate.CS1\n### Keywords: mmeln mixture normal multivariate\n\n### ** Examples\n\ndata(exY)\n### estimation of the parameters of the mixture\ntemps=0:2\nmmeln1=mmeln(Y, G = 3, form.loc = list(~temps, ~temps + I(temps^2),\n ~temps + I(temps^2)), form.mel = ~SEXE, cov = \"CS\")\nmmelnSOL1=estim(mmeln1,mu = list(c(1,1), c(2,0,0), c(3,0,0)),\n tau = c(0,0,0,0), sigma = list(c(1,0), c(1,0), c(1,0)))\n\n\n"} {"package":"mmeln","topic":"mmeln.package","snippet":"### Name: mmeln-package\n### Title: Estimation of Multinormal Mixture Distribution\n### Aliases: mmeln.package mmeln-package\n### Keywords: package\n\n### ** Examples\n\n### load an example.\ndata(exY)\n\n### estimation of the parameters of the mixture.\n\ntemps <- factor(1:3)\nmmeln1 <- mmeln(Y, G = 2, form.loc = ~temps-1, form.mel = ~1, cov = \"CS\")\nmix1 <- estim(mmeln1, mu = list(rep(1,3), rep(2,3)), tau = c(0),\n sigma = list(c(1,.6), c(1,.6)), iterlim = 100,tol = 1e-6)\nmix1\nanova(mix1)\nplot(mix1,main=\"Mixture of multivariate normal\")\n\n\n"} {"package":"mmeln","topic":"mmeln","snippet":"### Name: mmeln\n### Title: mmeln : mixture of multivariate normal\n### Aliases: mmeln\n### Keywords: mmeln mixture normal multivariate\n\n### ** Examples\n\ndata(exY)\n### estimation of the parameters of the mixture\ntemps <- 0:2\nmmeln1 <- mmeln(Y, G = 3,\n form.loc = list(~temps, ~temps + I(temps^2), ~temps + I(temps^2)),\n 
form.mel = ~SEXE, cov = \"CS\")\n\n\n"} {"package":"mmeln","topic":"plot.mmeln","snippet":"### Name: plot.mmeln,logLik.mmeln,anova.mmeln,print.mmeln\n### Title: Utility methods for objects of class mmeln\n### Aliases: plot.mmeln logLik.mmeln anova.mmeln print.mmelnSOL cov.tsf\n### multnm\n### Keywords: mmeln mixture normal multivariate\n\n### ** Examples\n\n\n#### load an example.\ndata(exY)\n\n### estimation of the parameters of the mixture\ntemps=1:3\nmmeln1=mmeln(Y,G=2,form.loc=~factor(temps)-1,form.mel=~1,cov=\"CS\")\nmmeln2=mmeln(Y,G=2,form.loc=list(~temps,~I((temps-2)^2)),form.mel=~1,cov=\"CS\")\n\nmix1=estim(mmeln1,mu=list(rep(1,3),rep(2,3)),tau=c(0)\n ,sigma=list(c(1,.4),c(1,.4)),iterlim=100,tol=1e-6)\n\nmix2=estim(mmeln2,mu=list(c(2,1),c(5,-1)),tau=c(0)\n ,sigma=list(c(1,.4),c(1,.4)),iterlim=100,tol=1e-6)\n\n\nmix1\nmix2\n\nanova(mix1,mix2)\nplot(mix1,main=\"Mixture of multivariate normal\")\nplot(mix2,main=\"Mixture of multivariate normal\")\n\n\n"} {"package":"mmeln","topic":"post.mmeln","snippet":"### Name: post.mmeln,entropy.mmeln\n### Title: Posterior probabilities, entropy for mmeln object\n### Aliases: post.mmeln post entropy.mmeln entropy\n### Keywords: mmeln mixture normal multivariate\n\n### ** Examples\n\n#### load an example.\ndata(exY)\n\n### estimation of the parameters of the mixture\ntemps <- factor(1:3)\nmmeln1 <- mmeln(Y, G = 2, form.loc = ~temps - 1, form.mel = ~1, cov = \"CS\")\nmix1 <- estim(mmeln1, mu = list(rep(1,3),rep(2,3)), tau = c(0),\n sigma = list(c(1, .4), c(1, .4)), iterlim = 100, tol = 1e-6)\npost(mix1)\nentropy(mix1)\n\n\n"} {"package":"getspanel","topic":"break_uncertainty","snippet":"### Name: break_uncertainty\n### Title: Estimate Breakdate Uncertainty\n### Aliases: break_uncertainty\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\n\nbreak_uncertainty(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"get_indicators","snippet":"### Name: get_indicators\n### Title: Extract the retained indicators from an 'isatpanel' object\n### Aliases: get_indicators\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\nplot(result)\nplot_grid(result)\n\n# print the retained indicators\nget_indicators(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"isatpanel","snippet":"### 
Name: isatpanel\n### Title: Indicator Saturation for Panel Data\n### Aliases: isatpanel\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\nplot(result)\nplot_grid(result)\n\n# print the retained indicators\nget_indicators(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"plot_counterfactual","snippet":"### Name: plot_counterfactual\n### Title: Plot the Counterfactual Path\n### Aliases: plot_counterfactual\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\nplot(result)\nplot_grid(result)\nplot_counterfactual(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"plot_grid","snippet":"### Name: plot_grid\n### Title: Plotting an isatpanel object\n### Aliases: plot_grid\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\nplot(result)\nplot_grid(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"plot_residuals","snippet":"### Name: plot_residuals\n### Title: Plot Residuals from 'isatpanel' against OLS\n### Aliases: plot_residuals\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n 
plot = FALSE,\n t.pval = 0.01\n)\nplot(result)\nplot_residuals(result)\n## End(No test)\n\n\n"} {"package":"getspanel","topic":"robust_isatpanel","snippet":"### Name: robust_isatpanel\n### Title: Get robust Standard Errors for the isatpanel result\n### Aliases: robust_isatpanel\n\n### ** Examples\n\n## No test: \ndata(EU_emissions_road)\n\n# Group specification\nEU15 <- c(\"Austria\", \"Germany\", \"Denmark\", \"Spain\", \"Finland\", \"Belgium\",\n \"France\", \"United Kingdom\", \"Ireland\", \"Italy\", \"Luxembourg\",\n \"Netherlands\", \"Greece\", \"Portugal\", \"Sweden\")\n\n# Prepare sample and data\nEU_emissions_road_short <- EU_emissions_road[\nEU_emissions_road$country %in% EU15 &\nEU_emissions_road$year >= 2000,\n]\n\n# Run\nresult <- isatpanel(\n data = EU_emissions_road_short,\n formula = ltransport.emissions ~ lgdp + I(lgdp^2) + lpop,\n index = c(\"country\", \"year\"),\n effect = \"twoways\",\n fesis = TRUE,\n plot = FALSE,\n t.pval = 0.01\n)\nrobust_isatpanel(result)\n## End(No test)\n\n\n\n"} {"package":"lazysql","topic":"date_between","snippet":"### Name: date_between\n### Title: Create SQL string to select date between two given dates\n### Aliases: date_between\n\n### ** Examples\n\ndate1 <- as.Date(\"2016-02-22\")\ndate2 <- as.Date(\"2016-02-11\")\n\n# SQL expression for a date range\n(sql_expr1 <- lazysql::date_between(\"STD_1\", c(date1, date2)))\n\n# SQL expression for a single date\n(sql_expr2 <- lazysql::date_between(\"STD_1\", date1))\n\n# sample SQL statements\npaste(\"select * from TEST_TABLE where\", sql_expr1)\n\npaste(\"select * from TEST_TABLE where\", sql_expr2)\n\n\n\n"} {"package":"lazysql","topic":"in_condition","snippet":"### Name: in_condition\n### Title: Create SQL string to select values included in a set of given\n### values\n### Aliases: in_condition\n\n### ** Examples\n\n# SQL expressions\nlazysql::in_condition(\"COL_1\", 1:3)\n\nlazysql::in_condition(\"COL_1\", 1:3, \"not\")\n\nlazysql::in_condition(\"COL_1\", LETTERS[2:3])\n\nlazysql::in_condition(\"COL_1\", LETTERS[2:3], \"not\")\n\n\n\n"} {"package":"lazysql","topic":"natural_key","snippet":"### Name: natural_key\n### Title: Create SQL string for joining on matching natural keys\n### Aliases: natural_key\n\n### ** Examples\n\n# SQL expression\n(sql_expr <- lazysql::natural_key(c(\"TAB1\", \"tab_2\"),c(\"COL1\", \"col_2\")))\n\n# sample SQL JOIN statement\npaste(\"select * from TAB1, TAB2 where\", sql_expr)\n\n\n\n"} {"package":"lazysql","topic":"valid_identifier_regex","snippet":"### Name: valid_identifier_regex\n### Title: Regex pattern to validate SQL identifier names\n### Aliases: valid_identifier_regex\n\n### ** Examples\n\nlazysql::valid_identifier_regex()\n\n\n"} {"package":"intRinsic","topic":"Hidalgo","snippet":"### Name: Hidalgo\n### Title: Fit the 'Hidalgo' model\n### Aliases: Hidalgo print.Hidalgo plot.Hidalgo summary.Hidalgo\n### print.summary.Hidalgo\n\n### ** Examples\n\n## No test: \nX <- replicate(5,rnorm(500))\nX[1:250,1:2] <- 0\nX[1:250,] <- X[1:250,] + 4\noracle <- rep(1:2,rep(250,2))\n# this is just a short example\n# increase the number of iterations to improve mixing and convergence\nh_out <- Hidalgo(X, nsim = 500, burn_in = 500)\nplot(h_out, type = \"B\")\nid_by_class(h_out, oracle)\n## End(No test)\n\n\n\n\n"} {"package":"intRinsic","topic":"Swissroll","snippet":"### Name: Swissroll\n### Title: Generates a noise-free Swiss roll dataset\n### Aliases: Swissroll\n\n### ** Examples\n\nData <- Swissroll(1000)\n\n\n\n"} {"package":"intRinsic","topic":"clustering","snippet":"### 
Name: clustering\n### Title: Posterior similarity matrix and partition estimation\n### Aliases: clustering print.hidalgo_psm plot.hidalgo_psm\n\n### ** Examples\n\n## No test: \nlibrary(salso)\nX <- replicate(5,rnorm(500))\nX[1:250,1:2] <- 0\nh_out <- Hidalgo(X)\nclustering(h_out)\n## End(No test)\n\n\n"} {"package":"intRinsic","topic":"compute_mus","snippet":"### Name: compute_mus\n### Title: Compute the ratio statistics needed for the intrinsic dimension\n### estimation\n### Aliases: compute_mus print.mus print.mus_Nq plot.mus\n\n### ** Examples\n\nX <- replicate(2,rnorm(1000))\nmu <- compute_mus(X, n1 = 1, n2 = 2)\nmudots <- compute_mus(X, n1 = 4, n2 = 8)\npre_hidalgo <- compute_mus(X, n1 = 4, n2 = 8, Nq = TRUE, q = 3)\n\n\n"} {"package":"intRinsic","topic":"generalized_ratios_distribution","snippet":"### Name: generalized_ratios_distribution\n### Title: The Generalized Ratio distribution\n### Aliases: generalized_ratios_distribution dgera rgera\n\n### ** Examples\n\ndraws <- rgera(100,3,5,2)\ndensity <- dgera(3,3,5,2)\n\n\n\n"} {"package":"intRinsic","topic":"gride","snippet":"### Name: gride\n### Title: 'Gride': the Generalized Ratios ID Estimator\n### Aliases: gride print.gride_bayes summary.gride_bayes\n### print.summary.gride_bayes plot.gride_bayes print.gride_mle\n### summary.gride_mle print.summary.gride_mle plot.gride_mle\n\n### ** Examples\n\n## No test: \n X <- replicate(2,rnorm(500))\n dm <- as.matrix(dist(X,method = \"manhattan\"))\n res <- gride(X, nsim = 500)\n res\n plot(res)\n gride(dist_mat = dm, method = \"bayes\", upper_D =10,\n nsim = 500, burn_in = 100)\n## End(No test)\n\n\n"} {"package":"intRinsic","topic":"gride_evolution","snippet":"### Name: gride_evolution\n### Title: 'Gride' evolution based on Maximum Likelihood Estimation\n### Aliases: gride_evolution print.gride_evolution plot.gride_evolution\n\n### ** Examples\n\n## No test: \nX <- replicate(5,rnorm(10000,0,.1))\ngride_evolution(X = X,vec_n1 = 2^(0:5),vec_n2 = 2^(1:6))\n## End(No test)\n\n\n\n"} {"package":"intRinsic","topic":"id_by_class","snippet":"### Name: id_by_class\n### Title: Stratification of the 'id' by an external categorical variable\n### Aliases: id_by_class print.hidalgo_class\n\n### ** Examples\n\n## No test: \nX <- replicate(5,rnorm(500))\nX[1:250,1:2] <- 0\noracle <- rep(1:2,rep(250,2))\nh_out <- Hidalgo(X)\nid_by_class(h_out,oracle)\n## End(No test)\n\n\n\n"} {"package":"intRinsic","topic":"twonn","snippet":"### Name: twonn\n### Title: 'TWO-NN' estimator\n### Aliases: twonn print.twonn_bayes summary.twonn_bayes\n### print.summary.twonn_bayes plot.twonn_bayes print.twonn_linfit\n### summary.twonn_linfit print.summary.twonn_linfit plot.twonn_linfit\n### print.twonn_mle summary.twonn_mle print.summary.twonn_mle\n### plot.twonn_mle\n\n### ** Examples\n\n# dataset with 1000 observations and id = 2\nX <- replicate(2,rnorm(1000))\ntwonn(X)\n# dataset with 1000 observations and id = 3\nY <- replicate(3,runif(1000))\n# Bayesian and least squares estimate from distance matrix\ndm <- as.matrix(dist(Y,method = \"manhattan\"))\ntwonn(dist_mat = dm,method = \"bayes\")\ntwonn(dist_mat = dm,method = \"linfit\")\n\n\n\n"} {"package":"intRinsic","topic":"twonn_decimation","snippet":"### Name: twonn_decimation\n### Title: Estimate the decimated 'TWO-NN' evolution with halving steps or\n### vector of proportions\n### Aliases: twonn_decimation print.twonn_dec_prop plot.twonn_dec_prop\n### print.twonn_dec_by plot.twonn_dec_by\n\n### ** Examples\n\nX <- replicate(4,rnorm(1000))\ntwonn_decimation(X,,method = 
\"proportions\",\n proportions = c(1,.5,.2,.1,.01))\n\n\n\n"} {"package":"rmatio","topic":"read.mat","snippet":"### Name: read.mat\n### Title: Read Matlab file\n### Aliases: read.mat\n\n### ** Examples\n\n## Read a version 4 MAT file with little-endian byte ordering\nfilename <- system.file(\"extdata/matio_test_cases_v4_le.mat\",\n package = \"rmatio\")\nm <- read.mat(filename)\n\n## View content\nstr(m)\n\n## Read a version 4 MAT file with big-endian byte ordering.\nfilename <- system.file(\"extdata/matio_test_cases_v4_be.mat\",\n package = \"rmatio\")\nm <- read.mat(filename)\n\n## View content\nstr(m)\n\n## Read a compressed version 5 MAT file\nfilename <- system.file(\"extdata/matio_test_cases_compressed_le.mat\",\n package = \"rmatio\")\nm <- read.mat(filename)\n\n## View content\nstr(m)\n\n\n"} {"package":"rmatio","topic":"write.mat","snippet":"### Name: write.mat\n### Title: Write Matlab file\n### Aliases: write.mat write.mat,list-method\n### Keywords: methods\n\n### ** Examples\n\n## Not run: \n##D library(Matrix)\n##D filename <- tempfile(fileext = \".mat\")\n##D \n##D ## Example how to read and write an integer vector with rmatio\n##D write.mat(list(a = 1:5), filename = filename)\n##D a <- as.integer(read.mat(filename)[[\"a\"]])\n##D \n##D stopifnot(identical(a, 1:5))\n##D \n##D unlink(filename)\n##D \n##D ## Read a compressed version 5 MAT file\n##D m <- read.mat(system.file(\"extdata/matio_test_cases_compressed_le.mat\",\n##D package = \"rmatio\"))\n##D \n##D ## Write an uncompressed version 5 MAT file\n##D write.mat(m, filename = \"test-uncompressed.mat\", compression = FALSE,\n##D version = \"MAT5\")\n##D \n##D ## Write a compressed version 5 MAT file\n##D write.mat(m, filename = \"test-compressed.mat\", compression = TRUE,\n##D version = \"MAT5\")\n##D \n##D ## Check that the content of the files are identical\n##D identical(read.mat(\"test-uncompressed.mat\"),\n##D read.mat(\"test-compressed.mat\"))\n##D \n##D unlink(\"test-uncompressed.mat\")\n##D unlink(\"test-compressed.mat\")\n##D \n##D ## Example how to read and write a S4 class with rmatio\n##D ## Create 'DemoS4Mat' class\n##D setClass(\"DemoS4Mat\",\n##D representation(a = \"dgCMatrix\",\n##D b = \"integer\",\n##D c = \"matrix\",\n##D d = \"numeric\"))\n##D \n##D ## Create a function to coerce a 'DemoS4Mat' object to a list.\n##D setAs(from = \"DemoS4Mat\",\n##D to = \"list\",\n##D def = function(from) {\n##D return(list(a = from@a,\n##D b = from@b,\n##D c = from@c,\n##D d = from@d))\n##D }\n##D )\n##D \n##D ## Create a function to coerce a list to a 'DemoS4Mat' object.\n##D setAs(from = \"list\",\n##D to = \"DemoS4Mat\",\n##D def = function(from) {\n##D new(\"DemoS4Mat\",\n##D a = from[[\"a\"]],\n##D b = as.integer(from[[\"b\"]]),\n##D c = from[[\"c\"]],\n##D d = from[[\"d\"]])\n##D }\n##D )\n##D \n##D ## Define a method to write a 'DemoS4Mat' object to a MAT file.\n##D setMethod(\"write.mat\",\n##D signature(object = \"DemoS4Mat\"),\n##D function(object,\n##D filename,\n##D compression,\n##D version) {\n##D ## Coerce the 'DemoS4Mat' object to a list and\n##D ## call 'rmatio' 'write.mat' with the list.\n##D write.mat(as(object, \"list\"),\n##D filename,\n##D compression,\n##D version)\n##D }\n##D )\n##D \n##D ## Create a new 'DemoS4Mat' object\n##D demoS4mat <- new(\"DemoS4Mat\",\n##D a = Matrix(c(0, 0, 0, 0, 0, 0, 1, 0, 0,\n##D 0, 0, 0, 0, 0, 0, 0, 1, 0,\n##D 0, 0, 0, 0, 0, 0, 0, 0, 1),\n##D nrow = 3,\n##D ncol = 9,\n##D byrow = TRUE,\n##D sparse = TRUE),\n##D b = 1:5,\n##D c = matrix(as.numeric(1:9), nrow = 
3),\n##D d = c(6.0, 7.0, 8.0))\n##D \n##D ## Write to MAT file\n##D write.mat(demoS4mat, filename)\n##D \n##D ## Read the MAT file\n##D demoS4mat_2 <- as(read.mat(filename), \"DemoS4Mat\")\n##D \n##D ## Check result\n##D stopifnot(identical(demoS4mat, demoS4mat_2))\n##D \n##D unlink(filename)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_add","snippet":"### Name: neptune_add\n### Title: Adds the provided tag or tags to the run's tags.\n### Aliases: neptune_add\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_add(run['sys/tags'], 'lgbm')\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_assign","snippet":"### Name: neptune_assign\n### Title: Assigns the provided value to the field.\n### Aliases: neptune_assign\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_assign(run['parameters'], list(epochs=100, lr=0.01))\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_clear","snippet":"### Name: neptune_clear\n### Title: Removes all tags from the StringSet.\n### Aliases: neptune_clear\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_clear(run['sys/tags'])\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_delete_files","snippet":"### Name: neptune_delete_files\n### Title: Delete the file or files specified by paths from the FileSet\n### stored on the Neptune servers.\n### Aliases: neptune_delete_files\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_delete_files(run['artifacts/images'], \"path/to/file\")\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_download","snippet":"### Name: neptune_download\n### Title: Downloads all the files that are referenced in the field.\n### Aliases: neptune_download\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. 
For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\",\n##D run='AR-2', # Neptune Run ID of a run with artifact\n##D mode='read-only')\n##D neptune_download(run['artifacts/images'], destination='datasets/train/images')\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_download_last","snippet":"### Name: neptune_download_last\n### Title: Downloads the last File stored in the series from Neptune\n### servers and saves it locally.\n### Aliases: neptune_download_last\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D df <- neptune_download_last(run['train/predictions'])\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_exists","snippet":"### Name: neptune_exists\n### Title: Checks if there is any field or namespace under the specified\n### path.\n### Aliases: neptune_exists\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D # We are using api token for an anonymous user neptuner. \n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D run['lr'] <- 0.001\n##D neptune_exists(run, 'lr')\n##D neptune_exists(run, 'notlr')\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch","snippet":"### Name: neptune_fetch\n### Title: Fetch values of all non-File Atom fields as a named list.\n### Aliases: neptune_fetch\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D resumed_run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\",\n##D run=\"HEL-3\")\n##D params <- neptune_fetch(resumed_run['model/parameters'])\n##D run_data <- neptune_fetch(resumed_run)\n##D print(run_data)\n##D # this will print out all Atom attributes stored in run as a named list\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch_files_list","snippet":"### Name: neptune_fetch_files_list\n### Title: Fetches a list of artifact files from the Neptune servers.\n### Aliases: neptune_fetch_files_list\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(project='',\n##D api_token='',\n##D run='AR-2', # Neptune Run ID of a run with artifact\n##D mode='read-only')\n##D artifact_list <- neptune_fetch_files_list(run['artifacts/images'])\n##D artifact_list[[1]]$file_hash\n##D artifact_list[[1]]$file_path\n##D artifact_list[[1]]$metadata['last_modified']\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch_hash","snippet":"### Name: neptune_fetch_hash\n### Title: Fetches the Hash of the artifact from Neptune servers.\n### Aliases: neptune_fetch_hash\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D # We are using api token for an anonymous user neptuner. 
\n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_fetch_hash(run['artifacts/images'])\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch_last","snippet":"### Name: neptune_fetch_last\n### Title: Fetches the last value stored in the series from Neptune servers.\n### Aliases: neptune_fetch_last\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D last_auc_value <- neptune_fetch_last(run['metrics/auc'])\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch_runs_table","snippet":"### Name: neptune_fetch_runs_table\n### Title: Retrieve runs matching the specified criteria.\n### Aliases: neptune_fetch_runs_table\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D df <- neptune_fetch_runs_table(\"common-r/quickstarts\")\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_fetch_values","snippet":"### Name: neptune_fetch_values\n### Title: Fetches all values stored in the series from Neptune servers.\n### Aliases: neptune_fetch_values\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D df <- neptune_fetch_values(run['metrics/auc'])\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_file_as_html","snippet":"### Name: neptune_file_as_html\n### Title: Converts an object to an HTML File value object.\n### Aliases: neptune_file_as_html\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D df <- data.frame(prediction = runif(10), index = 1:10)\n##D neptune_upload(run['evaluation/predictions'], neptune_file_as_html(df))\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_file_as_image","snippet":"### Name: neptune_file_as_image\n### Title: Static method for converting image objects or image-like objects\n### to an image File value object.\n### Aliases: neptune_file_as_image\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D image <- matrix(runif(16*16), nrow = 16)\n##D neptune_log(run['test/sample_images'], neptune_file_as_image(reticulate::np_array(image)))\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_get_run_url","snippet":"### Name: neptune_get_run_url\n### Title: Returns a direct link to the run in Neptune. It's the same link that\n### is printed at the moment of initialization of the run.\n### Aliases: neptune_get_run_url\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D # We are using api token for an anonymous user neptuner. 
\n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_get_run_url(run)\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_get_structure","snippet":"### Name: neptune_get_structure\n### Title: Returns a run's metadata structure in the form of a named list.\n### Aliases: neptune_get_structure\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D str <- neptune_get_structure(run)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_init","snippet":"### Name: neptune_init\n### Title: Starts a new tracked run and appends it to the top of the Runs\n### table view.\n### Aliases: neptune_init\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_install","snippet":"### Name: neptune_install\n### Title: Install neptune-client along with the required Python environment.\n### Aliases: neptune_install\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D neptune_install()\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_log","snippet":"### Name: neptune_log\n### Title: Logs the provided number or a collection of numbers.\n### Aliases: neptune_log\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_log(run['loss'], 0.1)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_pop","snippet":"### Name: neptune_pop\n### Title: Removes the field or whole namespace stored under the path\n### completely, along with all data associated with it.\n### Aliases: neptune_pop\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D # We are using api token for an anonymous user neptuner. \n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D run['lr'] <- 0.001\n##D neptune_pop(run, 'lr')\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_print_structure","snippet":"### Name: neptune_print_structure\n### Title: Pretty prints the structure of the run's metadata. Paths are\n### ordered lexicographically and the whole structure is neatly colored.\n### Aliases: neptune_print_structure\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_print_structure(run)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_remove","snippet":"### Name: neptune_remove\n### Title: Removes the provided tag or tags from the set.\n### Aliases: neptune_remove\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. 
For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_add(run['Sys/tags'], 'some_tag')\n##D neptune_remove(run['Sys/tags'], 'some_tag')\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_set_api_token","snippet":"### Name: neptune_set_api_token\n### Title: Sets the NEPTUNE_API_TOKEN environment variable\n### Aliases: neptune_set_api_token\n### Keywords: interface\n\n### ** Examples\n\nneptune_set_api_token(\"ANONYMOUS\")\n\n\n"} {"package":"neptune","topic":"neptune_stop","snippet":"### Name: neptune_stop\n### Title: Stop a Neptune run.\n### Aliases: neptune_stop\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_stop(run)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_sync","snippet":"### Name: neptune_sync\n### Title: Synchronizes the run with Neptune servers.\n### Aliases: neptune_sync\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D run['lr'] <- 0.001\n##D neptune_sync(run)\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_track_files","snippet":"### Name: neptune_track_files\n### Title: Saves the artifact metadata.\n### Aliases: neptune_track_files\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. \n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_track_files(run['artifacts/images'], 'datasets/train/images')\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_upload","snippet":"### Name: neptune_upload\n### Title: Uploads the provided file under the specified field path\n### Aliases: neptune_upload\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_upload(run['model'], \"model.RData\")\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_upload_files","snippet":"### Name: neptune_upload_files\n### Title: Uploads the provided file or files and stores them inside the\n### FileSet.\n### Aliases: neptune_upload_files\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_upload_files(run['artifacts/images'], \"path/to/file\")\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"neptune_wait","snippet":"### Name: neptune_wait\n### Title: Wait for all the tracking calls to finish.\n### Aliases: neptune_wait\n### Keywords: interface\n\n### ** Examples\n\n ## Not run: \n##D # We are using api token for an anonymous user neptuner. 
\n##D # For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D neptune_wait(run)\n##D \n## End(Not run)\n\n\n"} {"package":"neptune","topic":"[.neptune.new.metadata_containers.run.Run","snippet":"### Name: [.neptune.new.metadata_containers.run.Run\n### Title: Field lookup\n### Aliases: [.neptune.new.metadata_containers.run.Run\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D run['epochs'] <- 100\n##D run['epochs']\n## End(Not run)\n\n\n"} {"package":"neptune","topic":"[<-.neptune.new.metadata_containers.run.Run","snippet":"### Name: [<-.neptune.new.metadata_containers.run.Run\n### Title: Assigns the provided value to the field.\n### Aliases: [<-.neptune.new.metadata_containers.run.Run\n### Keywords: interface\n\n### ** Examples\n\n## Not run: \n##D # We are using api token for an anonymous user neptuner. For your projects use your private token.\n##D run <- neptune_init(api_token = 'ANONYMOUS',\n##D project = \"common-r/quickstarts\")\n##D run['epochs'] <- 100\n## End(Not run)\n\n\n"} {"package":"monobinShiny","topic":"algo.ui","snippet":"### Name: algo.ui\n### Title: Server side for monobin functions' inputs\n### Aliases: algo.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\talgo.ui(id = \"monobin\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"check.vars","snippet":"### Name: check.vars\n### Title: Check for categorical variables when importing the data\n### Aliases: check.vars\n\n### ** Examples\n\nif \t(interactive()) {\n\tcheck.msg <- check.vars(tbl = rv$db)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"cum.ui","snippet":"### Name: cum.ui\n### Title: cum.bin - monobin functions' inputs\n### Aliases: cum.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"monobinShiny","topic":"desc.report","snippet":"### Name: desc.report\n### Title: Descriptive statistics report\n### Aliases: desc.report\n\n### ** Examples\n\nif \t(interactive()) {\n\tsrv$desc.stat <- withProgress(message = \"Running descriptive statistics report\", \n\t\t\t\t\t\t value = 0, {\n\t\t\t\tdesc.report(target = \"qual\", \n\t\t\t\t\t\trf = rf, \n\t\t\t\t\t\tsc = sc, \n\t\t\t\t\t\tsc.method = sc.method, \n\t\t\t\t\t\tdb = isolate(rv$db))\n\t\t\t\t})\n\t}\n\n\n\n"} {"package":"monobinShiny","topic":"desc.stat","snippet":"### Name: desc.stat\n### Title: Descriptive statistics\n### Aliases: desc.stat\n\n### ** Examples\n\nsuppressMessages(library(monobinShiny))\ndata(gcd)\ndesc.stat(x = gcd$age, y = gcd$qual)\ngcd$age[1:10] <- NA\ngcd$age[50:75] <- Inf\ndesc.stat(x = gcd$age, y = gcd$qual, sc.method = \"together\")\ndesc.stat(x = gcd$age, y = gcd$qual, sc.method = \"separately\")\n\n\n\n"} {"package":"monobinShiny","topic":"di.server","snippet":"### Name: di.server\n### Title: Descriptive statistics and imputation module - server side\n### Aliases: di.server\n\n### ** Examples\n\nif \t(interactive()) {\n\tdi.server(id = 
\"desc.imputation\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"di.ui","snippet":"### Name: di.ui\n### Title: Descriptive statistics and imputation module - user interface\n### Aliases: di.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\tdi.ui(id = \"desc.imputation\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"dm.server","snippet":"### Name: dm.server\n### Title: Data manager module - server side\n### Aliases: dm.server\n\n### ** Examples\n\nif \t(interactive()) {\n\tdm.server(id = \"data.manager\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"dm.ui","snippet":"### Name: dm.ui\n### Title: Data manager module - user interface\n### Aliases: dm.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\tdm.ui(id = \"data.manager\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"hide.dwnl.buttons","snippet":"### Name: hide.dwnl.buttons\n### Title: Hide download buttons from descriptive statistics module\n### Aliases: hide.dwnl.buttons\n\n### ** Examples\n\nif \t(interactive()) {\nobserveEvent(rv$dwnl.sync, {\n\thide.dwnl.buttons(id = \"desc.imputation\")\n\t}, ignoreInit = TRUE)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"iso.ui","snippet":"### Name: iso.ui\n### Title: iso.bin - monobin functions' inputs\n### Aliases: iso.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"monobinShiny","topic":"mb.server","snippet":"### Name: mb.server\n### Title: Monobin module - server side\n### Aliases: mb.server\n\n### ** Examples\n\nif \t(interactive()) {\n\tmb.server(id = \"monobin\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"mb.ui","snippet":"### Name: mb.ui\n### Title: Monobin module - user interface\n### Aliases: mb.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\tmb.ui(id = \"monobin\")\n\t}\n\n\n"} {"package":"monobinShiny","topic":"mdt.ui","snippet":"### Name: mdt.ui\n### Title: mdt.bin - monobin functions' inputs\n### Aliases: mdt.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"monobinShiny","topic":"mono.inputs.check","snippet":"### Name: mono.inputs.check\n### Title: Check for numeric arguments - monobin module\n### Aliases: mono.inputs.check\n\n### ** Examples\n\nif \t(interactive()) {\n\tnum.inp <- mono.inputs.check(x = bin.algo, args.e = args.e)\n\t}\n\n\n\n"} {"package":"monobinShiny","topic":"monobin.fun","snippet":"### Name: monobin.fun\n### Title: Evaluation expression of the selected monobin function and its\n### arguments\n### Aliases: monobin.fun\n\n### ** Examples\n\nif \t(interactive()) {\n\texpr.eval <- monobin.fun(x = algo)\n\t}\nmonobin.fun(x = \"ndr.bin\")\n\n\n\n\n"} {"package":"monobinShiny","topic":"monobin.run","snippet":"### Name: monobin.run\n### Title: Run monobin algorithm for the selected inputs\n### Aliases: monobin.run\n\n### ** 
Examples\n\nif \t(interactive()) {\n\ttbls <- withProgress(message = \"Running the binning algorithm\", \n\t\t\t\t\t value = 0, {\n\t\t\t \t suppressWarnings(\n\t\t\t \t monobin.run(algo = bin.algo, \n\t\t\t\t\t\t target.n = isolate(input$trg.select), \n\t\t\t\t\t\t rf = isolate(input$rf.select), \n\t\t\t\t\t\t sc = scr.check.res[[1]], \n\t\t\t\t\t\t args.e = args.e, \n\t\t\t\t\t\t db = isolate(rv$db))\n\t\t )})\n\n\t}\n\n\n\n"} {"package":"monobinShiny","topic":"monobinShinyApp","snippet":"### Name: monobinShinyApp\n### Title: Starts shiny application for the monobin package\n### Aliases: monobinShinyApp\n\n### ** Examples\n\nif \t(interactive()) {\n\tsuppressMessages(library(monobinShiny))\t\n\tmonobinShinyApp()\n\t}\n\n\n"} {"package":"monobinShiny","topic":"ndr.sts.ui","snippet":"### Name: ndr.sts.ui\n### Title: ndr.bin / sts.bin - monobin functions' inputs\n### Aliases: ndr.sts.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"monobinShiny","topic":"num.inputs","snippet":"### Name: num.inputs\n### Title: Numeric arguments - monobin module\n### Aliases: num.inputs\n\n### ** Examples\n\nif \t(interactive()) {\n\tinp.indx <- num.inputs(x = x)\n\t}\nnum.inputs(x = \"cum.bin\")\n\n\n\n"} {"package":"monobinShiny","topic":"out.impute","snippet":"### Name: out.impute\n### Title: Outliers imputation\n### Aliases: out.impute\n\n### ** Examples\n\nif \t(interactive()) {\n\timp.res <- suppressWarnings(\n\t\t\tout.impute(tbl = rv$db, \t\t\n\t\t\t\t rf = input$rf.out,\n\t\t\t\t ub = upper.pct,\n\t\t\t\t lb = lower.pct,\n\t\t\t\t sc = sca.check.res[[1]])\n\t\t\t)\n\t}\n\n\n\n"} {"package":"monobinShiny","topic":"pct.ui","snippet":"### Name: pct.ui\n### Title: pct.bin - monobin functions' inputs\n### Aliases: pct.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"monobinShiny","topic":"sc.check","snippet":"### Name: sc.check\n### Title: Special cases - check input values\n### Aliases: sc.check\n\n### ** Examples\n\nif \t(interactive()) {\n\tsca.check.res <- sc.check(x = input$sc.all)\n\tscr.check.res <- sc.check(x = input$sc.replace)\n\t}\nsc.check(x = \"NA, NaN, Inf\")\nsc.check(x = \"NA, abc\")\nsc.check(x = \"NaN, abc\")\nsc.check(x = \"Inf, abc\")\nsc.check(x = \"9999999999, abc\")\nsc.check(x = \"NA, NaN, Inf, 9999999999\")\n\n\n"} {"package":"monobinShiny","topic":"sc.impute","snippet":"### Name: sc.impute\n### Title: Special case imputation\n### Aliases: sc.impute\n\n### ** Examples\n\nif \t(interactive()) {\n\timp.res <- suppressWarnings(\n\t\t sc.impute(tbl = rv$db, \n\t\t\t\t rf = rf, \n\t\t\t\t sc = sca.check.res[[1]],\n\t\t\t\t sc.replace = scr.check.res[[1]], \n\t\t\t\t imp.method = imp.method)\n\t\t\t)\n\t}\n\n\n\n"} 
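The monobinShiny snippets above only show sc.check and desc.stat inside fragments of the module server code. A minimal non-interactive sketch of how the two compose, assuming nothing beyond the gcd data set and the calls already shown in these examples (the module code above passes sc.check(...)[[1]] on as the special-case vector; treating the first list element as the parsed values is inferred from that usage): library(monobinShiny); data(gcd); sc.parsed <- sc.check(x = "NA, NaN, Inf") # parse a user-style special-cases string; element [[1]] carries the parsed values per the module snippets; desc.stat(x = gcd$age, y = gcd$qual, sc.method = "separately") # descriptive statistics with special cases summarized separately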
{"package":"monobinShiny","topic":"sync.m23","snippet":"### Name: sync.m23\n### Title: Sync between descriptive statistics and monobin module after\n### data import\n### Aliases: sync.m23\n\n### ** Examples\n\nif \t(interactive()) {\nobserveEvent(rv$sync, {\n\tsync.m23(id = \"desc.imputation\", \n\t\t num.rf = rv$num.rf,\n\t\t module = \"desc\")\n\tsync.m23(id = \"monobin\", \n\t\t num.rf = rv$num.rf,\n\t\t module = \"monobin\")\n\trv$rf.imp <- NULL\n\trv$rf.out <- NULL\n\t}, ignoreInit = TRUE)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"sync.m23.imp","snippet":"### Name: sync.m23.imp\n### Title: Sync between descriptive statistics and monobin module after\n### imputation process\n### Aliases: sync.m23.imp\n\n### ** Examples\n\nif \t(interactive()) {\nobserveEvent(rv$sync2, {\n\trf.update.2 <- c(rv$num.rf[!rv$num.rf%in%rv$target.select.2], rv$rf.imp, rv$rf.out)\n\tsync.m23.imp(id = \"desc.imputation\", \n\t\t num.rf = rf.update.2,\n\t\t module = \"desc\")\n\t}, ignoreInit = TRUE)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"upd.dm","snippet":"### Name: upd.dm\n### Title: Update data manager UI output\n### Aliases: upd.dm\n\n### ** Examples\n\nif \t(interactive()) {\nobserveEvent(rv$dm.uptd, {\n\tupd.dm(id = \"data.manager\", dummy = rv$import.dummy)\n\t}, ignoreInit = TRUE)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"upd.si.m23","snippet":"### Name: upd.si.m23\n### Title: Sync between descriptive statistics and monobin module\n### Aliases: upd.si.m23\n\n### ** Examples\n\nif \t(interactive()) {\nupd.si.m23(upd.rf = upd.rf, \n\t num.rf = num.rf, \t\n\t session = session)\n\t}\n\n\n"} {"package":"monobinShiny","topic":"woe.ui","snippet":"### Name: woe.ui\n### Title: woe.bin - monobin functions' inputs\n### Aliases: woe.ui\n\n### ** Examples\n\nif \t(interactive()) {\n\toutput$algo.args <- renderUI({tagList(switch(algo.select, \"cum.bin\" = cum.ui(id = id),\n\t\t\t\t\t\t\t \"iso.bin\" = iso.ui(id = id),\n\t\t\t\t\t\t\t \"ndr.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"sts.bin\" = ndr.sts.ui(id = id),\n\t\t\t\t\t\t\t \"pct.bin\" = pct.ui(id = id),\n\t\t\t\t\t\t\t \"woe.bin\" = woe.ui(id = id),\n\t\t\t\t\t\t \"mdt.bin\" = mdt.ui(id = id)))\n\t\t})\t\n\t}\n\n\n"} {"package":"roptions","topic":"box.spread","snippet":"### Name: box.spread\n### Title: Box Spread Strategy Function\n### Aliases: box.spread\n\n### ** Examples\n\nbox.spread(100, 105, 95, 110, 3.2, 2.6, 1.1, 2.4)\n\n\n"} {"package":"roptions","topic":"butterfly.call","snippet":"### Name: butterfly.call\n### Title: Butterfly Call Spread Strategy Function\n### Aliases: butterfly.call\n\n### ** Examples\n\nbutterfly.call(100, 95, 105, 2.3, 1.25, 3.2, spread = 'long')\n\n\n"} {"package":"roptions","topic":"butterfly.put","snippet":"### Name: butterfly.put\n### Title: Butterfly Put Spread Strategy Function\n### Aliases: butterfly.put\n\n### ** Examples\n\nbutterfly.put(100, 105, 95, 2.2, 3.2, 1.25, spread = 'long')\n\n\n"} {"package":"roptions","topic":"call.delta","snippet":"### Name: call.delta\n### Title: Call Delta\n### Aliases: call.delta\n\n### ** Examples\n\ncall.delta(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.estimate","snippet":"### Name: call.estimate\n### Title: Option Greek and Estimated Premium of Call Option\n### Aliases: call.estimate\n\n### ** Examples\n\ncall.estimate(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.gamma","snippet":"### Name: call.gamma\n### Title: Call Gamma\n### Aliases: call.gamma\n\n### ** Examples\n\ncall.gamma(100, 105, 0.25, 0.35, 
0.0488)\n\n\n"} {"package":"roptions","topic":"call.greek","snippet":"### Name: call.greek\n### Title: Specified Call Option Greek\n### Aliases: call.greek\n\n### ** Examples\n\ncall.greek('delta', 100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.minorgreek","snippet":"### Name: call.minorgreek\n### Title: Specified Minor Option Greek\n### Aliases: call.minorgreek\n\n### ** Examples\n\ncall.minorgreek('lambda', 100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.premium.est","snippet":"### Name: call.premium.est\n### Title: Estimated Premium of Option Contract\n### Aliases: call.premium.est\n\n### ** Examples\n\ncall.premium.est(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.rho","snippet":"### Name: call.rho\n### Title: Call Rho\n### Aliases: call.rho\n\n### ** Examples\n\ncall.rho(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.spread","snippet":"### Name: call.spread\n### Title: Bull/Bear Call Spread Strategy Function\n### Aliases: call.spread\n\n### ** Examples\n\ncall.spread(1.2, 3.2, 100, 105)\n\n\n"} {"package":"roptions","topic":"call.theta","snippet":"### Name: call.theta\n### Title: Call Theta\n### Aliases: call.theta\n\n### ** Examples\n\ncall.theta(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"call.vega","snippet":"### Name: call.vega\n### Title: Call Vega\n### Aliases: call.vega\n\n### ** Examples\n\ncall.vega(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"cont.rate","snippet":"### Name: cont.rate\n### Title: Continous Rate\n### Aliases: cont.rate\n\n### ** Examples\n\ncont.rate(0.025, 4)\n\n\n"} {"package":"roptions","topic":"iron.condour","snippet":"### Name: iron.condour\n### Title: Iron Condour Strategy Function\n### Aliases: iron.condour\n\n### ** Examples\n\niron.condour(100, 95, 105, 102, 2.3, 1.25, 3.2, 2.3)\n\n\n"} {"package":"roptions","topic":"put.delta","snippet":"### Name: put.delta\n### Title: Put Delta\n### Aliases: put.delta\n\n### ** Examples\n\nput.delta(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.estimate","snippet":"### Name: put.estimate\n### Title: Option Greek and Estimated Premium of Put Option\n### Aliases: put.estimate\n\n### ** Examples\n\nput.estimate(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.gamma","snippet":"### Name: put.gamma\n### Title: Put Gamma\n### Aliases: put.gamma\n\n### ** Examples\n\nput.gamma(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.greek","snippet":"### Name: put.greek\n### Title: Put Greeks\n### Aliases: put.greek\n\n### ** Examples\n\nput.greek('delta', 100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.minorgreek","snippet":"### Name: put.minorgreek\n### Title: Specified Minor Option Greek\n### Aliases: put.minorgreek\n\n### ** Examples\n\nput.minorgreek('lambda', 100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.premium.est","snippet":"### Name: put.premium.est\n### Title: Estimated Premium of Put Option\n### Aliases: put.premium.est\n\n### ** Examples\n\nput.premium.est(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.rho","snippet":"### Name: put.rho\n### Title: Put Rho\n### Aliases: put.rho\n\n### ** Examples\n\nput.rho(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.spread","snippet":"### Name: put.spread\n### Title: Bull/Bear Put Spread Strategy Function\n### Aliases: put.spread\n\n### ** 
Examples\n\nput.spread(1.2, 3.2, 100, 105)\n\n\n"} {"package":"roptions","topic":"put.theta","snippet":"### Name: put.theta\n### Title: Put Theta\n### Aliases: put.theta\n\n### ** Examples\n\nput.theta(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"put.vega","snippet":"### Name: put.vega\n### Title: Put Vega\n### Aliases: put.vega\n\n### ** Examples\n\nput.vega(100, 105, 0.25, 0.35, 0.0488)\n\n\n"} {"package":"roptions","topic":"straddle.long","snippet":"### Name: straddle.long\n### Title: Long Straddle Strategy Function\n### Aliases: straddle.long\n\n### ** Examples\n\nstraddle.long(1.2, 3.2, 100)\n\n\n"} {"package":"roptions","topic":"straddle.short","snippet":"### Name: straddle.short\n### Title: Short Straddle Strategy Function\n### Aliases: straddle.short\n\n### ** Examples\n\nstraddle.short(1.2, 3.2, 100)\n\n\n"} {"package":"roptions","topic":"strangle.long","snippet":"### Name: strangle.long\n### Title: Long Strangle Strategy Function\n### Aliases: strangle.long\n\n### ** Examples\n\nstrangle.long(1.2, 3.2, 100, 105)\n\n\n"} {"package":"roptions","topic":"strangle.short","snippet":"### Name: strangle.short\n### Title: Short Strangle Strategy Function\n### Aliases: strangle.short\n\n### ** Examples\n\nstrangle.short(1.2, 3.2, 100, 105)\n\n\n"} {"package":"jdenticon","topic":"jdenticon","snippet":"### Name: jdenticon\n### Title: Create a Jdenticon.\n### Aliases: jdenticon\n\n### ** Examples\n\n## Not run: \n##D jdenticon(value = 'mango')\n## End(Not run)\n\n\n\n"} {"package":"jdenticon","topic":"jdenticon_npm_install","snippet":"### Name: jdenticon_npm_install\n### Title: Install jdenticon npm dependency.\n### Aliases: jdenticon_npm_install\n\n### ** Examples\n\n## Not run: \n##D jdenticon_npm_install(force = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"NADA2","topic":"ATS","snippet":"### Name: ATS\n### Title: Akritas-Theil-Sen line for censored data\n### Aliases: ATS\n### Keywords: censored trend\n\n### ** Examples\n\n# Both y and x are censored\ndata(PbHeron)\nwith(PbHeron, ATS(Blood, BloodCen, Kidney, KidneyCen))\n\n# x is not censored\ndata(Brumbaugh)\nwith(Brumbaugh,ATS(Hg, HgCen, PctWetland))\n\n\n"} {"package":"NADA2","topic":"ATSmini","snippet":"### Name: ATSmini\n### Title: Kendall's tau and ATS line for censored data\n### Aliases: ATSmini\n\n### ** Examples\n\n# x may not be censored. 
Use the ATS function when x is censored.\ndata(Brumbaugh)\n\nwith(Brumbaugh, ATSmini(Hg, HgCen, SedLOI))\n\n\n"} {"package":"NADA2","topic":"ROSci","snippet":"### Name: ROSci\n### Title: Computes confidence intervals on regression on order statistics\n### (ROS) mean\n### Aliases: ROSci\n\n### ** Examples\n\ndata(Brumbaugh)\nmyros <- NADA::ros(Brumbaugh$Hg,Brumbaugh$HgCen)\n\nsummary(myros)\n\n# ROS Mean\nmean(myros$modeled)\n\n# 95% CI around the ROS mean\nROSci(myros)\n\n\n"} {"package":"NADA2","topic":"Usc","snippet":"### Name: Usc\n### Title: U-scores for (non-interval, single-column) Censored Data\n### Aliases: Usc\n\n### ** Examples\n\ndata(Brumbaugh)\nuscore(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n\n"} {"package":"NADA2","topic":"Usci","snippet":"### Name: Usci\n### Title: Interval-censored U-Score\n### Aliases: Usci\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\n# for demonstration purposes create a lower end concentration interval\nBrumbaugh$lowHg<-Brumbaugh$Hg*(1-Brumbaugh$HgCen)\n\nwith(Brumbaugh,Usci(lowHg,Hg))\n\n\n"} {"package":"NADA2","topic":"anosimPlot","snippet":"### Name: anosimPlot\n### Title: Permutation Analysis of Similarity (anosim) for Censored Data\n### Aliases: anosimPlot\n\n### ** Examples\n\ndata(PbHeron)\n\n# ROS model for each group\nPbHeron.high <- with(subset(PbHeron,DosageGroup==\"High\"),NADA::ros(Blood,BloodCen))\nPbHeron.high <- data.frame(PbHeron.high)\nPbHeron.high$DosageGroup <- \"High\"\n\nPbHeron.low <- with(subset(PbHeron,DosageGroup==\"Low\"),NADA::ros(Blood,BloodCen))\nPbHeron.low <- data.frame(PbHeron.low)\nPbHeron.low$DosageGroup <- \"Low\"\n\nPbHeron.ros=rbind(PbHeron.high,PbHeron.low)\n\n# ANOSIM analysis\nlibrary(vegan)\nPbHeron.anosim <- with(PbHeron.ros,anosim(modeled,DosageGroup))\nsummary(PbHeron.anosim)\n\n# Plot\nanosimPlot(PbHeron.anosim)\n\n\n"} {"package":"NADA2","topic":"bestaic","snippet":"### Name: bestaic\n### Title: Find the lowest AIC multiple regression model\n### Aliases: bestaic\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\n# Multiple regression\nbestaic(Brumbaugh$Hg, Brumbaugh$HgCen, Brumbaugh[, c(\"SedMeHg\",\"PctWetland\", \"SedAVS\")])\n\n\n"} {"package":"NADA2","topic":"binaryClust","snippet":"### Name: binaryClust\n### Title: Cluster Matrix of Binary Censored Data\n### Aliases: binaryClust\n\n### ** Examples\n\ndata(PbHeron)\n\n# without group specified\nbinaryClust(PbHeron[,4:15])\n\n# With Group argument\nbinaryClust(PbHeron[,4:15],group=PbHeron$DosageGroup)\n\n\n"} {"package":"NADA2","topic":"binaryDiss","snippet":"### Name: binaryDiss\n### Title: Binary dissimilarity coefficient matrix\n### Aliases: binaryDiss\n\n### ** Examples\n\ndata(PbHeron)\n\nbinaryDiss(PbHeron$LiverCen)\n\n\n"} {"package":"NADA2","topic":"binaryMDS","snippet":"### Name: binaryMDS\n### Title: Plot Nonmetric Multidimensional Scaling of binary censored data\n### Aliases: binaryMDS\n\n### ** Examples\n\n## No test: \ndata(PbHeron)\n\n# without group specified\nbinaryMDS(PbHeron[,4:15])\n\n# With Group argument\nbinaryMDS(PbHeron[,4:15],group=PbHeron$DosageGroup)\n## End(No test)\n\n\n"} {"package":"NADA2","topic":"binarySim","snippet":"### Name: binarySim\n### Title: Binary similarity coefficient matrix\n### Aliases: binarySim\n\n### ** Examples\n\ndata(PbHeron)\n\nbinarySim(PbHeron$LiverCen)\n\n\n\n"} {"package":"NADA2","topic":"cboxplot","snippet":"### Name: cboxplot\n### Title: Draws censored boxplots\n### Aliases: cboxplot\n\n### ** Examples\n\ndata(PbHeron)\ncboxplot(PbHeron$Liver,PbHeron$LiverCen,PbHeron$Group)\n\n\n"} 
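Read together, the NADA2 snippets sketch a standard censored-data workflow: plot the groups, then test for differences. A minimal sketch using only the PbHeron data and calls that appear in these examples (pairing cboxplot with DosageGroup, rather than Group as shown above, is an assumed but signature-compatible variation): library(NADA2); data(PbHeron); cboxplot(PbHeron$Liver, PbHeron$LiverCen, PbHeron$DosageGroup) # censored boxplots of liver lead, split by dosage group; cen1way(PbHeron$Liver, PbHeron$LiverCen, PbHeron$DosageGroup) # Peto-Peto one-factor test across the same groups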
{"package":"NADA2","topic":"cen1way","snippet":"### Name: cen1way\n### Title: Peto-Peto one-factor test\n### Aliases: cen1way\n\n### ** Examples\n\ndata(PbHeron)\n\n# Two Groups\ncen1way(PbHeron$Liver,PbHeron$LiverCen,PbHeron$DosageGroup)\n\n# More than two groups\ncen1way(PbHeron$Liver,PbHeron$LiverCen,PbHeron$Group)\n\n\n"} {"package":"NADA2","topic":"cen2means","snippet":"### Name: cen2means\n### Title: Censored data two-group test for difference in means\n### Aliases: cen2means\n\n### ** Examples\n\n\ndata(PbHeron)\ncen2means(PbHeron$Liver,PbHeron$LiverCen,PbHeron$DosageGroup)\n\n\n"} {"package":"NADA2","topic":"cen2way","snippet":"### Name: cen2way\n### Title: Parametric Two Factor Fixed Effects ANOVA for censored data\n### Aliases: cen2way\n\n### ** Examples\n\ndata(Gales_Creek)\nGales_Creek$Period <- c(rep(\"early\", 35), rep(\"middle\", 12), rep(\"late\", 16))\nwith(Gales_Creek,cen2way(TCr, CrND, Season, Period))\n\n\n\n"} {"package":"NADA2","topic":"cenCompareCdfs","snippet":"### Name: cenCompareCdfs\n### Title: Comparison of empirical cdf of censored data\n### Aliases: cenCompareCdfs\n\n### ** Examples\n\n\ndata(Brumbaugh)\ncenCompareCdfs(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n# With Weibull distribution\ncenCompareCdfs(Brumbaugh$Hg,Brumbaugh$HgCen,dist3=\"weibull\")\n\n# Using an distribution not supported by this function (yet)\n# you will get an error message\n## Not run: cenCompareCdfs(Brumbaugh$Hg,Brumbaugh$HgCen,dist3=\"beta\")\n\n# With Yname specified\ncenCompareCdfs(Brumbaugh$Hg,Brumbaugh$HgCen,Yname=\"TCE Conc (ug/L)\\nLong Island, NY USA\")\n\n\n"} {"package":"NADA2","topic":"cenCompareQQ","snippet":"### Name: cenCompareQQ\n### Title: Censored Q-Q Plot comparison\n### Aliases: cenCompareQQ\n\n### ** Examples\n\n\ndata(Brumbaugh)\n## No test: \ncenCompareQQ(Brumbaugh$Hg,Brumbaugh$HgCen)\n## End(No test)\n\n\n"} {"package":"NADA2","topic":"cenPredInt","snippet":"### Name: cenPredInt\n### Title: Prediction interval for censored data\n### Aliases: cenPredInt\n### Keywords: interval prediction\n\n### ** Examples\n\ndata(PbHeron)\n\n# Default\ncenPredInt(PbHeron$Liver,PbHeron$LiverCen)\n\n# User defined confidence coefficient\ncenPredInt(PbHeron$Liver,PbHeron$LiverCen, conf=0.5)\n\n# User defined confidence coefficient outside of acceptable range\n# the procedure will stop and give an error.\n# cenPredInt(PbHeron$Liver,PbHeron$LiverCen, conf=1.1)\n\n# User defined prediction interval type\ncenPredInt(PbHeron$Liver,PbHeron$LiverCen,pi.type=\"lower\")\ncenPredInt(PbHeron$Liver,PbHeron$LiverCen,pi.type=\"upper\")\n\n\n\n"} {"package":"NADA2","topic":"cenQQ","snippet":"### Name: cenQQ\n### Title: Q-Q Plot censored data\n### Aliases: cenQQ\n\n### ** Examples\n\n## No test: \ndata(Brumbaugh)\ncenQQ(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n# User defined distribution\ncenQQ(Brumbaugh$Hg,Brumbaugh$HgCen,dist=\"gamma\")\n## End(No test)\n\n\n"} {"package":"NADA2","topic":"cenTolInt","snippet":"### Name: cenTolInt\n### Title: Upper Tolerance interval for censored data\n### Aliases: cenTolInt\n\n### ** Examples\n\n\ndata(PbHeron)\n\n# Default\ncenTolInt(PbHeron$Liver,PbHeron$LiverCen)\n\n# User defined conficence interval\ncenTolInt(PbHeron$Liver,PbHeron$LiverCen,conf=0.75)\n\n# User defined percentile\ncenTolInt(PbHeron$Liver,PbHeron$LiverCen,cover=0.5)\n\n# inputs outside acceptable ranges\n# Will result in errors/warnings\n# cenTolInt(PbHeron$Liver,PbHeron$LiverCen,cover=1.25)\n# cenTolInt(PbHeron$Liver,PbHeron$LiverCen,conf=1.1)\n# 
cenTolInt(PbHeron$Liver,PbHeron$LiverCen,method.fit=\"ROS\")\n\n\n\n"} {"package":"NADA2","topic":"cen_ecdf","snippet":"### Name: cen_ecdf\n### Title: Censored Empirical Cumulative Distribution Function\n### Aliases: cen_ecdf\n### Keywords: CDF ECDF\n\n### ** Examples\n\ndata(PbHeron)\n\n# with groups\nwith(PbHeron, cen_ecdf(Liver, LiverCen, DosageGroup))\n\n# all data\nwith(PbHeron, cen_ecdf(Liver, LiverCen))\n\n\n"} {"package":"NADA2","topic":"cen_paired","snippet":"### Name: cen_paired\n### Title: Censored data paired t-test\n### Aliases: cen_paired\n\n### ** Examples\n\n\ndata(atrazine)\n\ncen_paired(atrazine$June,atrazine$JuneCen,atrazine$Sept,atrazine$SeptCen)\n\n# Comparing standard/guideline value\ncen_paired(atrazine$June, atrazine$JuneCen, 0.01, alternative = \"greater\")\n\n\n"} {"package":"NADA2","topic":"cen_signedranktest","snippet":"### Name: cen_signedranktest\n### Title: Wilcoxon Signed-Rank test for censored data\n### Aliases: cen_signedranktest\n\n### ** Examples\n\n\ndata(atrazine)\n\ncen_signedranktest(atrazine$June,atrazine$JuneCen,atrazine$Sept,atrazine$SeptCen)\n\n\n"} {"package":"NADA2","topic":"cen_signtest","snippet":"### Name: cen_signtest\n### Title: Sign test for censored data\n### Aliases: cen_signtest\n\n### ** Examples\n\n\ndata(atrazine)\n\ncen_signtest(atrazine$June,atrazine$JuneCen,atrazine$Sept,atrazine$SeptCen)\n\n\n"} {"package":"NADA2","topic":"cencorreg","snippet":"### Name: cencorreg\n### Title: Correlation and Regression with censored data\n### Aliases: cencorreg\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\n# One variable\ncencorreg(Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh$SedMeHg)\n\n# One variable with pred.plot=T\ncencorreg(Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh$SedMeHg,pred.plot=TRUE)\n\n# More than one variable for demonstration purposes\ncencorreg(Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh[,c(\"SedMeHg\",\"PctWetland\")])\n\n\n\n"} {"package":"NADA2","topic":"cenperm2","snippet":"### Name: cenperm2\n### Title: Censored two-group permutation test\n### Aliases: cenperm2\n### Keywords: difference permutation test\n\n### ** Examples\n\ndata(PbHeron)\ncenperm2(PbHeron$Liver,PbHeron$LiverCen,PbHeron$DosageGroup,alternative=\"t\")\n\n\n"} {"package":"NADA2","topic":"cenpermanova","snippet":"### Name: cenpermanova\n### Title: Censored data one-factor permutation test\n### Aliases: cenpermanova\n\n### ** Examples\n\n\ndata(PbHeron)\ncenpermanova(PbHeron$Liver,PbHeron$LiverCen,PbHeron$DosageGroup)\n\n\n"} {"package":"NADA2","topic":"cenregQQ","snippet":"### Name: cenregQQ\n### Title: Q-Q plot of censored regression residuals\n### Aliases: cenregQQ\n\n### ** Examples\n\ndata(Brumbaugh)\n\n# One variable\ncenregQQ(Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh$PctWetland)\n\n# More than one variable for demonstration purposes\ncenregQQ(Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh[,c(\"PctWetland\",\"SedLOI\",\"Weight\")])\n\n\n"} {"package":"NADA2","topic":"censeaken","snippet":"### Name: censeaken\n### Title: Seasonal Kendall permutation test on censored data\n### Aliases: censeaken\n\n### ** Examples\n\n## No test: \ndata(Brumbaugh)\n\n# Artificial time and season variables for demonstration purposes\nBrumbaugh$time=1:nrow(Brumbaugh)\nBrumbaugh$sea=as.factor(round(runif(nrow(Brumbaugh),1,4),0))\n\nwith(Brumbaugh,censeaken(time,Hg,HgCen,sea,seaplots = TRUE))\n## End(No test)\n\n\n"} {"package":"NADA2","topic":"centrend","snippet":"### Name: centrend\n### Title: Trend analysis of censored data with a covariate\n### Aliases: centrend\n### Keywords: GAM analysis spline 
trend\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\nBrumbaugh$time=1:nrow(Brumbaugh)\n\nwith(Brumbaugh,centrend(Hg,HgCen,SedTotHg,time.var=time))\n\n\n"} {"package":"NADA2","topic":"centrendsea","snippet":"### Name: centrendsea\n### Title: Trend analysis of censored data with a covariate and seasonal\n### blocks\n### Aliases: centrendsea\n### Keywords: GAM Kendall Seasonal analysis spline trend\n\n### ** Examples\n\n\ndata(Gales_Creek)\nwith(Gales_Creek,centrendsea(TCr,CrND,discharge,dectime,Season))\n\n\n"} {"package":"NADA2","topic":"cfit","snippet":"### Name: cfit\n### Title: Compute an ECDF and Distribution Parameters for Censored Data\n### Aliases: cfit\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\ncfit(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n\n\n"} {"package":"NADA2","topic":"computeS","snippet":"### Name: computeS\n### Title: Kendall's S-statistic for permutations of censored data\n### Aliases: computeS\n\n### ** Examples\n\ndata(Brumbaugh)\n\n# Artificial time and season variables for demonstration purposes\nBrumbaugh$time=1:nrow(Brumbaugh)\nBrumbaugh$sea=as.factor(round(runif(nrow(Brumbaugh),1,4),0))\n\n\nwith(Brumbaugh,computeS(time,Hg,HgCen,sea,R=100))\n\n\n\n"} {"package":"NADA2","topic":"equivalent_n","snippet":"### Name: equivalent_n\n### Title: Censored data sample size\n### Aliases: equivalent_n\n### Keywords: Sample Size censored\n\n### ** Examples\n\ndata(Brumbaugh)\n\nequivalent_n(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n\n"} {"package":"NADA2","topic":"kenplot","snippet":"### Name: kenplot\n### Title: Plot robust median ATS line for censored data\n### Aliases: kenplot\n\n### ** Examples\n\n## No test: \n# Both y and x are censored\ndata(PbHeron)\nwith(PbHeron, kenplot(Blood, BloodCen, Kidney, KidneyCen))\n\n# x is not censored\ndata(Brumbaugh)\nwith(Brumbaugh, kenplot(Hg, HgCen, PctWetland,rep(0, times=length(PctWetland))))\n## End(No test)\n\n\n"} {"package":"NADA2","topic":"ordranks","snippet":"### Name: ordranks\n### Title: Computes ranks of data with one or multiple detection limits\n### Aliases: ordranks\n\n### ** Examples\n\nlibrary(NADA) #For example data\ndata(PbHeron)\n\nordranks(PbHeron[,4:15])\n\n\n\n"} {"package":"NADA2","topic":"partplots","snippet":"### Name: partplots\n### Title: Partial plots for censored MLE regression\n### Aliases: partplots\n\n### ** Examples\n\n\ndata(Brumbaugh)\n\n# For demonstration purposes\npartplots (Brumbaugh$Hg,Brumbaugh$HgCen,Brumbaugh[,c(\"SedMeHg\",\"PctWetland\")])\n\n\n"} {"package":"NADA2","topic":"ppw.test","snippet":"### Name: ppw.test\n### Title: Test for difference in left-censored samples\n### Aliases: ppw.test\n\n### ** Examples\n\ndata(PbHeron)\nppw.test(PbHeron$Liver,PbHeron$LiverCen,PbHeron$Bone,PbHeron$BoneCen)\n\n\n\n"} {"package":"NADA2","topic":"uMDS","snippet":"### Name: uMDS\n### Title: Plot U-score Nonmetric Multidimensional Scaling of censored data\n### Aliases: uMDS\n\n### ** Examples\n\ndata(PbHeron)\n\nPbHeron.u <- uscores(PbHeron[,4:15])\nuMDS(PbHeron.u)\n\n# With group specified\nuMDS(PbHeron.u,group=PbHeron$DosageGroup)\n\n\n"} {"package":"NADA2","topic":"uscore","snippet":"### Name: uscore\n### Title: U-score (individual value)\n### Aliases: uscore\n\n### ** Examples\n\ndata(Brumbaugh)\nuscore(Brumbaugh$Hg,Brumbaugh$HgCen)\n\n\n"} {"package":"NADA2","topic":"uscores","snippet":"### Name: uscores\n### Title: Uscores for multiple columns of censored data\n### Aliases: uscores\n\n### ** Examples\n\ndata(PbHeron)\n\nuscores(PbHeron[,4:15])\n\n\n"} {"package":"POSTm","topic":"p.adjust","snippet":"### Name: p.adjust\n### Title: 
Adjust P-values for Multiple Comparisons\n### Aliases: p.adjust p.adjust.POST\n\n### ** Examples\n\n\ndata(\"POSTmData\")\n\ny <- as.integer(x = metadata[,\"GC\"] == \"BV\")\nX <- metadata[,\"mRace\"]\n\nresult <- post(y = y, \n X = X, \n OTU = otu[,1:20], \n tree = otutree,\n cValues = seq(0,0.05,by=0.01))\n\n p.adjust(p = result, method = c(\"BH\",\"BY\"))\n\n\n\n"} {"package":"POSTm","topic":"plot","snippet":"### Name: plot\n### Title: Plot Phylogenies with Significant OTU\n### Aliases: plot plot.POST\n\n### ** Examples\n\n\ndata(\"POSTmData\")\n\ny <- as.integer(x = metadata[,\"GC\"] == \"BV\")\nX <- metadata[,\"mRace\"]\n\nresult <- post(y = y, \n X = X, \n OTU = otu[,1:20], \n tree = otutree,\n cValues = seq(0,0.05,by=0.01))\n\nplot(x = result)\n\n\n\n"} {"package":"POSTm","topic":"post","snippet":"### Name: post\n### Title: Phylogeny-Guided OTU-Specific Association Test for Microbiome\n### Data\n### Aliases: post\n\n### ** Examples\n\n\ndata(\"POSTmData\")\n\ny <- as.integer(x = metadata[,\"GC\"] == \"BV\")\nX <- metadata[,\"mRace\"]\n\nresult <- post(y = y, \n X = X, \n OTU = otu[,1:20], \n tree = otutree,\n cValues = seq(0,0.05,by=0.01))\n\n\n\n"} {"package":"POSTm","topic":"print","snippet":"### Name: print\n### Title: Print the Primary Results of a post() Analysis\n### Aliases: print print.POST\n\n### ** Examples\n\n\ndata(\"POSTmData\")\n\ny <- as.integer(x = metadata[,\"GC\"] == \"BV\")\nX <- metadata[,\"mRace\"]\n\nresult <- post(y = y, \n X = X, \n OTU = otu[,1:20], \n tree = otutree,\n cValues = seq(0,0.05,by=0.01))\n\nprint(x = result)\n\n\n\n"} {"package":"backtest","topic":"backtest-class","snippet":"### Name: backtest-class\n### Title: Class \"backtest\"\n### Aliases: backtest-class show,backtest-method summary,backtest-method\n### summaryStats,backtest-method means,backtest-method\n### counts,backtest-method totalCounts,backtest-method\n### marginals,backtest-method naCounts,backtest-method\n### turnover,backtest-method ci,backtest-method\n### plot,backtest,missing-method means counts summaryStats totalCounts\n### marginals naCounts turnover ci plot\n### Keywords: classes\n\n### ** Examples\n\n\ndata(starmine)\nbt <- backtest(starmine, in.var = \"smi\", ret.var = \"ret.0.1.m\", by.period = FALSE)\n\n## Summary for a pooled backtest\n\nsummary(bt)\n\n## A natural backtest\n\nbt <- backtest(starmine, in.var = \"smi\", ret.var = \"ret.0.1.m\",\n date.var = \"date\", id.var = \"id\", natural = TRUE, by.period = FALSE)\n\n## Summary for a natural backtest\n\nsummary(bt)\n\n## Other access methods\n\nmeans(bt)\ncounts(bt)\nmarginals(bt)\nnaCounts(bt)\n\n## Plotting methods\n\nplot(bt, type = \"turnover\")\nplot(bt, type = \"return\")\nplot(bt, type = \"cumreturn\")\n\n\n\n"} {"package":"backtest","topic":"backtest","snippet":"### Name: backtest\n### Title: Creating an Object of Class Backtest\n### Aliases: backtest\n### Keywords: file\n\n### ** Examples\n\n\ndata(starmine)\n\n## Backtest with 1 'in.var' and 1 'ret.var'\n\nbt <- backtest(starmine, in.var = \"smi\", ret.var = \"ret.0.1.m\", by.period = FALSE)\nsummary(bt)\n\n## Backtest with 2 'in.var' values, 1 'ret.var', and a 'by.var'\n\nbt <- backtest(starmine, in.var = c(\"smi\", \"cap.usd\"),\n ret.var = \"ret.0.1.m\", by.var = \"sector\", by.period = FALSE)\nsummary(bt)\n\n## Backtest with 1 'in.var', 1 'by.var', and 1 'ret.var'. Number of\n## buckets changed from default of 5 to 4. Change in number of buckets\n## only affects the 'in.var' because the 'by.var' column in 'starmine'\n## contains character data. 
For each value in this column there is a\n## unique category.\n\nbt <- backtest(starmine, in.var = \"smi\", by.var = \"sector\",\n ret.var = \"ret.0.1.m\", buckets = 4, by.period = FALSE)\nsummary(bt)\n\n## Backtest with 1 'in.var', multiple 'ret.var', and a\n## universe restriction\n\nbt <- backtest(starmine, in.var = \"smi\",\n ret.var = c(\"ret.0.1.m\", \"ret.0.6.m\"),\n universe = sector == \"HiTec\", by.period = FALSE)\nsummary(bt)\n\n## Running a natural backtest with 2 'in.vars', 1 'ret.var'\n## 10 buckets\n\nbt <- backtest(starmine, in.var = c(\"smi\",\"cap.usd\"),\n ret.var = \"ret.0.1.m\", date.var = \"date\",\n id.var = \"id\", buckets = 10,\n natural = TRUE, by.period = FALSE)\nsummary(bt)\n\n## The same backtest, but calculating quantiles within periods.\n\nbt <- backtest(starmine, in.var = c(\"smi\",\"cap.usd\"),\n ret.var = \"ret.0.1.m\", date.var = \"date\",\n id.var = \"id\", buckets = 10,\n natural = TRUE, by.period = TRUE)\nsummary(bt)\n\nplot(bt, type = \"turnover\")\nplot(bt, type = \"return\")\nplot(bt, type = \"cumreturn\")\nplot(bt, type = \"cumreturn.split\")\n\n\n\n\n"} {"package":"backtest","topic":"starmine","snippet":"### Name: starmine\n### Title: StarMine Rankings, 1995\n### Aliases: starmine\n### Keywords: datasets\n\n### ** Examples\n\ndata(starmine)\nhead(starmine)\n\n\n"} {"package":"ssev","topic":"compute_sample_size","snippet":"### Name: compute_sample_size\n### Title: Compute sample size\n### Aliases: compute_sample_size\n\n### ** Examples\n\ncompute_sample_size(means=c(0,1), sds=2, N=100)\ncompute_sample_size(means=c(0,1), sds=2, N=10000, power=.9)\ncompute_sample_size(means=c(0,1), sds=c(1,2), N=10000)\ncompute_sample_size(proportions=c(.5,.7), N=5000)\n\n\n"} {"package":"stepR","topic":"MRC","snippet":"### Name: MRC\n### Title: Compute Multiresolution Criterion\n### Aliases: MRC MRCoeff MRC.pvalue MRC.quant MRC.simul chi chi.FFT MRC.FFT\n### MRCoeff.FFT kMRC.pvalue kMRC.quant kMRC.simul\n### Keywords: nonparametric\n\n### ** Examples\n\nset.seed(100)\nall.equal(MRC.simul(100, r = 100),\n sort(monteCarloSimulation(n = 100, r = 100, output = \"maximum\",\n penalty = \"none\", intervalSystem = \"dyaLen\")),\n check.attributes = FALSE)\n\n# simulate signal of 100 data points\nset.seed(100)\nf <- rep(c(0, 2, 0), c(60, 10, 30))\n# add gaussian noise\nx <- f + rnorm(100)\n# compute multiresolution criterion\nm <- MRC(x)\n# compute Monte-Carlo p-value based on 100 simulations\nMRC.pvalue(m[\"max\"], length(x), 100)\n# compute multiresolution coefficients\nM <- MRCoeff(x)\n## No test: \n# plot multiresolution coefficients, colours show p-values below 5% in 1% steps\nop <- par(mar = c(5, 4, 2, 4) + 0.1)\nimage(1:length(x), seq(min(x), max(x), length = ncol(M)), apply(M[,ncol(M):1], 1:2,\n MRC.pvalue, n = length(x), r = 100), breaks = (0:5) / 100,\n col = rgb(1, seq(0, 1, length = 5), 0, 0.75),\n xlab = \"location / left end of interval\", ylab =\"measurement\",\n main = \"Multiresolution Coefficients\",\n sub = paste(\"MRC p-value =\", signif(MRC.pvalue(m[\"max\"], length(x), 100), 3)))\naxis(4, min(x) + diff(range(x)) * ( pretty(1:ncol(M) - 1) ) / dim(M)[2],\n 2^pretty(1:ncol(M) - 1))\nmtext(\"interval lengths\", 4, 3)\n# plot signal and its mean\npoints(x)\nlines(f, lty = 2)\nabline(h = mean(x))\npar(op)\n## End(No test)\n\n\n"} {"package":"stepR","topic":"MRC.1000","snippet":"### Name: MRC.1000\n### Title: Values of the MRC statistic for 1,000 observations (all\n### intervals)\n### Aliases: MRC.1000\n### Keywords: datasets\n\n### ** Examples\n\n# threshold 
value for 95% confidence\nquantile(stepR::MRC.1000, .95)\n\n\n"} {"package":"stepR","topic":"MRC.asymptotic","snippet":"### Name: MRC.asymptotic\n### Title: \"Asymptotic\" values of the MRC statistic (all intervals)\n### Aliases: MRC.asymptotic\n### Keywords: datasets\n\n### ** Examples\n\n# \"asymptotic\" threshold value for 95% confidence\nquantile(stepR::MRC.asymptotic, .95)\n\n\n"} {"package":"stepR","topic":"MRC.asymptotic.dyadic","snippet":"### Name: MRC.asymptotic.dyadic\n### Title: \"Asymptotic\" values of the MRC statistic (dyadic intervals)\n### Aliases: MRC.asymptotic.dyadic\n### Keywords: datasets\n\n### ** Examples\n\n# \"asymptotic\" threshold value for 95% confidence\nquantile(stepR::MRC.asymptotic.dyadic, .95)\n\n\n"} {"package":"stepR","topic":"BesselPolynomial","snippet":"### Name: BesselPolynomial\n### Title: Bessel Polynomials\n### Aliases: BesselPolynomial\n### Keywords: math\n\n### ** Examples\n\n# 15 x^3 + 15 x^2 + 6 x + 1\nBesselPolynomial(3)\n\n\n"} {"package":"stepR","topic":"[.bounds","snippet":"### Name: bounds\n### Title: Bounds based on MRC\n### Aliases: [.bounds bounds bounds.MRC\n### Keywords: nonparametric\n\n### ** Examples\n\ny <- rnorm(100, c(rep(0, 50), rep(1, 50)), 0.5)\nb <- computeBounds(y, q = 4, intervalSystem = \"dyaLen\", penalty = \"none\")\nb <- b[order(b$li, b$ri), ]\nattr(b, \"row.names\") <- seq(along = b$li)\n\n# entries in bounds are recovered by computeBounds\nall.equal(bounds(y, q = 4)$bounds, b) # TRUE\n\n\n# simulate signal of 100 data points\nY <- rpois(100, 1:100 / 10)\n# compute bounds for intervals of dyadic lengths\nb <- bounds(Y, penalty=\"len\", family=\"poisson\", q=4)\n# compute bounds for all intervals\nb <- bounds(Y, penalty=\"len\", family=\"poisson\", q=4, lengths=1:100)\n\n\n"} {"package":"stepR","topic":"compareBlocks","snippet":"### Name: compareBlocks\n### Title: Compare fit blockwise with ground truth\n### Aliases: compareBlocks\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate two Gaussian hidden Markov models of length 1000 with 2 states each\n# with identical transition rates being 0.01 and 0.05, resp., signal-to-noise ratio is 5\nsim <- lapply(c(0.01, 0.05), function(rate)\n contMC(1e3, 0:1, matrix(c(0, rate, rate, 0), 2), param=1/5))\nplot(sim[[1]]$data)\nlines(sim[[1]]$cont, col=\"red\")\n# use smuceR to estimate fit\nfit <- lapply(sim, function(s) smuceR(s$data$y, s$data$x))\nlines(fit[[1]], col=\"blue\")\n# compare fit with (discretised) ground truth\ncompareBlocks(lapply(sim, function(s) s$discr), fit)\n\n\n"} {"package":"stepR","topic":"computeBounds","snippet":"### Name: computeBounds\n### Title: Computation of the bounds\n### Aliases: computeBounds\n### Keywords: nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\ny <- c(rnorm(50), rnorm(50, 1))\n\n# the multiscale constraint\nbounds <- computeBounds(y, alpha = 0.5)\n\n# the order of the bounds depends on intervalSystem and lengths\n# to allow fast computation\n# if a specific order is required it can be reordered by order\n# b is ordered with increasing left indices and increasing right indices\nb <- bounds[order(bounds$li, bounds$ri), ]\nattr(b, \"row.names\") <- seq(along = b$li)\n\n# higher significance level for larger detection power, but less confidence\ncomputeBounds(y, alpha = 0.99)\n\n# smaller significance level for stronger confidence statements, but at\n# the risk of missing 
change-points\ncomputeBounds(y, alpha = 0.05)\n## No test: \n# different interval system, lengths, penalty and given parameter sd\ncomputeBounds(y, alpha = 0.5, intervalSystem = \"dyaLen\",\n lengths = c(1L, 2L, 4L, 8L), penalty = \"weights\",\n weights = c(0.4, 0.3, 0.2, 0.1), sd = 0.5)\n## End(No test) \n# with given q\nidentical(computeBounds(y, q = critVal(100L, alpha = 0.5)), bounds)\nidentical(computeBounds(y, q = critVal(100L, alpha = 0.5, output = \"value\")),\n bounds)\n## No test: \n# the above calls saved and (attempted to) load Monte-Carlo simulations and\n# simulated them for nq = 128 observations\n# in the following call no saving, no loading and simulation for n = 100\n# observations is required, progress of the simulation will be reported\ncomputeBounds(y, alpha = 0.5, messages = 1000L,\n options = list(simulation = \"vector\",\n load = list(), save = list()))\n \n# with given stat to compute q\nstat <- monteCarloSimulation(n = 128L)\nidentical(computeBounds(y, alpha = 0.5, stat = stat),\n computeBounds(y, alpha = 0.5, options = list(load = list())))\n## End(No test)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"computeStat","snippet":"### Name: computeStat\n### Title: Computation of the multiscale statistic\n### Aliases: computeStat\n### Keywords: nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\ny <- rnorm(100)\n# for the default signal = 0 a signal constant 0 is assumed\nidentical(computeStat(y), computeStat(y,\n signal = list(leftIndex = 1L, rightIndex = 100L, value = 0)))\n\n# different constant value\nret <- computeStat(y, signal = 1)\n# penalised multiscale statistic\nidentical(ret$maximum, computeStat(y, signal = 1, output = \"maximum\"))\n# multiscale vector of penalised statistics\nidentical(ret$stat, computeStat(y, signal = 1, output = \"vector\"))\n\ny <- c(rnorm(50), rnorm(50, 1))\n# true signal\ncomputeStat(y, signal = list(leftIndex = c(1L, 51L), rightIndex = c(50L, 100L),\n value = c(0, 1)))\n\n# fit satisfies the multiscale constraint, i.e.\n# the penalised multiscale statistic is not larger than the used global quantile 1\ncomputeStat(y, signal = stepFit(y, q = 1), output = \"maximum\") <= 1\n\n# different interval system, lengths, penalty, given parameter sd\n# and computed for an increased number of observations nq\ncomputeStat(y, signal = list(leftIndex = c(1L, 51L), rightIndex = c(50L, 100L),\n value = c(0, 1)), nq = 128, sd = 0.5,\n intervalSystem = \"dyaLen\", lengths = c(1L, 2L, 4L, 8L), penalty = \"none\")\n\n# family \"hsmuce\"\ncomputeStat(y, signal = mean(y), family = \"hsmuce\")\n\n# family \"mDependentPS\"\nsignal <- list(leftIndex = c(1L, 13L), rightIndex = c(12L, 17L), value = c(0, -1))\ny <- c(rep(0, 13), rep(-1, 4)) + \n as.numeric(arima.sim(n = 17, list(ar = c(), ma = c(0.8, 0.5, 0.3)), sd = 1))\ncovariances <- as.numeric(ARMAacf(ar = c(), ma = c(0.8, 0.5, 0.3), lag.max = 3))\ncomputeStat(y, signal = signal, family = \"mDependentPS\", covariances = covariances)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"contMC","snippet":"### Name: contMC\n### Title: Continuous time Markov chain\n### Aliases: 
contMC\n### Keywords: nonparametric\n\n### ** Examples\n\n# Simulate filtered ion channel recording with two states\nset.seed(9)\n# sampling rate 10 kHz\nsampling <- 1e4\n# tenfold oversampling\nover <- 10\n# 1 kHz 4-pole Bessel-filter, adjusted for oversampling\ncutoff <- 1e3\ndf <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling / over))\n# two states, leaving state 1 at 1 Hz, state 2 at 10 Hz\nrates <- rbind(c(0, 1e0), c(1e1, 0))\n# simulate 5 s, level 0 corresponds to state 1, level 1 to state 2\n# noise level is 0.1 after filtering\nsim <- contMC(5 * sampling, 0:1, rates, sampling=sampling, family=\"gaussKern\",\n param = list(df=df, over=over, sd=0.1))\nsim$cont\nplot(sim$data, pch = \".\")\nlines(sim$discr, col = \"red\")\n# noise level after filtering, estimated from first block\nsd(sim$data$y[1:sim$discr$rightIndex[1]])\n# show autocovariance in first block\nacf(ts(sim$data$y[1:sim$discr$rightIndex[1]], freq=sampling), type = \"cov\")\n# power spectrum in first block\ns <- spec.pgram(ts(sim$data$y[1:sim$discr$rightIndex[1]], freq=sampling), spans=c(200,90))\n# cutoff frequency is where power spectrum is halved\nabline(v=cutoff, h=s$spec[1] / 2, lty = 2)\n\n\n"} {"package":"stepR","topic":"critVal","snippet":"### Name: critVal\n### Title: Critical values\n### Aliases: critVal\n### Keywords: nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\n# vector of critical values\nqVector <- critVal(100L, alpha = 0.5)\n# global quantile\nqValue <- critVal(100L, alpha = 0.5, output = \"value\")\n\n# vector can be computed from the global quantile\nidentical(critVal(100L, q = qValue), qVector)\n\n# for a conservative significance level, stronger confidence statements\ncritVal(100L, alpha = 0.05)\ncritVal(100L, alpha = 0.05, output = \"value\")\n\n# higher significance level for larger detection power, but less confidence\ncritVal(100L, alpha = 0.99)\ncritVal(100L, alpha = 0.99, output = \"value\")\n## No test: \n# different parametric family, different intervalSystem, a subset of lengths,\n# different penalty and given weights\nq <- critVal(100L, alpha = 0.05, family = \"hsmuce\", intervalSystem = \"dyaLen\",\n lengths = c(2L, 4L, 16L, 32L), penalty = \"weights\",\n weights = c(0.4, 0.3, 0.2, 0.1))\n\n# vector of critical values can be given by a vector of length n\nvec <- 1:100\nvec[c(2L, 4L, 16L, 32L)] <- q\nattr(vec, \"n\") <- 128L\nidentical(critVal(100L, q = vec, family = \"hsmuce\", intervalSystem = \"dyaLen\",\n lengths = c(2L, 4L, 16L, 32L)), q)\n\n# with a given Monte-Carlo simulation for nq = 128 observations\nstat <- monteCarloSimulation(128)\ncritVal(n = 100L, alpha = 0.05, stat = stat)\n\n# the above calls saved and (attempted to) load Monte-Carlo simulations and\n# simulated them for nq = 128 observations\n# in the following call no saving, no loading and simulation for n = 100\n# observations is required, progress of the simulation will be reported\ncritVal(n = 100L, alpha = 0.05, messages = 1000L,\n options = list(simulation = \"vector\", load = list(), save = list()))\n\n# only type \"vector\" will be saved and loaded in the workspace\ncritVal(n = 100L, alpha = 0.05, messages = 1000L,\n options = list(simulation = \"vector\", load = list(workspace = \"vector\"),\n save = list(workspace = \"vector\")))\n\n# simulation of type \"matrix\" will be saved in an RDS file\n# saving of type \"vector\" is disabled by passing 
\"\",\n# different seed is set and number of simulations is reduced to r = 1e3\n# to allow faster computation at the price of a less precise result\nfile <- tempfile(pattern = \"file\", tmpdir = tempdir(), fileext = \".RDS\")\ncritVal(n = 100L, alpha = 0.05, seed = 1, r = 1e3,\n options = list(simulation = \"matrix\", load = list(),\n save = list(RDSfile = c(\"\", file))))\nidentical(readRDS(file), monteCarloSimulation(100L, seed = 1, r = 1e3))\n## End(No test)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"dfilter","snippet":"### Name: dfilter\n### Title: Digital filters\n### Aliases: dfilter print.dfilter\n### Keywords: ts\n\n### ** Examples\n\n# 6-pole Bessel filter with cut-off frequency 1 / 100, with length 100 (too short!)\ndfilter(\"bessel\", list(pole = 6, cutoff = 1 / 100), 100)\n# custom filter: running mean of length 3\ndfilter(\"custom\", rep(1, 3))\ndfilter(\"custom\", rep(1, 3))$kern # normalised!\ndfilter(\"custom\", rep(1, 3))$step\n# Gaussian filter with bandwidth 3 and length 11 (from -5 to 5)\ndfilter(\"gauss\", 3, 11)\n\n\n"} {"package":"stepR","topic":"family","snippet":"### Name: family\n### Title: Family of distributions\n### Aliases: family\n### Keywords: distribution\n\n### ** Examples\n\n# illustrating different families fitted to the same binomial data set\nsize <- 200\nn <- 200\n# truth\np <- 10^seq(-3, -0.1, length = n)\n# data\ny <- rbinom(n, size, p)\nplot(y)\nlines(size * p, col = \"red\")\n# fit 4 jumps, binomial family\njumps <- 4\nbfit <- steppath(y, family = \"binomial\", param = size, max.blocks = jumps)\nlines(bfit[[jumps]], col = \"orange\")\n# Gaussian approximation with estimated variance\ngfit <- steppath(y, family = \"gauss\", max.blocks = jumps)\nlines(gfit[[jumps]], col = \"green3\", lty = 2)\n# Poisson approximation\npfit <- steppath(y, family = \"poisson\", max.blocks = jumps)\nlines(pfit[[jumps]], col = \"blue\", lty = 2)\nlegend(\"topleft\", legend = c(\"binomial\", \"gauss\", \"poisson\"), lwd = 2,\n col = c(\"orange\", \"green3\", \"blue\"))\n\n\n"} {"package":"stepR","topic":"intervalSystem","snippet":"### Name: intervalSystem\n### Title: Interval systems\n### Aliases: intervalSystem intervalsystem\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\ny <- c(rnorm(50), rnorm(50, 2))\n\n# interval system of all intervals and all lengths\nfit <- stepFit(y, alpha = 0.5, intervalSystem = \"all\", lengths = 1:100,\n jumpint = TRUE, confband = TRUE)\n\n# default for family \"gauss\" if number of observations is 1000 or less\nidentical(stepFit(y, alpha = 0.5, jumpint = TRUE, confband = TRUE), fit)\n\n# intervalSystem \"dyaLen\" and a subset of lengths\n## No test: \n!identical(stepFit(y, alpha = 0.5, intervalSystem = \"dyaLen\", lengths = c(2, 4, 16),\n jumpint = TRUE, confband = TRUE), fit)\n\n# default for lengths are all possible lengths of the interval system\n# and the parametric family\nidentical(stepFit(y, alpha = 0.5, intervalSystem = \"dyaPar\",\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, intervalSystem = \"dyaPar\", lengths = 2^(0:6),\n jumpint = TRUE, confband = TRUE))\n\n# interval system \"dyaPar\" is default for parametric family \"hsmuce\"\n# length 1 is not possible for this parametric family\nidentical(stepFit(y, alpha = 0.5, family = 
\"hsmuce\",\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, family = \"hsmuce\", intervalSystem = \"dyaPar\",\n lengths = 2^(1:6), jumpint = TRUE, confband = TRUE))\n\n# interval system \"dyaLen\" is default for parametric family \"mDependentPS\"\nidentical(stepFit(y, alpha = 0.5, family = \"mDependentPS\", covariances = c(1, 0.5),\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, family = \"mDependentPS\", covariances = c(1, 0.5),\n intervalSystem = \"dyaLen\", lengths = 2^(0:6),\n jumpint = TRUE, confband = TRUE))\n## End(No test)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"jsmurf","snippet":"### Name: jsmurf\n### Title: Reconstruct filtered piecewise constant functions with noise\n### Aliases: jsmurf\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate filtered ion channel recording with two states\nset.seed(9)\n# sampling rate 10 kHz\nsampling <- 1e4\n# tenfold oversampling\nover <- 10\n# 1 kHz 4-pole Bessel-filter, adjusted for oversampling\ncutoff <- 1e3\ndf.over <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling / over))\n# two states, leaving state 1 at 10 Hz, state 2 at 20 Hz\nrates <- rbind(c(0, 10), c(20, 0))\n# simulate 0.5 s, level 0 corresponds to state 1, level 1 to state 2\n# noise level is 0.3 after filtering\nsim <- contMC(0.5 * sampling, 0:1, rates, sampling=sampling, family=\"gaussKern\",\n param = list(df=df.over, over=over, sd=0.3))\nplot(sim$data, pch = \".\")\nlines(sim$discr, col = \"red\")\n# fit using filter corresponding to sample rate\ndf <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling))\nfit <- jsmurf(sim$data$y, sim$data$x, param=df, r=1e2)\nlines(fit, col = \"blue\")\n# fitted values take filter into account\nlines(sim$data$x, fitted(fit), col = \"green3\", lty = 2)\n\n\n"} {"package":"stepR","topic":"jumpint","snippet":"### Name: jumpint\n### Title: Confidence intervals for jumps and confidence bands for step\n### functions\n### Aliases: jumpint jumpint.stepfit points.jumpint confband\n### confband.stepfit lines.confband\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate Bernoulli data with four blocks\ny <- rbinom(200, 1, rep(c(0.1, 0.7, 0.3, 0.9), each=50))\n# fit step function\nsb <- stepbound(y, family=\"binomial\", param=1, confband=TRUE)\nplot(y, pch=\"|\")\nlines(sb)\n# confidence intervals for jumps\njumpint(sb)\npoints(jumpint(sb), col=\"blue\")\n# confidence band\nconfband(sb)\nlines(confband(sb), lty=2, col=\"blue\")\n\n\n"} {"package":"stepR","topic":"monteCarloSimulation","snippet":"### Name: monteCarloSimulation\n### Title: Monte Carlo simulation\n### Aliases: monteCarloSimulation\n### Keywords: nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\n# monteCarloSimulation will be called in critVal, can be called explicitly\n# object of class MCSimulationVector\nstat <- monteCarloSimulation(n = 100L)\n## No test: \nidentical(critVal(n = 100L, alpha = 0.5, stat = stat),\n critVal(n = 100L, alpha = 0.5,\n options = list(load = list(), simulation = \"matrix\")))\n\n# object of class MCSimulationMaximum\nstat <- monteCarloSimulation(n = 100L, output = \"maximum\")\nidentical(critVal(n = 100L, alpha = 0.5, stat = stat),\n critVal(n = 100L, alpha = 0.5,\n options = list(load = list(), 
simulation = \"vector\")))\n\n# different interval system, lengths and penalty\nmonteCarloSimulation(n = 100L, output = \"maximum\", intervalSystem = \"dyaLen\",\n lengths = c(1L, 2L, 4L, 8L), penalty = \"log\")\n\n# with a different number of iterations, different seed,\n# reported progress and user written rand.gen function\nstat <- monteCarloSimulation(n = 100L, r = 1e3, seed = 1, messages = 100,\n rand.gen = function(data) {rnorm(100)})\n\n# the optional argument sd of parametric family \"gauss\" will be replaced by 1\nidentical(monteCarloSimulation(n = 100L, r = 1e3, sd = 5),\n monteCarloSimulation(n = 100L, r = 1e3, sd = 1))\n\n# simulation for family \"hsmuce\"\nmonteCarloSimulation(n = 100L, family = \"hsmuce\")\n\n# simulation for family \"mDependentGauss\"\n# covariances must be given (can also be given by correlations or filter)\nstat <- monteCarloSimulation(n = 100L, family = \"mDependentPS\",\n covariances = c(1, 0.5, 0.3))\n\n# variance will be standardized to 1\n# output might be on some systems even identical\nall.equal(monteCarloSimulation(n = 100L, family = \"mDependentPS\",\n covariances = c(2, 1, 0.6)), stat)\n## End(No test)\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"neighbours","snippet":"### Name: neighbours\n### Title: Neighbouring integers\n### Aliases: neighbours neighbors\n### Keywords: nonparametric\n\n### ** Examples\n\nneighbours(c(10, 0, 5), r = 1)\nneighbours(c(10, 0, 5), 0:15, r = 1)\n\n\n"} {"package":"stepR","topic":"parametricFamily","snippet":"### Name: parametricFamily\n### Title: Parametric families\n### Aliases: parametricFamily parametricfamily\n### Keywords: distribution\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\n# parametric family \"gauss\": independent gaussian errors with constant variance\nset.seed(1)\nx <- seq(1 / 100, 1, 1 / 100)\ny <- c(rnorm(50), rnorm(50, 2))\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-3, 5))\n\n# computation of SMUCE and its confidence statements\nfit <- stepFit(y, x = x, alpha = 0.5, family = \"gauss\",\n jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\")\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# \"gauss\" is default for family\nidentical(stepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE), fit)\n# missing sd is estimated by sdrobnorm\nidentical(stepFit(y, x = x, alpha = 0.5, family = \"gauss\", sd = sdrobnorm(y),\n jumpint = TRUE, confband = TRUE), fit)\n\n# parametric family \"hsmuce\": independent gaussian errors with also\n# piecewise constant variance\n# estimaton that is robust against variance changes\nset.seed(1)\ny <- c(rnorm(50, 0, 1), rnorm(50, 1, 0.2))\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-2.5, 2))\n\n# computation of HSMUCE and its confidence statements\nfit <- stepFit(y, x = x, alpha = 0.5, family = \"hsmuce\",\n jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\")\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# for comparison SMUCE\nlines(stepFit(y, x = x, 
alpha = 0.5, jumpint = TRUE, confband = TRUE),\n lwd = 3, col = \"blue\", lty = \"22\")\n\n\n# parametric family \"mDependentPS\": m-dependent observations with known covariances\n# observations are generated from a moving average process\nset.seed(1)\ny <- c(rep(0, 50), rep(2, 50)) +\n as.numeric(arima.sim(n = 100, list(ar = c(), ma = c(0.8, 0.5, 0.3)), sd = 0.5))\ncorrelations <- as.numeric(ARMAacf(ar = c(), ma = c(0.8, 0.5, 0.3), lag.max = 3))\ncovariances <- 0.5^2 * correlations\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-2, 4))\n\n# computation of SMUCE for dependent observations with given covariances\nfit <- stepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n covariances = covariances, jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\")\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# for comparison SMUCE for independent gaussian errors\nlines(stepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE),\n lwd = 3, col = \"blue\", lty = \"22\")\n\n# covariance structure can also be given by correlations and sd\nidentical(stepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n correlations = correlations, sd = 0.5,\n jumpint = TRUE, confband = TRUE), fit)\n\n# if sd is missing it will be estimated by sdrobnorm\nidentical(stepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n correlations = correlations, jumpint = TRUE, confband = TRUE),\n stepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n correlations = correlations,\n sd = sdrobnorm(y, lag = length(correlations)),\n jumpint = TRUE, confband = TRUE))\n \n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show) \n\n\n"} {"package":"stepR","topic":"penalty","snippet":"### Name: penalty\n### Title: Penalties\n### Aliases: penalty penalties\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\nset.seed(1)\ny <- c(rnorm(50), rnorm(50, 2))\n\n# penalty \"sqrt\"\nfit <- stepFit(y, alpha = 0.5, penalty = \"sqrt\", jumpint = TRUE, confband = TRUE)\n\n# default for family \"gauss\"\nidentical(stepFit(y, alpha = 0.5, jumpint = TRUE, confband = TRUE), fit)\n\n# penalty \"weights\"\n!identical(stepFit(y, alpha = 0.5, penalty = \"weights\",\n jumpint = TRUE, confband = TRUE), fit)\n\n# penalty \"weights\" is default for parametric family \"hsmuce\"\n# by default equal weights are chosen\nidentical(stepFit(y, alpha = 0.5, family = \"hsmuce\",\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, family = \"hsmuce\", penalty = \"weights\",\n weights = rep(1 / 6, 6), jumpint = TRUE, confband = TRUE))\n\n# different weights\n!identical(stepFit(y, alpha = 0.5, family = \"hsmuce\", weights = 6:1 / sum(6:1),\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, family = \"hsmuce\", penalty = \"weights\",\n weights = rep(1 / 6, 6), jumpint = TRUE, confband = TRUE))\n\n# penalty \"sqrt\" is default for parametric family \"mDependentPS\"\nidentical(stepFit(y, alpha = 0.5, family = \"mDependentPS\", covariances = c(1, 0.5),\n jumpint = TRUE, confband = TRUE),\n stepFit(y, alpha = 0.5, family = \"mDependentPS\", covariances = c(1, 0.5),\n penalty = \"sqrt\", jumpint = TRUE, confband = TRUE))\n\n## Don't show: 
\nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"sdrobnorm","snippet":"### Name: sdrobnorm\n### Title: Robust standard deviation estimate\n### Aliases: sdrobnorm\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate data sample\ny <- rnorm(100, c(rep(1, 50), rep(10, 50)), 2)\n# estimate standard deviation\nsdrobnorm(y)\n\n\n"} {"package":"stepR","topic":"smuceR","snippet":"### Name: smuceR\n### Title: Piecewise constant regression with SMUCE\n### Aliases: smuceR thresh.smuceR\n### Keywords: nonparametric\n\n### ** Examples\n\ny <- rnorm(100, c(rep(0, 50), rep(1, 50)), 0.5)\n\n# fitted function, confidence intervals, and confidence band by stepFit\nall.equal(fitted(smuceR(y, q = 1)), fitted(stepFit(y, q = 1)))\nall.equal(fitted(smuceR(y, alpha = 0.5)),\n fitted(stepFit(y, q = as.numeric(quantile(stepR::MRC.1000, 0.5)))))\nall.equal(fitted(smuceR(y)), fitted(stepFit(y, q = thresh.smuceR(length(y)))))\n\nall.equal(jumpint(smuceR(y, q = 1, jumpint = TRUE)),\n jumpint(stepFit(y, q = 1, jumpint = TRUE)))\nall.equal(confband(smuceR(y, q = 1, confband = TRUE)),\n confband(stepFit(y, q = 1, confband = TRUE)),\n check.attributes = FALSE)\n \n\n# simulate poisson data with two levels\ny <- rpois(100, c(rep(1, 50), rep(4, 50)))\n# compute fit, q is chosen automatically\nfit <- smuceR(y, family=\"poisson\", confband = TRUE)\n# plot result\nplot(y)\nlines(fit)\n# plot confidence intervals for jumps on axis\npoints(jumpint(fit), col=\"blue\")\n# confidence band\nlines(confband(fit), lty=2, col=\"blue\")\n\n# simulate binomial data with two levels\ny <- rbinom(200,3,rep(c(0.1,0.7),c(110,90)))\n# compute fit, q is the 0.9-quantile of the (asymptotic) null distribution\nfit <- smuceR(y, alpha=0.1, family=\"binomial\", param=3, confband = TRUE)\n# plot result\nplot(y)\nlines(fit)\n# plot confidence intervals for jumps on axis\npoints(jumpint(fit), col=\"blue\")\n# confidence band\nlines(confband(fit), lty=2, col=\"blue\")\n\n\n"} {"package":"stepR","topic":"stepFit","snippet":"### Name: stepFit\n### Title: Piecewise constant multiscale inference\n### Aliases: stepFit\n### Keywords: nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\n# generate random observations\ny <- c(rnorm(50), rnorm(50, 1))\nx <- seq(0.01, 1, 0.01)\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-3, 4))\n\n# computation of SMUCE and its confidence statements\nfit <- stepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\")\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# higher significance level for larger detection power, but less confidence\nstepFit(y, x = x, alpha = 0.99, jumpint = TRUE, confband = TRUE)\n\n# smaller significance level for the small risk that the number of\n# change-points is overestimated with probability not more than 5%,\n# but smaller detection power\nstepFit(y, x = x, alpha = 0.05, jumpint = TRUE, confband = TRUE)\n## No test: \n# different interval system, lengths, penalty and given parameter sd\nstepFit(y, x = x, alpha = 0.5, intervalSystem = \"dyaLen\",\n lengths = c(1L, 2L, 4L, 8L), penalty = \"weights\",\n weights = c(0.4, 0.3, 0.2, 0.1), 
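\n# a minimal sketch (cf. the sdrobnorm example above): the naive estimate is\n# inflated by the level shift, the robust one is not\ny2 <- rnorm(100, c(rep(1, 50), rep(10, 50)), 2)\nsd(y2) # roughly sqrt(2^2 + 4.5^2), far above the true 2\nsdrobnorm(y2) # close to 2\n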
sd = 0.5,\n jumpint = TRUE, confband = TRUE)\n## End(No test) \n# with given q\nidentical(stepFit(y, x = x, q = critVal(100L, alpha = 0.5),\n jumpint = TRUE, confband = TRUE), fit)\nidentical(stepFit(y, x = x, q = critVal(100L, alpha = 0.5, output = \"value\"),\n jumpint = TRUE, confband = TRUE), fit)\n## No test: \n# the above calls saved and (attempted to) load Monte-Carlo simulations and\n# simulated them for nq = 128 observations\n# in the following call no saving, no loading and simulation for n = 100\n# observations is required, progress of the simulation will be reported\nstepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE,\n messages = 1000L, options = list(simulation = \"vector\",\n load = list(), save = list()))\n\n# with given stat to compute q\nstat <- monteCarloSimulation(n = 128L)\nidentical(stepFit(y, x = x, alpha = 0.5, stat = stat,\n jumpint = TRUE, confband = TRUE),\n stepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE,\n options = list(load = list())))\n## End(No test)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"stepR-package","snippet":"### Name: stepR-package\n### Title: Multiscale Change-Point Inference\n### Aliases: stepR-package stepR\n### Keywords: package nonparametric\n\n### ** Examples\n\n## Don't show: \nsavePathRcache <- R.cache::getCacheRootPath()\n\nR.cache::setCacheRootPath(path = file.path(R.cache::getCacheRootPath(), \"test\"))\n## End(Don't show)\n\n# generate random observations\nset.seed(1)\nn <- 100L\nx <- seq(1 / n, 1, 1 / n)\nmu <- stepfit(cost = 0, family = \"gauss\", value = c(0, 3, 0, -2, 0), param = NULL,\n leftEnd = x[c(1, 21, 26, 71, 81)],\n rightEnd = x[c(20, 25, 70, 80, 100)], x0 = 0,\n leftIndex = c(1, 21, 26, 71, 81),\n rightIndex = c(20, 25, 70, 80, 100))\nsigma0 <- 0.5\nepsilon <- rnorm(n, 0, sigma0)\ny <- fitted(mu) + epsilon\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-3, 4))\nlines(mu, lwd = 3)\n\n# computation of SMUCE and its confidence statements\nfit <- stepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\", lwd = 3)\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# higher significance level for larger detection power, but less confidence\n# suggested for screening purposes\nstepFit(y, x = x, alpha = 0.9, jumpint = TRUE, confband = TRUE)\n\n# smaller significance level for the small risk that the number of\n# change-points is overestimated with probability not more than 5%,\n# but smaller detection power\nstepFit(y, x = x, alpha = 0.05, jumpint = TRUE, confband = TRUE)\n## No test: \n# different interval system, lengths, penalty and given parameter sd\nstepFit(y, x = x, alpha = 0.5, intervalSystem = \"dyaLen\",\n lengths = c(1L, 2L, 4L, 8L), penalty = \"weights\",\n weights = c(0.4, 0.3, 0.2, 0.1), sd = sigma0,\n jumpint = TRUE, confband = TRUE)\n\n# the above calls saved and (attempted to) load Monte-Carlo simulations and\n# simulated them for nq = 128 observations\n# in the following call no saving, no loading and simulation for n = 100\n# observations is required, progress of the simulation will be reported\nstepFit(y, x = x, alpha = 0.5, jumpint = TRUE, confband = TRUE, messages = 1000L,\n options = list(simulation = \"vector\", load = list(), save = list()))\n## End(No 
test)\n# critVal was called in stepFit; it can also be called explicitly,\n# for instance outside of a for loop to save computation time\nqVector <- critVal(100L, alpha = 0.5)\nidentical(stepFit(y, x = x, q = qVector, jumpint = TRUE, confband = TRUE), fit)\n\nqValue <- critVal(100L, alpha = 0.5, output = \"value\")\nidentical(stepFit(y, x = x, q = qValue, jumpint = TRUE, confband = TRUE), fit)\n\n# computeBounds gives the multiscale constraint\ncomputeBounds(y, alpha = 0.5)\n## No test: \n# monteCarloSimulation will be called in critVal if required\n# can be called explicitly\nstat <- monteCarloSimulation(n = 100L)\nidentical(critVal(n = 100L, alpha = 0.5, stat = stat),\n critVal(n = 100L, alpha = 0.5,\n options = list(load = list(), simulation = \"vector\")))\nidentical(critVal(n = 100L, alpha = 0.5, stat = stat, output = \"value\"),\n critVal(n = 100L, alpha = 0.5, output = \"value\",\n options = list(load = list(), simulation = \"vector\")))\n\nstat <- monteCarloSimulation(n = 100L, output = \"maximum\")\nidentical(critVal(n = 100L, alpha = 0.5, stat = stat),\n critVal(n = 100L, alpha = 0.5,\n options = list(load = list(), simulation = \"vector\")))\nidentical(critVal(n = 100L, alpha = 0.5, stat = stat, output = \"value\"),\n critVal(n = 100L, alpha = 0.5, output = \"value\",\n options = list(load = list(), simulation = \"vector\")))\n## End(No test) \n# fit satisfies the multiscale constraint, i.e.\n# the computed penalized multiscale statistic is not larger than the global quantile\ncomputeStat(y, signal = fit, output = \"maximum\") <= qValue\n# multiscale vector of statistics is componentwise not larger than \n# the vector of critical values\nall(computeStat(y, signal = fit, output = \"vector\") <= qVector)\n\n## No test: \n# family \"hsmuce\"\nset.seed(1)\ny <- c(rnorm(50, 0, 1), rnorm(50, 1, 0.2))\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-2.5, 2))\n\n# computation of HSMUCE and its confidence statements\nfit <- stepFit(y, x = x, alpha = 0.5, family = \"hsmuce\",\n jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\", lwd = 3)\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# for comparison SMUCE, not recommended here\nlines(stepFit(y, x = x, alpha = 0.5,\n jumpint = TRUE, confband = TRUE),\n lwd = 3, col = \"blue\", lty = \"22\")\n\n\n# family \"mDependentPS\"\n# generate observations from a moving average process\nset.seed(1)\ny <- c(rep(0, 50), rep(2, 50)) +\n as.numeric(arima.sim(n = 100, list(ar = c(), ma = c(0.8, 0.5, 0.3)), sd = sigma0))\ncorrelations <- as.numeric(ARMAacf(ar = c(), ma = c(0.8, 0.5, 0.3), lag.max = 3))\ncovariances <- sigma0^2 * correlations\nplot(x, y, pch = 16, col = \"grey30\", ylim = c(-2, 4))\n\n# computation of SMUCE for dependent observations with given covariances\nfit <- stepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n covariances = covariances, jumpint = TRUE, confband = TRUE)\nlines(fit, lwd = 3, col = \"red\", lty = \"22\")\n\n# confidence intervals for the change-point locations\npoints(jumpint(fit), col = \"red\", lwd = 3)\n# confidence band\nlines(confband(fit), lty = \"22\", col = \"darkred\", lwd = 2)\n\n# for comparison SMUCE for independent observations, not recommended here\nlines(stepFit(y, x = x, alpha = 0.5,\n jumpint = TRUE, confband = TRUE),\n lwd = 3, col = \"blue\", lty = \"22\")\n\n# with given correlations, standard deviation will be estimated by 
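\n# a minimal sketch: for m-dependent errors the difference-based robust scale\n# estimate must use a lag beyond the dependence, as in the parametricFamily\n# examples\nsdrobnorm(y, lag = length(correlations))\n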
sdrobnorm\nstepFit(y, x = x, alpha = 0.5, family = \"mDependentPS\",\n correlations = correlations, jumpint = TRUE, confband = TRUE)\n \n \n# examples from version 1.0-0\n# estimating step-functions with Gaussian white noise added\n# simulate a Gaussian hidden Markov model of length 1000 with 2 states\n# with identical transition rates 0.01, and signal-to-noise ratio 2\nsim <- contMC(1e3, 0:1, matrix(c(0, 0.01, 0.01, 0), 2), param=1/2)\nplot(sim$data, cex = 0.1)\nlines(sim$cont, col=\"red\")\n# maximum-likelihood estimation under multiresolution constraints\nfit.MRC <- smuceR(sim$data$y, sim$data$x)\nlines(fit.MRC, col=\"blue\")\n# choose number of jumps using BIC\npath <- steppath(sim$data$y, sim$data$x, max.blocks=1e2)\nfit.BIC <- path[[stepsel.BIC(path)]]\nlines(fit.BIC, col=\"green3\", lty = 2)\n\n# estimate after filtering\n# simulate filtered ion channel recording with two states\nset.seed(9)\n# sampling rate 10 kHz\nsampling <- 1e4\n# tenfold oversampling\nover <- 10\n# 1 kHz 4-pole Bessel-filter, adjusted for oversampling\ncutoff <- 1e3\ndf.over <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling / over))\n# two states, leaving state 1 at 10 Hz, state 2 at 20 Hz\nrates <- rbind(c(0, 10), c(20, 0))\n# simulate 0.5 s, level 0 corresponds to state 1, level 1 to state 2\n# noise level is 0.3 after filtering\nSim <- contMC(0.5 * sampling, 0:1, rates, sampling=sampling, family=\"gaussKern\",\n param = list(df=df.over, over=over, sd=0.3))\nplot(Sim$data, pch = \".\")\nlines(Sim$discr, col = \"red\")\n# fit under multiresolution constraints using filter corresponding to sample rate\ndf <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling))\nFit.MRC <- jsmurf(Sim$data$y, Sim$data$x, param=df, r=1e2)\nlines(Fit.MRC, col = \"blue\")\n# fit using TRANSIT\nFit.trans <- transit(Sim$data$y, Sim$data$x)\nlines(Fit.trans, col = \"green3\", lty=2)\n## End(No test)\n\n## Don't show: \nunlink(R.cache::getCacheRootPath(), force = TRUE, recursive = TRUE)\n\nR.cache::setCacheRootPath(savePathRcache)\n## End(Don't show)\n\n\n"} {"package":"stepR","topic":"stepblock","snippet":"### Name: stepblock\n### Title: Step function\n### Aliases: stepblock [.stepblock print.stepblock plot.stepblock\n### lines.stepblock\n### Keywords: nonparametric\n\n### ** Examples\n\n# step function consisting of 3 blocks: 1 on (0, 3]; 2 on (3, 6], 0 on (6, 8]\n# sampled on the integers 1:10\nf <- stepblock(value = c(1, 2, 0), rightEnd = c(3, 6, 8))\nf\n# show different plot types\nplot(f, type = \"C\")\nlines(f, type = \"E\", lty = 2, col = \"red\")\nlines(f, type = \"B\", lty = 3, col = \"blue\")\nlegend(\"bottomleft\", legend = c(\"C\", \"E\", \"B\"), lty = 1:3, col = c(\"black\", \"red\", \"blue\"))\n\n\n"} {"package":"stepR","topic":"stepbound","snippet":"### Name: stepbound\n### Title: Jump estimation under restrictions\n### Aliases: stepbound stepbound.default stepbound.stepcand\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate poisson data with two levels\ny <- rpois(100, c(rep(1, 50), rep(4, 50)))\n# compute bounds\nb <- bounds(y, penalty=\"len\", family=\"poisson\", q=4)\n# fit step function to bounds\nsb <- stepbound(y, b, family=\"poisson\", confband=TRUE)\nplot(y)\nlines(sb)\n# plot confidence intervals for jumps on axis\npoints(jumpint(sb), col=\"blue\")\n# confidence band\nlines(confband(sb), lty=2, col=\"blue\")\n\n\n"} {"package":"stepR","topic":"stepcand","snippet":"### Name: stepcand\n### Title: Forward selection of candidate jumps\n### Aliases: stepcand\n### Keywords: 
nonparametric\n\n### ** Examples\n\n# simulate 5 blocks (4 jumps) within a total of 100 data points\nb <- c(sort(sample(1:99, 4)), 100)\nf <- rep(rnorm(5, 0, 4), c(b[1], diff(b)))\nrbind(b = b, f = unique(f), lambda = exp(unique(f) / 10) * 20)\n# add gaussian noise\nx <- f + rnorm(100)\n# find 10 candidate jumps\nstepcand(x, max.cand = 10)\n# for poisson observations\ny <- rpois(100, exp(f / 10) * 20)\n# find 10 candidate jumps\nstepcand(y, max.cand = 10, family = \"poisson\")\n# for binomial observations\nsize <- 10\nz <- rbinom(100, size, pnorm(f / 10))\n# find 10 candidate jumps\nstepcand(z, max.cand = 10, family = \"binomial\", param = size)\n\n\n"} {"package":"stepR","topic":"[.stepfit","snippet":"### Name: stepfit\n### Title: Fitted step function\n### Aliases: [.stepfit stepfit print.stepfit plot.stepfit lines.stepfit\n### fitted.stepfit residuals.stepfit logLik.stepfit\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate 5 blocks (4 jumps) within a total of 100 data points\nb <- c(sort(sample(1:99, 4)), 100)\np <- rep(runif(5), c(b[1], diff(b))) # success probabilities\n# binomial observations, each with 10 trials\ny <- rbinom(100, 10, p)\n# find solution with 5 blocks\nfit <- steppath(y, family = \"binomial\", param = 10)[[5]]\nplot(y, ylim = c(0, 10))\nlines(fit, col = \"red\")\n# residual diagnostics for Gaussian data\nyg <- rnorm(100, qnorm(p), 1)\nfitg <- steppath(yg)[[5]]\nplot(yg, ylim = c(0, 10))\nlines(fitg, col = \"red\")\nplot(resid(fitg, yg))\nqqnorm(resid(fitg, yg))\n\n\n"} {"package":"stepR","topic":"steppath","snippet":"### Name: steppath\n### Title: Solution path of step-functions\n### Aliases: steppath steppath.default steppath.stepcand [[.steppath\n### length.steppath print.steppath logLik.steppath\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate 5 blocks (4 jumps) within a total of 100 data points\nb <- c(sort(sample(1:99, 4)), 100)\nf <- rep(rnorm(5, 0, 4), c(b[1], diff(b)))\n# add Gaussian noise\nx <- f + rnorm(100)\n# find 10 candidate jumps\ncand <- stepcand(x, max.cand = 10)\ncand\n# compute solution path\npath <- steppath(cand)\npath\nplot(x)\nlines(path[[5]], col = \"red\")\n# compare result having 5 blocks with truth\nfit <- path[[5]]\nfit\nlogLik(fit)\nAIC(logLik(fit))\ncbind(fit, trueRightEnd = b, trueLevel = unique(f))\n# for poisson observations\ny <- rpois(100, exp(f / 10) * 20)\n# compute solution path, compare result having 5 blocks with truth\ncbind(steppath(y, max.cand = 10, family = \"poisson\")[[5]],\n trueRightEnd = b, trueIntensity = exp(unique(f) / 10) * 20)\n# for binomial observations\nsize <- 10\nz <- rbinom(100, size, pnorm(f / 10))\n# compute solution path, compare result having 5 blocks with truth\ncbind(steppath(z, max.cand = 10, family = \"binomial\", param = size)[[5]],\n trueRightEnd = b, trueIntensity = pnorm(unique(f) / 10))\n# an example where stepcand is not optimal but indices found are close to optimal ones\nblocks <- c(rep(0, 9), 1, 3, rep(1, 9))\nblocks\nstepcand(blocks, max.cand = 3)[,c(\"rightEnd\", \"value\", \"number\")]\n# erroneously puts the \"1\" into the right block in the first step\nsteppath(blocks)[[3]][,c(\"rightEnd\", \"value\")]\n# putting the \"1\" in the middle block is optimal\nsteppath(blocks, max.cand = 3, cand.radius = 1)[[3]][,c(\"rightEnd\", \"value\")]\n# also looking in the 1-neighbourhood remedies the problem\n\n\n"} {"package":"stepR","topic":"stepsel","snippet":"### Name: stepsel\n### Title: Automatic selection of number of jumps\n### Aliases: stepsel stepsel.MRC stepsel.AIC 
stepsel.BIC\n### Keywords: nonparametric\n\n### ** Examples\n\n# simulate 5 blocks (4 jumps) within a total of 100 data points\nb <- c(sort(sample(1:99, 4)), 100)\nf <- rep(rnorm(5, 0, 4), c(b[1], diff(b)))\nrbind(b = b, f = unique(f))\n# add gaussian noise\ny <- f + rnorm(100)\n# find 10 candidate jumps\npath <- steppath(y, max.cand = 10)\n# select number of jumps by simulated MRC with sqrt-penalty\n# thresholded with positive delta, and by BIC\nsel.MRC <- stepsel(path, y, \"MRC\", alpha = 0.05, r = 1e2, penalty = \"sqrt\")\nsel.MRC\ndelta <- .1\nsel.delta <- stepsel(path, y, \"MRC\",\n q = (1 + delta) * sdrobnorm(y) * sqrt(2 * length(y)), penalty = \"none\")\nsel.delta\nsel.BIC <- stepsel(path, type=\"BIC\")\nsel.BIC\n# compare results with truth\nfit.MRC <- path[[sel.MRC]]\nas.data.frame(fit.MRC)\nas.data.frame(path[[sel.delta]])\nas.data.frame(path[[sel.BIC]])\n\n\n"} {"package":"stepR","topic":"transit","snippet":"### Name: transit\n### Title: TRANSIT algorithm for detecting jumps\n### Aliases: transit\n### Keywords: nonparametric\n\n### ** Examples\n\n# estimating step-functions with Gaussian white noise added\n# simulate a Gaussian hidden Markov model of length 1000 with 2 states\n# with identical transition rates 0.01, and signal-to-noise ratio 2\nsim <- contMC(1e3, 0:1, matrix(c(0, 0.01, 0.01, 0), 2), param=1/2)\nplot(sim$data, cex = 0.1)\nlines(sim$cont, col=\"red\")\n# maximum-likelihood estimation under multiresolution constraints\nfit.MRC <- smuceR(sim$data$y, sim$data$x)\nlines(fit.MRC, col=\"blue\")\n# choose number of jumps using BIC\npath <- steppath(sim$data$y, sim$data$x, max.blocks=1e2)\nfit.BIC <- path[[stepsel.BIC(path)]]\nlines(fit.BIC, col=\"green3\", lty = 2)\n\n# estimate after filtering\n# simulate filtered ion channel recording with two states\nset.seed(9)\n# sampling rate 10 kHz\nsampling <- 1e4\n# tenfold oversampling\nover <- 10\n# 1 kHz 4-pole Bessel-filter, adjusted for oversampling\ncutoff <- 1e3\ndf.over <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling / over))\n# two states, leaving state 1 at 10 Hz, state 2 at 20 Hz\nrates <- rbind(c(0, 10), c(20, 0))\n# simulate 0.5 s, level 0 corresponds to state 1, level 1 to state 2\n# noise level is 0.3 after filtering\nSim <- contMC(0.5 * sampling, 0:1, rates, sampling=sampling, family=\"gaussKern\",\n param = list(df=df.over, over=over, sd=0.3))\nplot(Sim$data, pch = \".\")\nlines(Sim$discr, col = \"red\")\n# fit under multiresolution constraints using filter corresponding to sample rate\ndf <- dfilter(\"bessel\", list(pole=4, cutoff=cutoff / sampling))\nFit.MRC <- jsmurf(Sim$data$y, Sim$data$x, param=df, r=1e2)\nlines(Fit.MRC, col = \"blue\")\n# fit using TRANSIT\nFit.trans <- transit(Sim$data$y, Sim$data$x)\nlines(Fit.trans, col = \"green3\", lty=2)\n\n\n"} {"package":"pmultinom","topic":"invert.pmultinom","snippet":"### Name: invert.pmultinom\n### Title: Calculate the sample size such that the probability of a result\n### is a given amount.\n### Aliases: invert.pmultinom\n\n### ** Examples\n\n# How many cells must be sequenced to have a 95% chance of\n# observing at least 2 from each subclone of a tumor? 
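\n# a minimal sketch (assuming pmultinom() accepts the same lower= argument as\n# invert.pmultinom() below): the returned sample size of 192 can be checked via\n# pmultinom(lower = c(2, 2, 2, 2, 2, 0) - 1, size = 192,\n# probs = c(43, 20, 82, 17, 5, 37)/204, method = \"exact\")\n# whose value should be at least the target probability of 0.95\n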
(Data\n# from Casasent et al (2018); see vignette(\"pmultinom\") for\n# details of this example)\n\n# Input: \nncells <- 204\nsubclone.freqs <- c(43, 20, 82, 17, 5, 37)/ncells\ntarget.number <- c(2, 2, 2, 2, 2, 0)\nlower.bound <- target.number - 1\ninvert.pmultinom(lower=lower.bound, probs=subclone.freqs,\n target.prob=.95, method=\"exact\")\n# Output:\n# [1] 192\n\n\n\n"} {"package":"pmultinom","topic":"pmultinom","snippet":"### Name: pmultinom\n### Title: Calculate the probability that a multinomial random vector is\n### between, elementwise, two other vectors.\n### Aliases: pmultinom\n\n### ** Examples\n\n# To determine the bias of a die, Rudolph Wolf rolled it\n# 20,000 times. Side 2 was the most frequently observed, and\n# was observed 3631 times. What is the probability that a\n# fair die would have a side observed this many times or\n# more?\n\n# Input: \n1 - pmultinom(upper=rep.int(3630, 6), size=20000,\n probs=rep.int(1/6, 6), method=\"exact\")\n# Output:\n# [1] 7.379909e-08\n\n# Therefore we conclude that the die is biased. Fougere\n# (1988) attempted to account for these biases by assuming\n# certain manufacturing errors. Repeating the calculation\n# with the distribution Fougere derived:\n\n# Input:\ntheoretical.dist <- c(.17649, .17542, .15276, .15184, .17227, .17122)\n1 - pmultinom(upper=rep.int(3630, 6), size=20000,\n probs=theoretical.dist, method=\"exact\")\n# Output:\n# [1] 0.043362\n\n# Therefore we conclude that the die still seems more biased\n# than Fougere's model can explain.\n\n\n\n"} {"package":"mixopt","topic":"c_mixopt_list","snippet":"### Name: c_mixopt_list\n### Title: Combines mixopt_list objects\n### Aliases: c_mixopt_list\n\n### ** Examples\n\nc_mixopt_list(NULL, as.mixopt_list(1:5), NULL, as.mixopt_list(letters[1:5]))\nc_mixopt_list(as.mixopt_list(1:3), NULL)\n\n\n"} {"package":"mixopt","topic":"full_index_line_search","snippet":"### Name: full_index_line_search\n### Title: Optimize over array using line search\n### Aliases: full_index_line_search\n\n### ** Examples\n\nfull_index_line_search(function(x) {(x-50)^2}, 3:12, 5)\nfull_index_line_search(function(x) {(x-50)^2}, 3, 1)\nfull_index_line_search(function(x) {(x-50)^2}, 3:4, 1)\nfull_index_line_search(function(x) {(x-50)^2}, 3:5, 1)\nfull_index_line_search(function(x) {(x+50)^2}, 3, 1)\nfull_index_line_search(function(x) {(x+50)^2}, 3:4, 1)\nfull_index_line_search(function(x) {(x+50)^2}, 3:5, 1)\nfull_index_line_search(function(x) {(x-50)^2}, 12:3, 8)\nfull_index_line_search(function(x) {(x-50)^2}, 0:1000, 8)\nfull_index_line_search(function(x) {(x-50)^2}, 0:1000, 999)\nfull_index_line_search(function(x) {sin(x/30)}, 0:1000, 999)\n\n\n"} {"package":"mixopt","topic":"index_line_search","snippet":"### Name: index_line_search\n### Title: Line search over indexed array in one direction\n### Aliases: index_line_search\n\n### ** Examples\n\nindex_line_search(function(x) {(x-100)^2}, 1:290)\nindex_line_search(function(x) {(-x-100)^2}, -(1:290)^.92, plot=\"ind\")\nindex_line_search(function(x) {(-x-100)^2}, -(1:290)^.92, plot=\"x\")\nxx <- sort(runif(1e2, -250, -30))\nindex_line_search(function(x) {(-x-100)^2}, xx, plot=\"ind\")\nindex_line_search(function(x) {(-x-100)^2}, xx, plot=\"x\")\n\n\n"} {"package":"mixopt","topic":"mixopt","snippet":"### Name: mixopt\n### Title: Mixed variable optimization using coordinate descent\n### Aliases: mixopt mixopt_blockcd mixopt_coorddesc mixopt_multistart\n\n### ** Examples\n\n# Simple 1D example\nmixopt_blockcd(par=list(mopar_cts(2,8)), fn=function(x) {(4.5-x[1])^2})\n# 
With gradient (isn't faster)\nmixopt_blockcd(par=list(mopar_cts(2,8)), fn=function(x) {(4.5-x[1])^2},\n gr=function(x) {-2*(4.5-x[1])})\n\n# 1D discrete ordered\nmixopt_blockcd(par=list(mopar_ordered(100:10000)),\n fn=function(x) {(x[1] - 500.3)^2})\n\n# 2D: one continuous, one factor\nmixopt_blockcd(par=list(mopar_cts(2,8), mopar_unordered(letters[1:6])),\n fn=function(x) {ifelse(x[2] == 'b', -1, 0) +\n (4.5-x[1])^2})\n# Simple 1D example\nmixopt_coorddesc(par=list(mopar_cts(2,8)), fn=function(x) {(4.5-x[1])^2})\n\n# 1D discrete ordered\nmixopt_coorddesc(par=list(mopar_ordered(100:10000)),\n fn=function(x) {(x[1] - 500.3)^2})\n\n# 2D: one continuous, one factor\nmixopt_coorddesc(par=list(mopar_cts(2,8), mopar_unordered(letters[1:6])),\n fn=function(x) {ifelse(x[2] == 'b', -1, 0) +\n (4.5-x[1])^2})\n# 2D\nlibrary(ggplot2)\nlibrary(dplyr)\nf6 <- function(x) {-(-x[1]*.5*sin(.5*x[1])*1 - 1e-2*x[2]^2 +\n .2*x[1] - .3*x[2])}\nContourFunctions::cf_func(f6, xlim=c(0,100), ylim=c(-100,100))\nm6 <- mixopt_coorddesc(par=list(mopar_cts(0,100), mopar_cts(-100,100)),\n fn=f6, track = TRUE)\nplot_track(m6)\nms6 <- mixopt_multistart(par=list(mopar_cts(0,100), mopar_cts(-100,100)),\n fn=f6, track = TRUE)\nplot_track(ms6)\nContourFunctions::cf_func(f6, xlim=c(0,100), ylim=c(-100,100),\n gg = TRUE) +\n geom_point(data=as.data.frame(matrix(unlist(ms6$track$par),\n ncol=2, byrow=TRUE)) %>%\n bind_cols(newbest=ms6$track$newbest),\n aes(V1, V2, color=newbest), alpha=.5)\n\n\n"} {"package":"mixopt","topic":"mopar_cts","snippet":"### Name: mopar_cts\n### Title: Continuous variable\n### Aliases: mopar_cts\n\n### ** Examples\n\nmopar_cts(2,8)\nmopar_cts(2,8,7)\n\n\n"} {"package":"mixopt","topic":"mopar_ordered","snippet":"### Name: mopar_ordered\n### Title: Ordered variable parameter\n### Aliases: mopar_ordered\n\n### ** Examples\n\nmopar_ordered(c(1,3,5))\nmopar_ordered(c('a','c'))\nmopar_ordered(1:4)\nmopar_ordered(4:1)\nmopar_ordered(list('a', 2, 'c', sin))\n\n\n"} {"package":"mixopt","topic":"mopar_unordered","snippet":"### Name: mopar_unordered\n### Title: Unordered factor parameter\n### Aliases: mopar_unordered\n\n### ** Examples\n\nmopar_unordered(c(1,3,9))\nmopar_unordered(letters)\n\n\n"} {"package":"mixopt","topic":"plot_track","snippet":"### Name: plot_track\n### Title: Plot the tracked parameters from an optimization\n### Aliases: plot_track\n\n### ** Examples\n\nf8 <- function(x) {-(x[[1]]+x[[2]]) + .1*(x[[1]] - x[[2]])^2}\nContourFunctions::cf_func(f8, xlim=c(0,100), ylim=c(0,100))\nm8 <- mixopt_coorddesc(par=list(mopar_ordered(0:100), mopar_ordered(0:100)),\n fn=f8, track = TRUE)\nplot_track(m8)\n\nlibrary(ggplot2)\nlibrary(dplyr)\nContourFunctions::cf_func(f8, xlim=c(0,100), ylim=c(0,100),\n gg = TRUE) +\n geom_point(data=as.data.frame(matrix(unlist(m8$track$par),\n ncol=2, byrow=TRUE)) %>%\n bind_cols(newbest=m8$track$newbest),\n aes(V1, V2, color=newbest))\n\n\n"} {"package":"mixopt","topic":"[.mixopt_list","snippet":"### Name: [.mixopt_list\n### Title: Index mixopt_list\n### Aliases: [.mixopt_list\n\n### ** Examples\n\na <- list(1,4,'c', 'g')\nclass(a) <- \"mixopt_list\"\na\na[3]\na[2:3]\na[-(2:3)]\nas.data.frame(a)\n\nb <- as.mixopt_list(c(1,2,3,4,5))\nsum(b)\nb^2\nb+b\nb-b\nb*b\nb/b\nc(b)\nc(b, b)\nc(b, 1)\nc(1, b)\nc(a, b, a)\nc_mixopt_list(0, 1, 2, 3, 4, a, 5, 6, 7, 8, b, 9)\nc_mixopt_list(NULL, 3, NULL, a, NULL, 66666, NULL, b)\n\n\n"} {"package":"mixopt","topic":"verify_par","snippet":"### Name: verify_par\n### Title: Verify parameters\n### Aliases: verify_par\n\n### ** 
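\n# a minimal sketch: arithmetic on a numeric mixopt_list is elementwise, so\n# sums combine as for plain vectors (assuming sum() and + behave as above)\nb2 <- as.mixopt_list(c(10, 20, 30))\nsum(b2 + b2) # 120\n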
Examples\n\nverify_par(\n list(\n mopar_cts(2, 8, 6)\n )\n)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_dir.tcltkGUI","snippet":"### Name: dlg_dir.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs directory selection dialog box\n### Aliases: dlg_dir.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # A quick default directory changer\n##D setwd(dlg_dir(default = getwd())$res)\n## End(Not run)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_input.tcltkGUI","snippet":"### Name: dlg_input.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs input a string or value dialog\n### box\n### Aliases: dlg_input.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # Ask something...\n##D user <- dlg_input(\"Who are you?\", Sys.info()[\"user\"])$res\n##D if (!length(user)) {# The user clicked the 'cancel' button\n##D cat(\"OK, you prefer to stay anonymous!\\n\")\n##D } else {\n##D cat(\"Hello\", user, \"\\n\")\n##D }\n## End(Not run)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_list.tcltkGUI","snippet":"### Name: dlg_list.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs list selection dialog box\n### Aliases: dlg_list.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # Select one or several months\n##D res <- dlg_list(month.name, multiple = TRUE)$res\n##D if (!length(res)) {\n##D cat(\"You cancelled the choice\\n\")\n##D } else {\n##D cat(\"You selected:\\n\")\n##D print(res)\n##D }\n## End(Not run)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_message.tcltkGUI","snippet":"### Name: dlg_message.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs message box\n### Aliases: dlg_message.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # A simple information box\n##D dlg_message(\"Hello world!\")$res\n##D \n##D # Ask to continue\n##D dlg_message(c(\"This is a long task!\", \"Continue?\"), \"okcancel\")$res\n##D \n##D # Ask a question\n##D dlg_message(\"Do you like apples?\", \"yesno\")$res\n##D \n##D # Idem, but one can interrupt too\n##D res <- dlg_message(\"Do you like oranges?\", \"yesnocancel\")$res\n##D if (res == \"cancel\")\n##D cat(\"Ah, ah! 
You refuse to answer!\\n\")\n## End(Not run)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_open.tcltkGUI","snippet":"### Name: dlg_open.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs file open dialog box\n### Aliases: dlg_open.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # Choose one R file\n##D dlg_open(title = \"Select one R file\", filters = dlg_filters[c(\"R\", \"All\"), ])$res\n##D # Choose several files\n##D dlg_open(multiple = TRUE)$res\n## End(Not run)\n\n\n"} {"package":"svDialogstcltk","topic":"dlg_save.tcltkGUI","snippet":"### Name: dlg_save.tcltkGUI\n### Title: A Tcl/Tk version of the svDialogs file save dialog box\n### Aliases: dlg_save.tcltkGUI\n### Keywords: misc\n\n### ** Examples\n\nlibrary(svDialogstcltk) # Tcl/Tk dialog boxes are now used by default\n## Not run: \n##D # Choose one R filename to save some R script into it\n##D dlg_save(title = \"Save R script to\", filters = dlg_filters[c(\"R\", \"All\"), ])$res\n## End(Not run)\n\n\n"} {"package":"OWEA","topic":"design","snippet":"### Name: design\n### Title: Design Generator for Three Models\n### Aliases: design\n\n### ** Examples\n\n# NOTE: max_iter is usually set to 40. \n# Here max_iter = 5 is for demonstration only.\n# crossover dropout model\n## D-optimal\n\nexample1 <- design('dropout',10,0,3,3,drop=c(0,0,0.5), max_iter = 5)\nsummary(example1)\neff(example1) # efficiency from rounding\neffLB(example1) # obtain lower bound of efficiency\n\n## A-optimal\ndesign('dropout',10,1,3,3,drop=c(0,0,0.5), max_iter = 5)\n\n\n# proportional model\n## D-optimal\ndesign('proportional',10,0,3,3, sigma = diag(1,3),tau = matrix(sqrt(1+3),\n nrow=3, ncol=1),lambda = 0.2, max_iter = 5)\n\n## A-optimal\ndesign('proportional',10,1,3,3, sigma = diag(1,3), tau = matrix(sqrt(1+3),\n nrow=3, ncol=1),lambda = 0.2, max_iter = 5)\n\n\n# interference model\n## D-optimal\ndesign('interference',10,0,3,3, sigma = diag(1,3), max_iter = 5)\n\n## A-optimal\ndesign('interference',10,1,3,3, sigma = diag(1,3), max_iter = 5)\n\n\n\n"} {"package":"OWEA","topic":"design_app","snippet":"### Name: design_app\n### Title: Shiny App for 'design' function\n### Aliases: design_app\n\n### ** Examples\n\n \n## Not run: \n##D design_app() # launching the app.\n## End(Not run)\n\n\n"} {"package":"mixtur","topic":"fit_mixtur","snippet":"### Name: fit_mixtur\n### Title: Fit the mixture model.\n### Aliases: fit_mixtur\n\n### ** Examples\n\n\n# load the example data\ndata <- bays2009_full\n\n# fit the 3-component mixture model ignoring condition\n## No test: \nfit <- fit_mixtur(data = data,\n model = \"3_component\",\n unit = \"radians\",\n id_var = \"id\",\n response_var = \"response\",\n target_var = \"target\",\n non_target_var = \"non_target\",\n set_size_var = \"set_size\",\n condition_var = NULL)\n## End(No test)\n\n\n\n"} {"package":"mixtur","topic":"get_summary_statistics","snippet":"### Name: get_summary_statistics\n### Title: Obtain summary statistics of response error\n### Aliases: get_summary_statistics\n\n### ** Examples\n\n# load an example data frame\ndata(bays2009_full)\n\n# calculate the summary statistics per condition and per set size\nsummary_data <- get_summary_statistics(data = bays2009_full,\n unit = \"radians\",\n condition_var = \"duration\",\n set_size_var = \"set_size\")\n\n\n\n"} {"package":"mixtur","topic":"plot_error","snippet":"### Name: plot_error\n### Title: Plot response error of behavioural data relative to target\n### values.\n### 
Aliases: plot_error\n\n### ** Examples\n\nplot_error(bays2009_full,\n unit = \"radians\",\n set_size_var = \"set_size\")\n\n\n\n"} {"package":"mixtur","topic":"plot_summary_statistic","snippet":"### Name: plot_summary_statistic\n### Title: Plot summary statistics of behavioural data\n### Aliases: plot_summary_statistic\n\n### ** Examples\n\nplot_summary_statistic(bays2009_full,\n unit = \"radians\",\n statistic = \"precision\",\n set_size_var = \"set_size\",\n condition_var = \"duration\")\n\n\n\n"} {"package":"mixtur","topic":"simulate_mixtur","snippet":"### Name: simulate_mixtur\n### Title: Generate simulated data from mixture models\n### Aliases: simulate_mixtur\n\n### ** Examples\n\n\n# simulate from the slots model\n## No test: \nslots_data <- simulate_mixtur(n_trials = 1000,\n model = \"slots\",\n kappa = 8.2,\n K = 2.5,\n set_size = c(2, 4, 6, 8))\n## End(No test)\n\n# simulate one set size from the 3_component model\n## No test: \ncomponent_data <- simulate_mixtur(n_trials = 1000,\n model = \"3_component\",\n kappa = 8.2,\n p_u = .1,\n p_n = .15,\n set_size = 4)\n## End(No test)\n\n# simulate multiple set sizes from the 3_component model\n## No test: \ncomponent_data_multiple_sets <- simulate_mixtur(n_trials = 1000,\n model = \"3_component\",\n kappa = c(10, 8, 6),\n p_u = c(.1, .1, .1),\n p_n = c(.1, .15, .2),\n set_size = c(2, 4, 6))\n## End(No test)\n\n\n"} {"package":"hibayes","topic":"ibrm","snippet":"### Name: ibrm\n### Title: Bayes model\n### Aliases: ibrm\n\n### ** Examples\n\n# Load the example data attached in the package\npheno_file_path = system.file(\"extdata\", \"demo.phe\", package = \"hibayes\")\npheno = read.table(pheno_file_path, header=TRUE)\n\nbfile_path = system.file(\"extdata\", \"demo\", package = \"hibayes\")\nbin = read_plink(bfile_path, threads=1)\nfam = bin$fam\ngeno = bin$geno\nmap = bin$map\n\n# For GS/GP\n## no environmental effects:\nfit = ibrm(T1~1, data=pheno, M=geno, M.id=fam[,2], method=\"BayesCpi\",\n\tniter=2000, nburn=1200, thin=5, threads=1)\n\n## overview of the returned results\nsummary(fit)\n\n## No test: \n\n## add fixed effects or covariates:\nfit = ibrm(T1~sex+season+day+bwt, data=pheno, M=geno, M.id=fam[,2],\n\tmethod=\"BayesCpi\")\n \n## add environmental random effects:\nfit = ibrm(T1~sex+(1|loc)+(1|dam), data=pheno, M=geno, M.id=fam[,2],\n\tmethod=\"BayesCpi\")\n\n# For GWAS\nfit = ibrm(T1~sex+bwt+(1|dam), data=pheno, M=geno, M.id=fam[,2],\n\tmethod=\"BayesCpi\", map=map, windsize=1e6)\n## End(No test)\n\n# get the SD of estimated SNP effects for markers\nsummary(fit)$alpha\n# get the SD of estimated breeding values\nsummary(fit)$g\n\n\n\n"} {"package":"hibayes","topic":"ldmat","snippet":"### Name: ldmat\n### Title: LD variance-covariance matrix calculation\n### Aliases: ldmat\n\n### ** Examples\n\nbfile_path = system.file(\"extdata\", \"demo\", package = \"hibayes\")\ndata = read_plink(bfile_path)\ngeno = data$geno\nmap = data$map\n## No test: \nxx = ldmat(geno, threads=4, verbose=FALSE) #chromosome wide full ld matrix\n# xx = ldmat(geno, chisq=5, threads=4) #chromosome wide sparse ld matrix\n# xx = ldmat(geno, map, ldchr=FALSE, threads=4) #chromosome block ld matrix\n# xx = ldmat(geno, map, ldchr=FALSE, chisq=5, threads=4) #chromosome block + sparse ld matrix\n## End(No test)\n\n\n\n"} {"package":"hibayes","topic":"read_plink","snippet":"### Name: read_plink\n### Title: data load\n### Aliases: read_plink\n\n### ** Examples\n\nbfile_path = system.file(\"extdata\", \"demo\", package = \"hibayes\")\ndata = read_plink(bfile_path, 
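\n# a minimal sketch: the list returned below carries the three components used\n# throughout these examples\n# names(data) # should contain \"fam\", \"geno\" and \"map\"\n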
out=tempfile(), mode=\"A\")\nfam = data$fam\ngeno = data$geno\nmap = data$map\n\n\n"} {"package":"hibayes","topic":"sbrm","snippet":"### Name: sbrm\n### Title: SBayes model\n### Aliases: sbrm\n\n### ** Examples\n\nbfile_path = system.file(\"extdata\", \"demo\", package = \"hibayes\")\nbin = read_plink(bfile_path, threads=1)\nfam = bin$fam\ngeno = bin$geno\nmap = bin$map\n\nsumstat_path = system.file(\"extdata\", \"demo.ma\", package = \"hibayes\")\nsumstat = read.table(sumstat_path, header=TRUE)\nhead(sumstat)\n\n## No test: \n# compute the LD variance-covariance matrix\n## construct genome-wide full variance-covariance matrix\nldm1 <- ldmat(geno, threads=4)\t\n## construct genome-wide sparse variance-covariance matrix\n# ldm2 <- ldmat(geno, chisq=5, threads=4)\t\n## construct chromosome-wide full variance-covariance matrix\n# ldm3 <- ldmat(geno, map, ldchr=FALSE, threads=4)\t\n## construct chromosome-wide sparse variance-covariance matrix\n# ldm4 <- ldmat(geno, map, ldchr=FALSE, chisq=5, threads=4)\n\n# if the order of SNPs in the genotype is not consistent with the order in the sumstat file, \n# it must be adjusted beforehand.\nindx = match(map[, 1], sumstat[, 1])\nsumstat = sumstat[indx, ]\n\n# fit model\nfit = sbrm(sumstat=sumstat, ldm=ldm1, method=\"BayesCpi\", Pi = c(0.95, 0.05), \n\tniter=20000, nburn=12000, seed=666666, map=map, windsize=1e6, threads=1)\n\n# overview of the returned results\nsummary(fit)\n\n# get the SD of estimated SNP effects for markers\nsummary(fit)$alpha\n## End(No test)\n\n\n\n"} {"package":"hibayes","topic":"ssbrm","snippet":"### Name: ssbrm\n### Title: Single-step Bayes model\n### Aliases: ssbrm\n\n### ** Examples\n\n# Load the example data attached in the package\npheno_file_path = system.file(\"extdata\", \"demo.phe\", package = \"hibayes\")\npheno = read.table(pheno_file_path, header=TRUE)\n\nbfile_path = system.file(\"extdata\", \"demo\", package = \"hibayes\")\nbin = read_plink(bfile_path, threads=1)\nfam = bin$fam\ngeno = bin$geno\nmap = bin$map\n\npedigree_file_path = system.file(\"extdata\", \"demo.ped\", package = \"hibayes\")\nped = read.table(pedigree_file_path, header=TRUE)\n\n# For GS/GP\n## no environmental effects:\nfit = ssbrm(T1~1, data=pheno, M=geno, M.id=fam[,2], pedigree=ped,\n\tmethod=\"BayesCpi\", niter=1000, nburn=600, thin=5, printfreq=100, threads=1)\n\n## overview of the returned results\nsummary(fit)\n\n## No test: \n\n## add fixed effects or covariates:\nfit = ssbrm(T1~sex+bwt, data=pheno, M=geno, M.id=fam[,2], pedigree=ped,\n\tmethod=\"BayesCpi\")\n\n## add environmental random effects:\nfit = ssbrm(T1~(1|loc)+(1|dam), data=pheno, M=geno, M.id=fam[,2],\n\tpedigree=ped, method=\"BayesCpi\")\n\n# For GWAS\nfit = ssbrm(T1~sex+bwt+(1|dam), data=pheno, M=geno, M.id=fam[,2],\n\tpedigree=ped, method=\"BayesCpi\", map=map, windsize=1e6)\n## End(No test)\n\n# get the SD of estimated SNP effects for markers\nsummary(fit)$alpha\n# get the SD of estimated breeding values\nsummary(fit)$g\n\n\n\n"} {"package":"EffectLiteR","topic":"autoSelectSubset","snippet":"### Name: autoSelectSubset\n### Title: Autoselect Subset for Aggregated Effects\n### Aliases: autoSelectSubset\n\n### ** Examples\n\nm1 <- effectLite(y=\"dv\", z=c(\"z1\"), k=c(\"k1\"), x=\"x\", \ncontrol=\"control\", data=example01, fixed.cell=TRUE, fixed.z=TRUE)\nnewdata <- data.frame(k1=NA, z1=1)\nagg.subset <- autoSelectSubset(m1, newdata)\n\n\n"} {"package":"EffectLiteR","topic":"computeAggregatedEffects","snippet":"### Name: computeAggregatedEffects\n### Title: Compute Aggregated Effects\n### 
Aliases: computeAggregatedEffects\n\n### ** Examples\n\nm1 <- effectLite(y=\"dv\", z=c(\"z1\"), k=c(\"k1\"), x=\"x\", \ncontrol=\"control\", data=example01, fixed.cell=TRUE, fixed.z=TRUE)\nnewdata <- data.frame(k1=NA, z1=1)\nagg.subset <- autoSelectSubset(m1, newdata)\ncomputeAggregatedEffects(m1, agg.subset)\n\n\n"} {"package":"EffectLiteR","topic":"conditionalEffectsPlot","snippet":"### Name: conditionalEffectsPlot\n### Title: Plot conditional effects\n### Aliases: conditionalEffectsPlot\n\n### ** Examples\n\nm1 <- effectLite(y=\"dv\", x=\"x\", k=\"k1\", z=\"z1\", control=\"control\", data=example01)\nconditionalEffectsPlot(m1, zsel=\"z1\", gxsel=\"g1\", colour=\"k1\")\n\n\n\n"} {"package":"EffectLiteR","topic":"effectLite","snippet":"### Name: effectLite\n### Title: Estimate average and conditional effects\n### Aliases: effectLite\n\n### ** Examples\n\n## Example with one categorical covariate\nm1 <- effectLite(y=\"y\", x=\"x\", k=\"z\", control=\"0\", data=nonortho)\nprint(m1) \n\n## Example with one categorical and one continuous covariate\nm1 <- effectLite(y=\"dv\", x=\"x\", k=c(\"k1\"), z=c(\"z1\"), control=\"control\", data=example01)\nprint(m1)\n\n## Example with latent outcome and latent covariate\nmeasurement <- '\neta2 =~ 1*CPM12 + 1*CPM22\neta1 =~ 1*CPM11 + 1*CPM21\nCPM11 + CPM12 ~ 0*1\nCPM21 ~ c(m,m)*1\nCPM22 ~ c(p,p)*1'\n\nm1 <- effectLite(y=\"eta2\", x=\"x\", z=c(\"eta1\"), control=\"0\", \n measurement=measurement, data=example02lv)\nprint(m1)\n\n## Not run: \n##D ## Example with cluster variable and sampling weights\n##D m1 <- effectLite(y=\"y\", x=\"x\", z=\"z\", fixed.cell=TRUE, control=\"0\", \n##D syntax.only=F, data=example_multilevel, \n##D ids=~cid, weights=~weights)\n##D print(m1)\n## End(Not run)\n\n\n"} {"package":"EffectLiteR","topic":"elrPredict","snippet":"### Name: elrPredict\n### Title: Predict Conditional Effects\n### Aliases: elrPredict\n\n### ** Examples\n\nm1 <- effectLite(y=\"dv\", z=c(\"z1\"), k=c(\"k1\",\"kateg2\"), x=\"x\", \ncontrol=\"control\", data=example01)\nnewdata <- data.frame(k1=\"male\", kateg2=\"1\", z1=2)\nelrPredict(m1, newdata)\n\n\n"} {"package":"EffectLiteR","topic":"generateMeasurementModel","snippet":"### Name: generateMeasurementModel\n### Title: Generate measurement model\n### Aliases: generateMeasurementModel\n\n### ** Examples\n\n## Example with three latent variables\nnames <- c(\"eta\", \"xi1\", \"xi2\")\nindicators <- list(\"eta\" = c(\"y1\",\"y2\",\"y3\"), \n \"xi1\" = c(\"z1\",\"z2\"),\n \"xi2\" = c(\"z12\",\"z22\",\"z32\",\"z42\"))\nncells = 6\nmodel = c(\"parallel\",\"tau-equi\",\"tau-cong\")\ncat(generateMeasurementModel(names, indicators, ncells, model))\n\n## Example with method factor\nnames <- c(\"eta\", \"xi\", \"mf\")\nindicators <- list(\"eta\" = c(\"y12\",\"y22\"), \n \"xi\" = c(\"y11\",\"y21\"),\n \"mf\" = c(\"y12\",\"y22\"))\nncells = 2\ncat(generateMeasurementModel(names, indicators, ncells))\n\n## Example with categorical items\nnames <- c(\"eta\", \"xi\")\nindicators <- list(\"eta\" = paste0(\"y\",1:7,1),\n \"xi\" = paste0(\"z\",1:5,1))\nncells = 2\nmodel = c(\"tau-equi-categorical\",\"tau-cong-categorical\")\ncat(generateMeasurementModel(names, indicators, ncells, model, \n data=elrdata_categorical_items))\n\n\n\n"} {"package":"nbTransmission","topic":"clusterInfectors","snippet":"### Name: clusterInfectors\n### Title: Clusters the infectors based on their transmission probabilities\n### Aliases: clusterInfectors\n\n### ** Examples\n\n\n## Use the nbResults data frame included in the package which has the 
results\n## of the nbProbabilities() function on a TB-like outbreak.\n\n## Clustering using top n\n# High probability cluster includes infectors with highest 3 probabilities\nclust1 <- clusterInfectors(nbResults, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"n\", cutoff = 3)\ntable(clust1$cluster)\n\n## Clustering using hierarchical clustering\n\n# Cluster all infectees, do not force gap to be certain size\nclust2 <- clusterInfectors(nbResults, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0)\ntable(clust2$cluster)\n\n## No test: \n# Absolute difference: gap between top and bottom clusters is more than 0.05\nclust3 <- clusterInfectors(nbResults, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0.05)\ntable(clust3$cluster)\n\n# Relative difference: gap between top and bottom clusters is more than double any other gap\nclust4 <- clusterInfectors(nbResults, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"hc_relative\", cutoff = 2)\ntable(clust4$cluster)\n\n## Clustering using kernel density estimation\n# Using a small binwidth of 0.01\nclust5 <- clusterInfectors(nbResults, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"kd\", cutoff = 0.01)\ntable(clust5$cluster)\n## End(No test)\n\n\n\n"} {"package":"nbTransmission","topic":"estimateR","snippet":"### Name: estimateR\n### Title: Estimates the effective reproductive number\n### Aliases: estimateR\n\n### ** Examples\n\n\n## Use the nbResults data frame included in the package which has the results\n## of the nbProbabilities() function on a TB-like outbreak.\n\n## Getting initial estimates of the reproductive number\n# (without specifying rangeForAvg and without confidence intervals)\nrInitial <- estimateR(nbResults, dateVar = \"infectionDate\",\n indIDVar = \"individualID\", pVar = \"pScaled\",\n timeFrame = \"months\")\n \n## Finding the stable portion of the outbreak for rangeForAvg using plot of Rt\ncut1 <- 25\ncut2 <- 125\n\n# Optional plot to determine the cutpoints above\n# ggplot(data = rInitial$RtDf, aes(x = timeRank, y = Rt)) +\n# geom_point() +\n# geom_line() +\n# geom_hline(data = rInitial$RtAvgDf, aes(yintercept = RtAvg), size = 0.7) +\n# geom_vline(aes(xintercept = cut1), linetype = 2, size = 0.7) +\n# geom_vline(aes(xintercept = cut2), linetype = 2, size = 0.7)\n \n## Finding the final reproductive number estimates with confidence intervals\n# NOTE should run with bootSamples > 2.\nrFinal <- estimateR(nbResults, dateVar = \"infectionDate\",\n indIDVar = \"individualID\", pVar = \"pScaled\",\n timeFrame = \"months\", rangeForAvg = c(cut1, cut2),\n bootSamples = 2, alpha = 0.05)\n\nrFinal$RtAvgDf\n\n\n\n"}
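For reference, a runnable version of the optional diagnostic plot that is commented out in the estimateR() example above; it assumes rInitial, cut1, and cut2 from that example are in the workspace and that ggplot2 is installed:

library(ggplot2)
# Rt by time period, with the average over the stable range and the
# candidate cutpoints marked (mirrors the commented block above)
ggplot(data = rInitial$RtDf, aes(x = timeRank, y = Rt)) +
  geom_point() +
  geom_line() +
  geom_hline(data = rInitial$RtAvgDf, aes(yintercept = RtAvg), size = 0.7) +
  geom_vline(aes(xintercept = cut1), linetype = 2, size = 0.7) +
  geom_vline(aes(xintercept = cut2), linetype = 2, size = 0.7)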
\"pairID\",\n goldStdVar = \"snpClose\",\n covariates = c(\"Z1\", \"Z2\", \"Z3\", \"Z4\"),\n label = \"SNPs\", l = 1,\n n = 10, m = 1, nReps = 1)\n \n## Merging the probabilities back with the pair-level data\nnbResultsNoT <- merge(resGen[[1]], orderedPair, by = \"pairID\", all = TRUE)\n\n## Estimating the serial interval\n\n# Using hierarchical clustering with a 0.05 absolute difference cutoff\nestimateSI(nbResultsNoT, indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0.05, initialPars = c(2, 2))\n \n## No test: \n# Using all pairs\nestimateSI(nbResultsNoT, indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n clustMethod = \"none\", initialPars = c(2, 2))\n\n\n# # Using a shifted gamma distribution:\n# # not allowing serial intervals of less than 3 months (0.25 years)\nestimateSI(nbResultsNoT, indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0.05,\n initialPars = c(2, 2), shift = 0.25)\n\n\n# # Using multiple cutoffs\nestimateSI(nbResultsNoT, indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = c(0.025, 0.05), initialPars = c(2, 2))\n## End(No test)\n\n\n## Adding confidence intervals\n# NOTE should run with bootSamples > 2.\nestimateSI(nbResultsNoT, indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0.05,\n initialPars = c(2, 2), shift = 0.25, bootSamples = 2)\n\n\n\n"} {"package":"nbTransmission","topic":"indToPair","snippet":"### Name: indToPair\n### Title: Transforms a dataset of individuals to a dataset of pairs\n### Aliases: indToPair\n\n### ** Examples\n\n## Create a dataset of all pairs with no date variable\npairU <- indToPair(indData = indData, indIDVar = \"individualID\")\n\n## Create a dataset of all pairs with a date variable\npairUD <- indToPair(indData = indData, indIDVar = \"individualID\",\n dateVar = \"infectionDate\", units = \"days\")\n\n## Create a dataset of ordered pairs\npairO <- indToPair(indData = indData, indIDVar = \"individualID\",\n dateVar = \"infectionDate\", units = \"days\", ordered = TRUE)\n\n\n\n"} {"package":"nbTransmission","topic":"nbHeatmap","snippet":"### Name: nbHeatmap\n### Title: Plots a heatmap of the relative transmission probabilities\n### Aliases: nbHeatmap\n\n### ** Examples\n\n\n## No test: \n## Heatmap with no clustering in color with the default probability breaks\npar(mar = c(0, 0, 1, 0))\nnbHeatmap(nbResults, indIDVar = \"individualID\", dateVar = \"infectionDate\",\npVar = \"pScaled\", clustMethod = \"none\")\ndev.off() \n\n\n## Adding stars for the top cluster, in black and white, changing the probability breaks\npar(mar = c(0, 0, 1, 0))\nnbHeatmap(nbResults, indIDVar = \"individualID\", dateVar = \"infectionDate\",\n pVar = \"pScaled\", clustMethod = \"hc_absolute\", cutoff = 0.05,\n blackAndWhite = TRUE, probBreaks = c(-0.01, 0.01, 0.1, 0.25, 0.5, 1))\ndev.off()\n## End(No test)\n\n\n\n\n"} {"package":"nbTransmission","topic":"nbNetwork","snippet":"### Name: nbNetwork\n### Title: Plots a network of the relative transmission probabilities\n### Aliases: nbNetwork\n\n### ** Examples\n\n\n## No test: \n## Network of all pairs in color with the default probability breaks\npar(mar = c(0, 0, 0.2, 0))\nnbNetwork(nbResults, indIDVar = \"individualID\", dateVar = \"infectionDate\",\npVar = \"pScaled\", 
clustMethod = \"none\")\ndev.off()\n\n\n## Network of just the top cluster of infectors, black and white, changing the probability breaks\npar(mar = c(0, 0, 0.2, 0))\nnbNetwork(nbResults, indIDVar = \"individualID\", dateVar = \"infectionDate\",\n pVar = \"pScaled\", clustMethod = \"hc_absolute\", cutoff = 0.05,\n blackAndWhite = TRUE, probBreaks = c(-0.01, 0.01, 0.1, 0.25, 0.5, 1))\ndev.off() \n## End(No test)\n\n\n\n\n"} {"package":"nbTransmission","topic":"nbProbabilities","snippet":"### Name: nbProbabilities\n### Title: Estimates relative transmission probabilities\n### Aliases: nbProbabilities\n\n### ** Examples\n\n## Use the pairData dataset which represents a TB-like outbreak\n# First create a dataset of ordered pairs\norderedPair <- pairData[pairData$infectionDiffY >= 0, ]\n\n## Create a variable called snpClose that will define probable links\n# (<3 SNPs) and nonlinks (>12 SNPs) all pairs with between 2-12 SNPs\n# will not be used to train.\norderedPair$snpClose <- ifelse(orderedPair$snpDist < 3, TRUE,\n ifelse(orderedPair$snpDist > 12, FALSE, NA))\ntable(orderedPair$snpClose)\n\n## Running the algorithm\n#NOTE should run with nReps > 1.\nresGen <- nbProbabilities(orderedPair = orderedPair,\n indIDVar = \"individualID\",\n pairIDVar = \"pairID\",\n goldStdVar = \"snpClose\",\n covariates = c(\"Z1\", \"Z2\", \"Z3\", \"Z4\", \"timeCat\"),\n label = \"SNPs\", l = 1,\n n = 10, m = 1, nReps = 1)\n \n## Merging the probabilities back with the pair-level data\nnbResults <- merge(resGen[[1]], orderedPair, by = \"pairID\", all = TRUE)\n\n\n\n"} {"package":"nbTransmission","topic":"nbResults","snippet":"### Name: nbResults\n### Title: Dataset with results of 'nbProbabilities'\n### Aliases: nbResults\n### Keywords: datasets\n\n### ** Examples\n\n\n# ## NOT RUN ##\n# ## This is the code used to create this dataset ##\n# orderedPair <- pairData[pairData$infectionDiff > 0, ]\n# orderedPair$snpClose <- ifelse(orderedPair$snpDist < 3, TRUE,\n# ifelse(orderedPair$snpDist > 12, FALSE, NA))\n# set.seed(0)\n# covariates = c(\"Z1\", \"Z2\", \"Z3\", \"Z4\", \"timeCat\")\n# resGen <- nbProbabilities(orderedPair = orderedPair,\n# indIDVar = \"individualID\",\n# pairIDVar = \"pairID\",\n# goldStdVar = \"snpClose\",\n# covariates = covariates,\n# label = \"SNPs\", l = 1,\n# n = 10, m = 1, nReps = 50)\n# nbResults <- merge(resGen[[1]], orderedPair, by = \"pairID\", all = TRUE)\n\n\n"} {"package":"nbTransmission","topic":"performNB","snippet":"### Name: performNB\n### Title: Performs naive bayes classification\n### Aliases: performNB\n\n### ** Examples\n\n## Use iris dataset and predict if a flower is of the specices \"virginica\".\n\ndata(iris)\nirisNew <- iris\n## Creating an id variable\nirisNew$id <- seq(1:nrow(irisNew))\n## Creating logical variable indicating if the flower is of the species virginica\nirisNew$spVirginica <- irisNew$Species == \"virginica\"\n\n## Creating categorical/factor versions of the covariates\nirisNew$Sepal.Length.Cat <- factor(cut(irisNew$Sepal.Length, c(0, 5, 6, 7, Inf)),\n labels = c(\"<=5.0\", \"5.1-6.0\", \"6.1-7.0\", \"7.1+\"))\n\nirisNew$Sepal.Width.Cat <- factor(cut(irisNew$Sepal.Width, c(0, 2.5, 3, 3.5, Inf)),\n labels = c(\"<=2.5\", \"2.6-3.0\", \"3.1-3.5\", \"3.6+\"))\n\nirisNew$Petal.Length.Cat <- factor(cut(irisNew$Petal.Length, c(0, 2, 4, 6, Inf)),\n labels = c(\"<=2.0\", \"2.1-4.0\", \"4.1-6.0\", \"6.0+\"))\n\nirisNew$Petal.Width.Cat <- factor(cut(irisNew$Petal.Width, c(0, 1, 2, Inf)),\n labels = c(\"<=1.0\", \"1.1-2.0\", \"2.1+\"))\n\n## Using NB to predict if 
{"package":"nbTransmission","topic":"performNB","snippet":"### Name: performNB\n### Title: Performs naive bayes classification\n### Aliases: performNB\n\n### ** Examples\n\n## Use iris dataset and predict if a flower is of the species \"virginica\".\n\ndata(iris)\nirisNew <- iris\n## Creating an id variable\nirisNew$id <- seq(1:nrow(irisNew))\n## Creating logical variable indicating if the flower is of the species virginica\nirisNew$spVirginica <- irisNew$Species == \"virginica\"\n\n## Creating categorical/factor versions of the covariates\nirisNew$Sepal.Length.Cat <- factor(cut(irisNew$Sepal.Length, c(0, 5, 6, 7, Inf)),\n labels = c(\"<=5.0\", \"5.1-6.0\", \"6.1-7.0\", \"7.1+\"))\n\nirisNew$Sepal.Width.Cat <- factor(cut(irisNew$Sepal.Width, c(0, 2.5, 3, 3.5, Inf)),\n labels = c(\"<=2.5\", \"2.6-3.0\", \"3.1-3.5\", \"3.6+\"))\n\nirisNew$Petal.Length.Cat <- factor(cut(irisNew$Petal.Length, c(0, 2, 4, 6, Inf)),\n labels = c(\"<=2.0\", \"2.1-4.0\", \"4.1-6.0\", \"6.0+\"))\n\nirisNew$Petal.Width.Cat <- factor(cut(irisNew$Petal.Width, c(0, 1, 2, Inf)),\n labels = c(\"<=1.0\", \"1.1-2.0\", \"2.1+\"))\n\n## Using NB to predict if the species is virginica\n## (training and predicting on same dataset)\npred <- performNB(irisNew, irisNew, obsIDVar = \"id\",\n goldStdVar = \"spVirginica\",\n covariates = c(\"Sepal.Length.Cat\", \"Sepal.Width.Cat\",\n \"Petal.Length.Cat\", \"Petal.Width.Cat\"), l = 1)\nirisResults <- merge(irisNew, pred$probabilities, by = \"id\")\ntapply(irisResults$p, irisResults$Species, summary)\n\n\n\n"} {"package":"nbTransmission","topic":"performPEM","snippet":"### Name: performPEM\n### Title: Executes the PEM algorithm to estimate the generation/serial\n### interval distribution\n### Aliases: performPEM\n\n### ** Examples\n\n\n## First, run the algorithm without including time as a covariate.\norderedPair <- pairData[pairData$infectionDiffY > 0, ]\n\n## Create a variable called snpClose that will define probable links\n# (<3 SNPs) and nonlinks (>12 SNPs) all pairs with between 2-12 SNPs\n# will not be used to train.\norderedPair$snpClose <- ifelse(orderedPair$snpDist < 3, TRUE,\n ifelse(orderedPair$snpDist > 12, FALSE, NA))\ntable(orderedPair$snpClose)\n\n## Running the algorithm\n# NOTE should run with nReps > 1.\nresGen <- nbProbabilities(orderedPair = orderedPair,\n indIDVar = \"individualID\",\n pairIDVar = \"pairID\",\n goldStdVar = \"snpClose\",\n covariates = c(\"Z1\", \"Z2\", \"Z3\", \"Z4\"),\n label = \"SNPs\", l = 1,\n n = 10, m = 1, nReps = 1)\n \n## Merging the probabilities back with the pair-level data\nnbResultsNoT <- merge(resGen[[1]], orderedPair, by = \"pairID\", all = TRUE)\n\n## Estimating the serial interval\n\n## No test: \n# Using all pairs and plotting the parameters\n performPEM(nbResultsNoT, indIDVar = \"individualID\", timeDiffVar = \"infectionDiffY\",\n pVar = \"pScaled\", initialPars = c(2, 2), shift = 0, plot = TRUE)\n## End(No test)\n\n\n# Clustering the probabilities first\nallClust <- clusterInfectors(nbResultsNoT, indIDVar = \"individualID\", pVar = \"pScaled\",\n clustMethod = \"hc_absolute\", cutoff = 0.05)\n\nperformPEM(allClust[allClust$cluster == 1, ], indIDVar = \"individualID\",\n timeDiffVar = \"infectionDiffY\", pVar = \"pScaled\",\n initialPars = c(2, 2), shift = 0, plot = TRUE)\n\n## No test: \n# The above is equivalent to the following code using the function estimateSI()\n# though the plot will not be printed and more details will be added\nestimateSI(nbResultsNoT, indIDVar = \"individualID\", timeDiffVar = \"infectionDiffY\",\n pVar = \"pScaled\", clustMethod = \"hc_absolute\", cutoff = 0.05,\n initialPars = c(2, 2))\n## End(No test)\n\n\n\n\n"}
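performPEM() fits a (optionally shifted) gamma distribution to the generation/serial interval. A base-R sketch of how a fitted density could be drawn; the shape and scale values are hypothetical placeholders (reusing the c(2, 2) passed as initialPars above), not estimates from the example:

# hypothetical shape/scale, for illustration only
si.shape <- 2
si.scale <- 2
curve(dgamma(x, shape = si.shape, scale = si.scale),
      from = 0, to = 15,
      xlab = "Serial interval (same units as infectionDiffY)",
      ylab = "Density")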
{"package":"nbTransmission","topic":"plotRt","snippet":"### Name: plotRt\n### Title: Creates a plot of the effective reproductive number\n### Aliases: plotRt\n\n### ** Examples\n\n\n## Use the nbResults data frame included in the package which has the results\n# of the nbProbabilities() function on a TB-like outbreak.\n\n## Getting initial estimates of the reproductive number\n# (without specifying rangeForAvg and without confidence intervals)\nrInitial <- estimateR(nbResults, dateVar = \"infectionDate\",\n indIDVar = \"individualID\", pVar = \"pScaled\",\n timeFrame = \"months\")\n \n## Finding the stable portion of the outbreak for rangeForAvg using the plot\nplotRt(rInitial)\ncut1 <- 25\ncut2 <- 125\n\n## Finding the final reproductive number estimates with confidence intervals\n# NOTE should run with bootSamples > 10.\nrFinal <- estimateR(nbResults, dateVar = \"infectionDate\",\n indIDVar = \"individualID\", pVar = \"pScaled\",\n timeFrame = \"months\", rangeForAvg = c(cut1, cut2),\n bootSamples = 10, alpha = 0.05)\n\n## Plotting the final result \nplotRt(rFinal, includeRtAvg = TRUE, includeRtCI = TRUE, includeRtAvgCI = TRUE)\n\n\n\n"} {"package":"kirby21.t1","topic":"get_t1_filenames","snippet":"### Name: get_t1_filenames\n### Title: Get T1 Image Filenames\n### Aliases: get_t1_filenames\n\n### ** Examples\n\nget_t1_filenames()\n\n\n"} {"package":"xaringanExtra","topic":"animate_css","snippet":"### Name: animate_css\n### Title: Animate.css\n### Aliases: animate_css use_animate_css html_dependency_animate_css\n### use_animate_all\n\n### ** Examples\n\nuse_animate_css()\nhtml_dependency_animate_css()\n\n\n\n"} {"package":"xaringanExtra","topic":"clipboard","snippet":"### Name: clipboard\n### Title: Clipboard\n### Aliases: clipboard use_clipboard html_dependency_clipboardjs\n### html_dependency_clipboard\n\n### ** Examples\n\nuse_clipboard()\n\n\n\n"} {"package":"xaringanExtra","topic":"css_position","snippet":"### Name: css_position\n### Title: Helper to set absolute position of an element.\n### Aliases: css_position\n\n### ** Examples\n\ncss_position(top = \"1em\", right = \"1em\") # top right corner\ncss_position(top = \"1em\", left = \"1em\") # top left corner\ncss_position(bottom = 0, right = 0) # bottom right corner\n\n\n"} {"package":"xaringanExtra","topic":"editable","snippet":"### Name: editable\n### Title: Editable\n### Aliases: editable use_editable html_dependency_editable\n\n### ** Examples\n\nuse_editable()\n\n\n\n"} {"package":"xaringanExtra","topic":"embed_xaringan","snippet":"### Name: embed_xaringan\n### Title: Embed a xaringan presentation in a web page\n### Aliases: embed_xaringan\n\n### ** Examples\n\n# In your slides call\nuse_share_again()\n\n\n\n"} {"package":"xaringanExtra","topic":"extra_styles","snippet":"### Name: extra_styles\n### Title: Add Extra CSS Styles\n### Aliases: extra_styles use_extra_styles html_dependency_extra_styles\n\n### ** Examples\n\nuse_extra_styles()\n\n\n\n"} {"package":"xaringanExtra","topic":"fit_screen","snippet":"### Name: fit_screen\n### Title: Fit Slides to the Screen\n### Aliases: fit_screen use_fit_screen html_dependency_fit_screen\n\n### ** Examples\n\nuse_fit_screen()\n\n\n\n"} {"package":"xaringanExtra","topic":"freezeframe","snippet":"### Name: freezeframe\n### Title: FreezeFrame\n### Aliases: freezeframe use_freezeframe html_dependency_freezeframe\n\n### ** Examples\n\nuse_freezeframe()\n\n\n\n"} {"package":"xaringanExtra","topic":"logo","snippet":"### Name: logo\n### Title: Add Logo\n### Aliases: logo use_logo html_dependency_logo\n\n### ** Examples\n\nxaringan_logo <- file.path(\n \"https://raw.githubusercontent.com/rstudio/hex-stickers/master\",\n \"PNG/xaringan.png\"\n)\nuse_logo(xaringan_logo)\n\n\n\n"} {"package":"xaringanExtra","topic":"panelset","snippet":"### Name: panelset\n### Title: Panelset\n### Aliases: panelset use_panelset style_panelset_tabs style_panelset\n### html_dependency_panelset\n\n### ** Examples\n\nuse_panelset()\n\n\n\n"} {"package":"xaringanExtra","topic":"scribble","snippet":"### Name: scribble\n### Title: Scribble\n### Aliases: scribble use_scribble html_dependency_fabricjs\n### html_dependency_scribble\n\n### ** Examples\n\nuse_scribble()\n\n\n\n"} {"package":"xaringanExtra","topic":"search","snippet":"### Name: search\n### Title: Search\n### Aliases: search use_search html_dependency_search style_search\n\n### ** Examples\n\nuse_search()\n\n\n\n"} {"package":"xaringanExtra","topic":"share_again","snippet":"### Name: share_again\n### Title: Share or Embed xaringan Slides\n### Aliases: share_again 
use_share_again style_share_again\n\n### ** Examples\n\n# In your slides call\nuse_share_again()\n\n# In the document where you want to embed the slides call\nembed_xaringan(\"https://slides.yihui.org/xaringan/\")\n\n\n\n"} {"package":"xaringanExtra","topic":"slide_tone","snippet":"### Name: slide_tone\n### Title: Slide Tone\n### Aliases: slide_tone use_slide_tone html_dependency_slide_tone\n\n### ** Examples\n\nuse_slide_tone()\n\n\n\n"} {"package":"xaringanExtra","topic":"style_banner","snippet":"### Name: style_banner\n### Title: Style Banner\n### Aliases: style_banner\n\n### ** Examples\n\nstyle_banner(text_color = \"red\")\nstyle_banner(text_color = \"white\", background_color = \"red\")\n\n\n\n"} {"package":"xaringanExtra","topic":"tachyons","snippet":"### Name: tachyons\n### Title: Tachyons\n### Aliases: tachyons use_tachyons html_dependency_tachyons\n\n### ** Examples\n\nuse_tachyons()\n\n\n\n"} {"package":"xaringanExtra","topic":"tile_view","snippet":"### Name: tile_view\n### Title: Tile View\n### Aliases: tile_view use_tile_view html_dependency_tile_view\n\n### ** Examples\n\nuse_tile_view()\n\n\n\n"} {"package":"xaringanExtra","topic":"use_banner","snippet":"### Name: use_banner\n### Title: Add a banner to the top or bottom of your slides.\n### Aliases: use_banner\n\n### ** Examples\n\nuse_banner(bottom_left = \"bit.ly/my-awesome-slides\")\n\nuse_banner(\n bottom_left = \"bit.ly/my-awesome-slides\",\n top_center = \"My Presentation\",\n exclude = c(\"title-slide\", \"inverse\"),\n style_banner(text_color = \"grey\")\n)\n\n\n\n"} {"package":"xaringanExtra","topic":"use_broadcast","snippet":"### Name: use_broadcast\n### Title: Broadcast Your Slides\n### Aliases: use_broadcast\n\n### ** Examples\n\nuse_broadcast()\n\n\n\n"} {"package":"xaringanExtra","topic":"use_progress_bar","snippet":"### Name: use_progress_bar\n### Title: Add an animated progress bar\n### Aliases: use_progress_bar\n\n### ** Examples\n\nxaringanExtra::use_progress_bar(\"red\", \"top\", \"0.25em\")\n\n\n\n"} {"package":"xaringanExtra","topic":"use_xaringan_extra","snippet":"### Name: use_xaringan_extra\n### Title: Use xaringanExtra Extensions\n### Aliases: use_xaringan_extra\n\n### ** Examples\n\nuse_xaringan_extra(c(\"tile_view\", \"panelset\"))\nuse_xaringan_extra(c(\"tile_view\", \"scribble\", \"share_again\"))\n\n\n\n"} {"package":"xaringanExtra","topic":"webcam","snippet":"### Name: webcam\n### Title: Webcam\n### Aliases: webcam use_webcam html_dependency_webcam\n\n### ** Examples\n\nuse_webcam()\n\n\n\n"} {"package":"timsac","topic":"armafit","snippet":"### Name: armafit\n### Title: ARMA Model Fitting\n### Aliases: armafit\n### Keywords: ts\n\n### ** Examples\n\n# \"arima.sim\" is a function in \"stats\".\n# Note that the sign of MA coefficient is opposite from that in \"timsac\".\ny <- arima.sim(list(order=c(2,0,1), ar=c(0.64,-0.8), ma=-0.5), n = 1000)\nz <- armafit(y, model.order = c(2,1))\nz$arcoef\nz$macoef\n\n\n"} {"package":"timsac","topic":"auspec","snippet":"### Name: auspec\n### Title: Power Spectrum\n### Aliases: auspec\n### Keywords: ts\n\n### ** Examples\n\ny <- arima.sim(list(order=c(2,0,0), ar=c(0.64,-0.8)), n = 200)\nauspec(y, log = TRUE)\n\n\n"} {"package":"timsac","topic":"autcor","snippet":"### Name: autcor\n### Title: Autocorrelation\n### Aliases: autcor\n### Keywords: ts\n\n### ** Examples\n\n# Example 1 for the normal distribution \ny <- rnorm(200)\nautcor(y, lag_axis = FALSE)\n\n# Example 2 for the ARIMA model\ny <- arima.sim(list(order=c(2,0,0), ar=c(0.64,-0.8)), n = 200)\nautcor(y, 
lag = 20)\n\n\n"} {"package":"timsac","topic":"autoarmafit","snippet":"### Name: autoarmafit\n### Title: Automatic ARMA Model Fitting\n### Aliases: autoarmafit print.autoarmafit\n### Keywords: ts\n\n### ** Examples\n\n# \"arima.sim\" is a function in \"stats\".\n# Note that the sign of MA coefficient is opposite from that in \"timsac\".\ny <- arima.sim(list(order=c(2,0,1),ar=c(0.64,-0.8),ma=-0.5), n = 1000)\nautoarmafit(y)\n\n\n"} {"package":"timsac","topic":"baysea","snippet":"### Name: baysea\n### Title: Bayesian Seasonal Adjustment Procedure\n### Aliases: baysea\n### Keywords: ts\n\n### ** Examples\n\ndata(LaborData)\nbaysea(LaborData, forecast = 12)\n\n\n"} {"package":"timsac","topic":"bispec","snippet":"### Name: bispec\n### Title: Bispectrum\n### Aliases: bispec\n### Keywords: ts\n\n### ** Examples\n\ndata(bispecData)\nbispec(bispecData, lag = 30)\n\n\n"} {"package":"timsac","topic":"blocar","snippet":"### Name: blocar\n### Title: Bayesian Method of Locally Stationary AR Model Fitting; Scalar\n### Case\n### Aliases: blocar\n### Keywords: ts\n\n### ** Examples\n\ndata(locarData)\nz <- blocar(locarData, max.order = 10, span = 300)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"blomar","snippet":"### Name: blomar\n### Title: Bayesian Method of Locally Stationary Multivariate AR Model\n### Fitting\n### Aliases: blomar print.blomar\n### Keywords: ts\n\n### ** Examples\n\ndata(Amerikamaru)\nblomar(Amerikamaru, max.order = 10, span = 300)\n\n\n"} {"package":"timsac","topic":"bsubst","snippet":"### Name: bsubst\n### Title: Bayesian Type All Subset Analysis\n### Aliases: bsubst\n### Keywords: ts\n\n### ** Examples\n\ndata(Canadianlynx)\nRegressor <- matrix(\n c( 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 2, 1, 3, 1, 2, 3,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 3, 1, 2, 3,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 3 ),\n nrow = 3, ncol = 19, byrow = TRUE)\nz <- bsubst(Canadianlynx, mtype = 2, lag = 12, nreg = 19, Regressor)\nz$arcoef.bay\n\n\n"} {"package":"timsac","topic":"canarm","snippet":"### Name: canarm\n### Title: Canonical Correlation Analysis of Scalar Time Series\n### Aliases: canarm\n### Keywords: ts\n\n### ** Examples\n\n# \"arima.sim\" is a function in \"stats\".\n# Note that the sign of MA coefficient is opposite from that in \"timsac\".\ny <- arima.sim(list(order=c(2,0,1), ar=c(0.64,-0.8), ma=c(-0.5)), n = 1000)\nz <- canarm(y, max.order = 30)\nz$arcoef\nz$macoef\n\n\n"} {"package":"timsac","topic":"canoca","snippet":"### Name: canoca\n### Title: Canonical Correlation Analysis of Vector Time Series\n### Aliases: canoca\n### Keywords: ts\n\n### ** Examples\n\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow= TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(1000*3), nrow = 1000, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nz <- canoca(y)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"covgen","snippet":"### Name: covgen\n### Title: Covariance Generation\n### Aliases: covgen\n### Keywords: ts\n\n### ** Examples\n\nspec <- raspec(h = 100, var = 1, arcoef = c(0.64,-0.8), plot = FALSE)\ncovgen(lag = 100, f = 0:100/200, gain = spec)\n\n\n"} {"package":"timsac","topic":"decomp","snippet":"### Name: decomp\n### Title: Time Series Decomposition (Seasonal Adjustment) by Square-Root\n### Filter\n### Aliases: decomp\n### Keywords: ts\n\n### ** Examples\n\ndata(Blsallfood)\ny <- ts(Blsallfood, start=c(1967,1), 
frequency=12)\nz <- decomp(y, trade = TRUE)\nz$aic\nz$lkhd\nz$sigma2\nz$tau1\nz$tau2\nz$tau3\n\n\n"} {"package":"timsac","topic":"exsar","snippet":"### Name: exsar\n### Title: Exact Maximum Likelihood Method of Scalar AR Model Fitting\n### Aliases: exsar\n### Keywords: ts\n\n### ** Examples\n\ndata(Canadianlynx)\nz <- exsar(Canadianlynx, max.order = 14)\nz$arcoef.maice\nz$arcoef.mle\n\n\n"} {"package":"timsac","topic":"fftcor","snippet":"### Name: fftcor\n### Title: Auto And/Or Cross Correlations via FFT\n### Aliases: fftcor\n### Keywords: ts\n\n### ** Examples\n\n# Example 1\nx <- rnorm(200)\ny <- rnorm(200)\nxy <- array(c(x,y), dim = c(200,2))\nfftcor(xy, lag_axis = FALSE)\n\n# Example 2\nxorg <- rnorm(1003)\nx <- matrix(0, nrow = 1000, ncol = 2)\nx[, 1] <- xorg[1:1000]\nx[, 2] <- xorg[4:1003] + 0.5*rnorm(1000)\nfftcor(x, lag = 20)\n\n\n"} {"package":"timsac","topic":"fpeaut","snippet":"### Name: fpeaut\n### Title: FPE Auto\n### Aliases: fpeaut\n### Keywords: ts\n\n### ** Examples\n\ny <- arima.sim(list(order=c(2,0,0), ar=c(0.64,-0.8)), n = 200)\nfpeaut(y, max.order = 20)\n\n\n"} {"package":"timsac","topic":"fpec","snippet":"### Name: fpec\n### Title: AR model Fitting for Control\n### Aliases: fpec print.fpec\n### Keywords: ts\n\n### ** Examples\n\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nfpec(y, max.order = 10)\n\n\n"} {"package":"timsac","topic":"markov","snippet":"### Name: markov\n### Title: Maximum Likelihood Computation of Markovian Model\n### Aliases: markov\n### Keywords: ts\n\n### ** Examples\n\nx <- matrix(rnorm(1000*2), nrow = 1000, ncol = 2)\nma <- array(0, dim = c(2,2,2))\nma[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, -1.0), nrow = 2, ncol = 2, byrow = TRUE)\nma[, , 2] <- matrix(c( -0.2, 0.0,\n -0.1, -0.3), nrow = 2, ncol = 2, byrow = TRUE)\ny <- mfilter(x, ma, \"convolution\")\nar <- array(0, dim = c(2,2,3))\nar[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, -1.0), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 2] <- matrix(c( -0.5, -0.2,\n -0.2, -0.5), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 3] <- matrix(c( -0.3, -0.05,\n -0.1, -0.30), nrow = 2, ncol = 2, byrow = TRUE)\nz <- mfilter(y, ar, \"recursive\")\nmarkov(z)\n\n\n"} {"package":"timsac","topic":"mfilter","snippet":"### Name: mfilter\n### Title: Linear Filtering on a Multivariate Time Series\n### Aliases: mfilter\n### Keywords: ts\n\n### ** Examples\n\n#AR model simulation\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(100*3), nrow = 100, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\n\n#Back to white noise\nma <- array(0, dim = c(3,3,3))\nma[, , 1] <- diag(3)\nma[, , 2] <- -ar[, , 1]\nma[, , 3] <- -ar[, , 2]\nz <- mfilter(y, ma, \"convolution\")\nmulcor(z)\n\n#AR-MA model simulation\nx <- matrix(rnorm(1000*2), nrow = 1000, ncol = 2)\nma <- array(0, dim = c(2,2,2))\nma[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, -1.0), nrow = 2, ncol = 2, byrow = TRUE)\nma[, , 2] <- matrix(c( -0.2, 0.0,\n -0.1, -0.3), nrow = 2, ncol = 2, byrow = TRUE)\ny <- mfilter(x, ma, \"convolution\")\n\nar <- array(0, dim = c(2,2,3))\nar[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, 
-1.0), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 2] <- matrix(c( -0.5, -0.2,\n -0.2, -0.5), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 3] <- matrix(c( -0.3, -0.05,\n -0.1, -0.30), nrow = 2, ncol = 2, byrow = TRUE)\nz <- mfilter(y, ar, \"recursive\")\n\n\n"} {"package":"timsac","topic":"mlocar","snippet":"### Name: mlocar\n### Title: Minimum AIC Method of Locally Stationary AR Model Fitting;\n### Scalar Case\n### Aliases: mlocar\n### Keywords: ts\n\n### ** Examples\n\ndata(locarData)\nz <- mlocar(locarData, max.order = 10, span = 300, const = 0)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"mlomar","snippet":"### Name: mlomar\n### Title: Minimum AIC Method of Locally Stationary Multivariate AR Model\n### Fitting\n### Aliases: mlomar print.mlomar\n### Keywords: ts\n\n### ** Examples\n\ndata(Amerikamaru)\nmlomar(Amerikamaru, max.order = 10, span = 300, const = 0)\n\n\n"} {"package":"timsac","topic":"mulbar","snippet":"### Name: mulbar\n### Title: Multivariate Bayesian Method of AR Model Fitting\n### Aliases: mulbar\n### Keywords: ts\n\n### ** Examples\n\ndata(Powerplant)\nz <- mulbar(Powerplant, max.order = 10)\nz$pacoef.for\nz$pacoef.back\n\n\n"} {"package":"timsac","topic":"mulcor","snippet":"### Name: mulcor\n### Title: Multiple Correlation\n### Aliases: mulcor print.mulcor\n### Keywords: ts\n\n### ** Examples\n\n# Example 1 \ny <- rnorm(1000)\ndim(y) <- c(500,2)\nmulcor(y, lag_axis = FALSE)\n\n# Example 2\nxorg <- rnorm(1003)\nx <- matrix(0, nrow = 1000, ncol = 2)\nx[, 1] <- xorg[1:1000]\nx[, 2] <- xorg[4:1003] + 0.5*rnorm(1000)\nmulcor(x, lag = 20)\n\n\n"} {"package":"timsac","topic":"mulfrf","snippet":"### Name: mulfrf\n### Title: Frequency Response Function (Multiple Channel)\n### Aliases: mulfrf\n### Keywords: ts\n\n### ** Examples\n\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nmulfrf(y, lag = 20)\n\n\n"} {"package":"timsac","topic":"mulmar","snippet":"### Name: mulmar\n### Title: Multivariate Case of Minimum AIC Method of AR Model Fitting\n### Aliases: mulmar\n### Keywords: ts\n\n### ** Examples\n\n# Example 1\ndata(Powerplant)\nz <- mulmar(Powerplant, max.order = 10)\nz$arcoef\n\n# Example 2\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3,byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nz <- mulmar(y, max.order = 10)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"mulnos","snippet":"### Name: mulnos\n### Title: Relative Power Contribution\n### Aliases: mulnos\n### Keywords: ts\n\n### ** Examples\n\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nmulnos(y, max.order = 10, h = 20)\n\n\n"} {"package":"timsac","topic":"mulrsp","snippet":"### Name: mulrsp\n### Title: Multiple Rational Spectrum\n### Aliases: mulrsp\n### Keywords: ts\n\n### ** Examples\n\n# Example 1 for the normal distribution\nxorg <- 
rnorm(1003)\nx <- matrix(0, nrow = 1000, ncol = 2)\nx[, 1] <- xorg[1:1000]\nx[, 2] <- xorg[4:1003] + 0.5*rnorm(1000)\naaa <- ar(x)\nmulrsp(h = 20, d = 2, cov = aaa$var.pred, ar = aaa$ar)\n\n# Example 2 for the AR model\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nz <- fpec(y, max.order = 10)\nmulrsp(h = 20, d = 3, cov = z$perr, ar = z$arcoef)\n\n\n"} {"package":"timsac","topic":"mulspe","snippet":"### Name: mulspe\n### Title: Multiple Spectrum\n### Aliases: mulspe print.mulspe\n### Keywords: ts\n\n### ** Examples\n\nsgnl <- rnorm(1003)\nx <- matrix(0, nrow = 1000, ncol = 2)\nx[, 1] <- sgnl[4:1003]\n# x[i,2] = 0.9*x[i-3,1] + 0.2*N(0,1)\nx[, 2] <- 0.9*sgnl[1:1000] + 0.2*rnorm(1000)\nmulspe(x, lag = 100, window = \"Hanning\")\n\n\n"} {"package":"timsac","topic":"nonst","snippet":"### Name: nonst\n### Title: Non-stationary Power Spectrum Analysis\n### Aliases: nonst\n### Keywords: ts\n\n### ** Examples\n\n# Non-stationary Test Data\ndata(nonstData)\nnonst(nonstData, span = 700, max.order = 49)\n\n\n"} {"package":"timsac","topic":"optdes","snippet":"### Name: optdes\n### Title: Optimal Controller Design\n### Aliases: optdes\n### Keywords: ts\n\n### ** Examples\n\n# Multivariate Example Data\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow= 3, ncol= 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow= 3, ncol= 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nq.mat <- matrix(c(0.16,0,0,0.09), nrow = 2, ncol = 2)\nr.mat <- as.matrix(0.001)\noptdes(y, ns = 20, q = q.mat, r = r.mat)\n\n\n"} {"package":"timsac","topic":"optsim","snippet":"### Name: optsim\n### Title: Optimal Control Simulation\n### Aliases: optsim\n### Keywords: ts\n\n### ** Examples\n\n# Multivariate Example Data\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nq.mat <- matrix(c(0.16,0,0,0.09), nrow = 2, ncol = 2)\nr.mat <- as.matrix(0.001)\noptsim(y, max.order = 10, ns = 20, q = q.mat, r = r.mat, len = 20)\n\n\n"} {"package":"timsac","topic":"perars","snippet":"### Name: perars\n### Title: Periodic Autoregression for a Scalar Time Series\n### Aliases: perars print.perars\n### Keywords: ts\n\n### ** Examples\n\ndata(Airpollution)\nperars(Airpollution, ni = 6, lag = 2, ksw = 1)\n\n\n"} {"package":"timsac","topic":"prdctr","snippet":"### Name: prdctr\n### Title: Prediction Program\n### Aliases: prdctr print.prdctr\n### Keywords: ts\n\n### ** Examples\n\n# \"arima.sim\" is a function in \"stats\".\n# Note that the sign of MA coefficient is opposite from that in \"timsac\".\ny <- arima.sim(list(order=c(2,0,1), ar=c(0.64,-0.8), ma=c(-0.5)), n = 1000)\ny1 <- y[1:900]\nz <- autoarmafit(y1)\nar <- z$model[[1]]$arcoef\nma <- z$model[[1]]$macoef\nvar <- z$model[[1]]$v\ny2 <- y[901:990]\nprdctr(y2, r = 50, s = 90, h = 10, arcoef = ar, macoef = ma, v = var)\n\n\n"} {"package":"timsac","topic":"raspec","snippet":"### Name: raspec\n### Title: 
Rational Spectrum\n### Aliases: raspec\n### Keywords: ts\n\n### ** Examples\n\n# Example 1 for the AR model\nraspec(h = 100, var = 1, arcoef = c(0.64,-0.8))\n\n# Example 2 for the MA model\nraspec(h = 20, var = 1, macoef = c(0.64,-0.8))\n\n\n"} {"package":"timsac","topic":"sglfre","snippet":"### Name: sglfre\n### Title: Frequency Response Function (Single Channel)\n### Aliases: sglfre\n### Keywords: ts\n\n### ** Examples\n\nar <- array(0, dim = c(3,3,2))\nar[, , 1] <- matrix(c(0.4, 0, 0.3,\n 0.2, -0.1, -0.5,\n 0.3, 0.1, 0), nrow = 3, ncol = 3, byrow = TRUE)\nar[, , 2] <- matrix(c(0, -0.3, 0.5,\n 0.7, -0.4, 1,\n 0, -0.5, 0.3), nrow = 3, ncol = 3, byrow = TRUE)\nx <- matrix(rnorm(200*3), nrow = 200, ncol = 3)\ny <- mfilter(x, ar, \"recursive\")\nsglfre(y, lag = 20, invar = 1, outvar = 2)\n\n\n"} {"package":"timsac","topic":"simcon","snippet":"### Name: simcon\n### Title: Optimal Controller Design and Simulation\n### Aliases: simcon\n### Keywords: ts\n\n### ** Examples\n\nx <- matrix(rnorm(1000*2), nrow = 1000, ncol = 2)\nma <- array(0, dim = c(2,2,2))\nma[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, -1.0), nrow = 2, ncol = 2, byrow = TRUE)\nma[, , 2] <- matrix(c( -0.2, 0.0,\n -0.1, -0.3), nrow = 2, ncol = 2, byrow = TRUE)\ny <- mfilter(x, ma, \"convolution\")\n\nar <- array(0, dim = c(2,2,3))\nar[, , 1] <- matrix(c( -1.0, 0.0,\n 0.0, -1.0), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 2] <- matrix(c( -0.5, -0.2,\n -0.2, -0.5), nrow = 2, ncol = 2, byrow = TRUE)\nar[, , 3] <- matrix(c( -0.3, -0.05,\n -0.1, -0.3), nrow = 2, ncol = 2, byrow = TRUE)\ny <- mfilter(y, ar, \"recursive\")\n\nz <- markov(y)\nweight <- matrix(c(0.0002, 0.0,\n 0.0, 2.9 ), nrow = 2, ncol = 2, byrow = TRUE)\nsimcon(span = 50, len = 700, r = 1, z$arcoef, z$impulse, z$v, weight)\n\n\n"} {"package":"timsac","topic":"thirmo","snippet":"### Name: thirmo\n### Title: Third Order Moments\n### Aliases: thirmo\n### Keywords: ts\n\n### ** Examples\n\ndata(bispecData)\nz <- thirmo(bispecData, lag = 30)\nz$tmomnt\n\n\n"} {"package":"timsac","topic":"unibar","snippet":"### Name: unibar\n### Title: Univariate Bayesian Method of AR Model Fitting\n### Aliases: unibar\n### Keywords: ts\n\n### ** Examples\n\ndata(Canadianlynx)\nz <- unibar(Canadianlynx, ar.order = 20)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"unimar","snippet":"### Name: unimar\n### Title: Univariate Case of Minimum AIC Method of AR Model Fitting\n### Aliases: unimar\n### Keywords: ts\n\n### ** Examples\n\ndata(Canadianlynx)\nz <- unimar(Canadianlynx, max.order = 20)\nz$arcoef\n\n\n"} {"package":"timsac","topic":"wnoise","snippet":"### Name: wnoise\n### Title: White Noise Generator\n### Aliases: wnoise\n### Keywords: ts\n\n### ** Examples\n\n# Example 1\nwnoise(len = 100, perr = 1)\n\n# Example 2\nv <- matrix(c(1, 0, 0,\n 0, 2, 0,\n 0, 0, 3), nrow = 3, ncol = 3, byrow = TRUE)\nwnoise(len = 20, perr = v)\n\n\n"} {"package":"timsac","topic":"xsarma","snippet":"### Name: xsarma\n### Title: Exact Maximum Likelihood Method of Scalar ARMA Model Fitting\n### Aliases: xsarma\n### Keywords: ts\n\n### ** Examples\n\n# \"arima.sim\" is a function in \"stats\".\n# Note that the sign of MA coefficient is opposite from that in \"timsac\".\narcoef <- c(1.45, -0.9)\nmacoef <- c(-0.5)\ny <- arima.sim(list(order=c(2,0,1), ar=arcoef, ma=macoef), n = 100)\narcoefi <- c(1.5, -0.8)\nmacoefi <- c(0.0)\nz <- xsarma(y, arcoefi, macoefi)\nz$arcoef\nz$macoef\n\n\n"} {"package":"rbi.helpers","topic":"DIC","snippet":"### Name: DIC\n### Title: Compute Deviance Information Criterion (DIC) for a libbi 
model\n### Aliases: DIC DIC.libbi\n\n### ** Examples\n\nexample_run <- rbi::bi_read(\n system.file(package = \"rbi\", \"example_output.nc\")\n)\nexample_model_file <- system.file(package = \"rbi\", \"PZ.bi\")\nexample_bi <- rbi::attach_data(\n rbi::libbi(example_model_file), \"output\", example_run\n)\nDIC(example_bi)\n\n\n"} {"package":"rbi.helpers","topic":"acceptance_rate","snippet":"### Name: acceptance_rate\n### Title: Compute acceptance rate\n### Aliases: acceptance_rate\n\n### ** Examples\n\nexample_run <- rbi::bi_read(\n system.file(package = \"rbi.helpers\", \"example_run.nc\")\n)\nexample_model_file <- system.file(package = \"rbi\", \"PZ.bi\")\nexample_bi <- rbi::attach_data(\n rbi::libbi(example_model_file), \"output\", example_run\n)\nacceptance_rate(example_bi)\n\n\n"} {"package":"rbi.helpers","topic":"adapt_particles","snippet":"### Name: adapt_particles\n### Title: Adapt the number of particles\n### Aliases: adapt_particles\n\n### ** Examples\n\nexample_obs <- rbi::bi_read(system.file(package=\"rbi\", \"example_dataset.nc\"))\nexample_model <- rbi::bi_model(system.file(package=\"rbi\", \"PZ.bi\"))\nexample_bi <- rbi::libbi(model = example_model, obs = example_obs)\nobs_states <- rbi::var_names(example_model, type = \"obs\")\nmax_time <- max(vapply(example_obs[obs_states], function(x) {\n max(x[[\"time\"]])\n}, 0))\n## Not run: \n##D adapted <- adapt_particles(example_bi, nsamples = 128, end_time = max_time)\n## End(Not run)\n\n\n"} {"package":"rbi.helpers","topic":"adapt_proposal","snippet":"### Name: adapt_proposal\n### Title: Adapt the proposal distribution of MCMC using the covariance of\n### samples\n### Aliases: adapt_proposal\n\n### ** Examples\n\nexample_obs <- rbi::bi_read(system.file(package=\"rbi\", \"example_dataset.nc\"))\nexample_model <- rbi::bi_model(system.file(package=\"rbi\", \"PZ.bi\"))\nexample_bi <- rbi::libbi(model = example_model, obs = example_obs)\nobs_states <- rbi::var_names(example_model, type=\"obs\")\nmax_time <- max(vapply(example_obs[obs_states], function(x) {\n max(x[[\"time\"]])\n}, 0))\n# adapt to acceptance rate between 0.1 and 0.5\n## Not run: \n##D adapted <- adapt_proposal(example_bi,\n##D nsamples = 100, end_time = max_time,\n##D min = 0.1, max = 0.5, nparticles = 256, correlations = TRUE\n##D )\n## End(Not run)\n\n\n"}
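In practice the two adaptation steps above are usually chained, since each returns an updated libbi object: adapt the number of particles first, then the proposal on the result. A sketch under that assumption, reusing example_bi and max_time from the examples above (slow, hence the Not-run guard used throughout these examples):

## Not run: 
adapted <- adapt_particles(example_bi, nsamples = 128, end_time = max_time)
adapted <- adapt_proposal(adapted, nsamples = 100, end_time = max_time,
                          min = 0.1, max = 0.5, correlations = TRUE)
## End(Not run)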
{"package":"RegressionFactory","topic":"fbase1.binomial.logit","snippet":"### Name: fbase1.binomial.logit\n### Title: Single-Parameter Base Log-likelihood Function(s) for Binomial\n### GLM\n### Aliases: fbase1.binomial.logit fbase1.binomial.probit\n### fbase1.binomial.cauchit fbase1.binomial.cloglog\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D library(MfUSampler)\n##D \n##D # using the expander framework and binomial base log-likelihood\n##D # to define log-likelihood function for binary logit regression\n##D loglike.logit <- function(beta, X, y, fgh) {\n##D regfac.expand.1par(beta, X, y, fbase1.binomial.logit, fgh, n=1)\n##D }\n##D \n##D # generate data for logistic regression\n##D N <- 1000\n##D K <- 5\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D y <- 1*(runif(N) < 1.0/(1+exp(-X%*%beta)))\n##D \n##D # obtaining glm coefficients for comparison\n##D beta.glm <- glm(y~X-1, family=\"binomial\")$coefficients\n##D \n##D # mcmc sampling of log-likelihood\n##D nsmp <- 100\n##D \n##D # Slice Sampler (no derivatives needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp\n##D , f=function(beta, X, y) loglike.logit(beta, X, y, fgh=0), X=X, y=y)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.slice <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # Adaptive Rejection Sampler\n##D # (only first derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp, uni.sampler=\"ars\"\n##D , f=function(beta, X, y, grad) {\n##D if (grad)\n##D loglike.logit(beta, X, y, fgh=1)$g\n##D else\n##D loglike.logit(beta, X, y, fgh=0)\n##D }\n##D , X=X, y=y)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.ars <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # SNS (Stochastic Newton Sampler)\n##D # (both first and second derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- sns(beta.tmp, fghEval=loglike.logit, X=X, y=y, fgh=2)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.sns <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # compare results\n##D cbind(beta.glm, beta.slice, beta.ars, beta.sns)\n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"fbase1.exponential.log","snippet":"### Name: fbase1.exponential.log\n### Title: Single-Parameter Base Log-likelihood Function for Exponential\n### GLM\n### Aliases: fbase1.exponential.log\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D library(MfUSampler)\n##D \n##D # using the expander framework and base distributions to define\n##D # log-likelihood function for exponential regression\n##D loglike.exponential <- function(beta, X, y, fgh) {\n##D regfac.expand.1par(beta, X, y, fbase1.exponential.log, fgh)\n##D }\n##D \n##D # generate data for exponential regression\n##D N <- 1000\n##D K <- 5\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D y <- rexp(N, rate = exp(-X%*%beta))\n##D \n##D # mcmc sampling of log-likelihood\n##D nsmp <- 100\n##D \n##D # Slice Sampler (no derivatives needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp\n##D , f=loglike.exponential, X=X, y=y, fgh=0)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.slice <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # Adaptive Rejection Sampler\n##D # (only first derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp, uni.sampler=\"ars\"\n##D , f=function(beta, X, y, grad) {\n##D if (grad)\n##D loglike.exponential(beta, X, y, fgh=1)$g\n##D else\n##D loglike.exponential(beta, X, y, fgh=0)\n##D }\n##D , X=X, y=y)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.ars <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # SNS (Stochastic Newton Sampler)\n##D # (both first and second derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- sns(beta.tmp, fghEval=loglike.exponential, X=X, y=y, fgh=2)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.sns <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # compare results\n##D cbind(beta, beta.slice, beta.ars, beta.sns)\n##D \n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"fbase1.geometric.logit","snippet":"### Name: fbase1.geometric.logit\n### Title: Single-Parameter Base Log-likelihood Function for Geometric\n### GLM\n### Aliases: fbase1.geometric.logit\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D library(MfUSampler)\n##D 
\n##D # using the expander framework and base distributions to define\n##D # log-likelihood function for geometric regression\n##D loglike.geometric <- function(beta, X, y, fgh) {\n##D regfac.expand.1par(beta, X, y, fbase1.geometric.logit, fgh)\n##D }\n##D \n##D # generate data for geometric regression\n##D N <- 1000\n##D K <- 5\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D y <- rgeom(N, prob = 1/(1+exp(-X%*%beta)))\n##D \n##D # mcmc sampling of log-likelihood\n##D nsmp <- 100\n##D \n##D # Slice Sampler\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp\n##D , f=loglike.geometric, X=X, y=y, fgh=0)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.slice <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # Adaptive Rejection Sampler\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp, uni.sampler=\"ars\"\n##D , f=function(beta, X, y, grad) {\n##D if (grad)\n##D loglike.geometric(beta, X, y, fgh=1)$g\n##D else\n##D loglike.geometric(beta, X, y, fgh=0)\n##D }\n##D , X=X, y=y)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.ars <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # SNS (Stochastic Newton Sampler)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- sns(beta.tmp, fghEval=loglike.geometric, X=X, y=y, fgh=2, rnd = n>nsmp/4)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.sns <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # compare sample averages with actual values\n##D cbind(beta, beta.sns, beta.slice, beta.ars)\n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"fbase1.poisson.log","snippet":"### Name: fbase1.poisson.log\n### Title: Single-Parameter Base Log-likelihood Function for Poisson GLM\n### Aliases: fbase1.poisson.log\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D library(MfUSampler)\n##D \n##D # using the expander framework and base distributions to define\n##D # log-likelihood function for Poisson regression\n##D loglike.poisson <- function(beta, X, y, fgh) {\n##D regfac.expand.1par(beta, X, y, fbase1.poisson.log, fgh)\n##D }\n##D \n##D # generate data for Poisson regression\n##D N <- 1000\n##D K <- 5\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D y <- rpois(N, lambda = exp(X%*%beta))\n##D \n##D # obtaining glm coefficients for comparison\n##D beta.glm <- glm(y~X-1, family=\"poisson\")$coefficients\n##D \n##D # mcmc sampling of log-likelihood\n##D nsmp <- 100\n##D \n##D # Slice Sampler (no derivatives needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp\n##D , f=loglike.poisson, X=X, y=y, fgh=0)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.slice <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # Adaptive Rejection Sampler\n##D # (only first derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- MfU.Sample(beta.tmp, uni.sampler=\"ars\"\n##D , f=function(beta, X, y, grad) {\n##D if (grad)\n##D loglike.poisson(beta, X, y, fgh=1)$g\n##D else\n##D loglike.poisson(beta, X, y, fgh=0)\n##D }\n##D , X=X, y=y)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.ars <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # SNS (Stochastic Newton Sampler)\n##D # (both first and 
second derivative needed)\n##D beta.smp <- array(NA, dim=c(nsmp,K)) \n##D beta.tmp <- rep(0,K)\n##D for (n in 1:nsmp) {\n##D beta.tmp <- sns(beta.tmp, fghEval=loglike.poisson, X=X, y=y, fgh=2, rnd = n>nsmp/4)\n##D beta.smp[n,] <- beta.tmp\n##D }\n##D beta.sns <- colMeans(beta.smp[(nsmp/2+1):nsmp,])\n##D \n##D # compare results\n##D cbind(beta.glm, beta.slice, beta.ars, beta.sns)\n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"fbase2.gamma.log.log","snippet":"### Name: fbase2.gamma.log.log\n### Title: Double-Parameter Base Log-likelihood Function for Gamma GLM\n### Aliases: fbase2.gamma.log.log\n\n### ** Examples\n\n## Not run: \n##D # we use this library for univariate slice sampling\n##D # of multivariate distributions\n##D library(MfUSampler)\n##D library(dglm)\n##D \n##D # simulating data according to assumed generative model\n##D # we assume log link functions for both mean and dispersion\n##D # given variance function V(mu) = mu^2, we have:\n##D # log(mu) = X%*%beta\n##D # log(phi) = X%*%gamma\n##D N <- 10000\n##D K <- 5\n##D X <- cbind(1,matrix(runif(N*(K-1), min=-0.5, max=+0.5), ncol=K-1))\n##D beta <- runif(K, min=0.0, max=+1.0)\n##D gamma <- runif(K, min=0.0, max=+1.0)\n##D shape.vec <- 1 / exp(X%*%gamma)\n##D rate.vec <- 1 / exp(X%*%gamma + X%*%beta)\n##D y <- rgamma(N, shape = shape.vec, rate = rate.vec)\n##D # implied dispersion:\n##D dispersion.vec <- 1 / shape.vec\n##D \n##D # model estimation using dglm package\n##D reg.dglm <- dglm(y~X-1, dformula = ~X-1, family=Gamma(link=\"log\"), dlink = \"log\")\n##D beta.dglm <- reg.dglm$coefficients\n##D gamma.dglm <- reg.dglm$dispersion.fit$coefficients\n##D \n##D # model estimation using RegressionFactory\n##D # (with univariate slice sampling)\n##D # defining the log-likelihood using the expander framework\n##D # assuming same covariates for both slots, hence we set Z=X\n##D # slice sampler does not need derivatives, hence we set fgh=0\n##D loglike.gamma <- function(coeff, X, y) {\n##D regfac.expand.2par(coeff, X=X, Z=X, y=y, fbase2=fbase2.gamma.log.log, fgh=0)\n##D }\n##D nsmp <- 100\n##D coeff.smp <- array(NA, dim=c(nsmp, 2*K)) \n##D coeff.tmp <- rep(0.1, 2*K)\n##D for (n in 1:nsmp) {\n##D coeff.tmp <- MfU.Sample(coeff.tmp, f=loglike.gamma, X=X, y=y)\n##D coeff.smp[n,] <- coeff.tmp\n##D }\n##D beta.slice <- colMeans(coeff.smp[(nsmp/2+1):nsmp, 1:K])\n##D gamma.slice <- colMeans(coeff.smp[(nsmp/2+1):nsmp, K+1:K])\n##D \n##D # compare results\n##D cbind(beta.dglm, beta.slice)\n##D cbind(gamma.dglm, gamma.slice)\n##D \n## End(Not run)\n\n\n"}
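The slice-sampling loop above recurs in nearly every RegressionFactory example; a small hypothetical helper that wraps it (not part of the package or of MfUSampler), shown here with the gamma log-likelihood from the example above:

library(MfUSampler)
# run_slice is an illustrative wrapper, not a package function
run_slice <- function(loglike, init, nsmp, ...) {
  smp <- matrix(NA_real_, nrow = nsmp, ncol = length(init))
  cur <- init
  for (n in seq_len(nsmp)) {
    cur <- MfU.Sample(cur, f = loglike, ...)  # univariate slice updates
    smp[n, ] <- cur
  }
  # posterior means over the second half of the chain, as in the examples
  colMeans(smp[(nsmp %/% 2 + 1):nsmp, , drop = FALSE])
}
# e.g.: run_slice(loglike.gamma, rep(0.1, 2 * K), 100, X = X, y = y)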
{"package":"RegressionFactory","topic":"fbase2.gaussian.identity.log","snippet":"### Name: fbase2.gaussian.identity.log\n### Title: Double-Parameter Base Log-likelihood Function for Gaussian GLM\n### Aliases: fbase2.gaussian.identity.log\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D library(MfUSampler)\n##D library(dglm)\n##D \n##D # defining log-likelihood function\n##D # vd==FALSE leads to constant-dispersion model (ordinary linear regression)\n##D # while vd==TRUE produces varying-dispersion model\n##D loglike.linreg <- function(coeff, X, y, fgh, block.diag = F, vd = F) {\n##D if (vd) regfac.expand.2par(coeff = coeff, X = X, Z = X, y = y\n##D , fbase2 = fbase2.gaussian.identity.log, fgh = fgh, block.diag = block.diag)\n##D else regfac.expand.2par(coeff = coeff, X = X, y = y\n##D , fbase2 = fbase2.gaussian.identity.log, fgh = fgh, block.diag = block.diag)\n##D }\n##D \n##D # simulating data according to generative model\n##D N <- 1000 # number of observations\n##D K <- 5 # number of covariates\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D gamma <- runif(K, min=-0.5, max=+0.5)\n##D mean.vec <- X%*%beta\n##D sd.vec <- exp(X%*%gamma)\n##D y <- rnorm(N, mean.vec, sd.vec)\n##D \n##D # constant-dispersion model\n##D # estimation using glm\n##D est.glm <- lm(y~X-1)\n##D beta.glm <- est.glm$coefficients\n##D sigma.glm <- summary(est.glm)$sigma\n##D # estimation using RegressionFactory\n##D # (we set rnd=F in sns to allow for better comparison with glm)\n##D nsmp <- 20\n##D coeff.smp <- array(NA, dim=c(nsmp, K+1)) \n##D coeff.tmp <- rep(0, K+1)\n##D for (n in 1:nsmp) {\n##D coeff.tmp <- sns(coeff.tmp, fghEval=loglike.linreg\n##D , X=X, y=y, fgh=2, block.diag = F, vd = F, rnd = F)\n##D coeff.smp[n,] <- coeff.tmp\n##D }\n##D beta.regfac.cd <- colMeans(coeff.smp[(nsmp/2+1):nsmp, 1:K])\n##D sigma.regfac.cd <- sqrt(exp(mean(coeff.smp[(nsmp/2+1):nsmp, K+1])))\n##D # comparing glm and RegressionFactory results\n##D # beta's must match exactly between glm and RegressionFactory\n##D cbind(beta, beta.glm, beta.regfac.cd)\n##D # sigma's won't match exactly\n##D cbind(mean(sd.vec), sigma.glm, sigma.regfac.cd)\n##D \n##D # varying-dispersion model\n##D # estimation using dglm\n##D est.dglm <- dglm(y~X-1, dformula = ~X-1, family = \"gaussian\", dlink = \"log\")\n##D beta.dglm <- est.dglm$coefficients\n##D gamma.dglm <- est.dglm$dispersion.fit$coefficients\n##D # estimation using RegressionFactory\n##D coeff.smp <- array(NA, dim=c(nsmp, 2*K)) \n##D coeff.tmp <- rep(0, 2*K)\n##D for (n in 1:nsmp) {\n##D coeff.tmp <- sns(coeff.tmp, fghEval=loglike.linreg\n##D , X=X, y=y, fgh=2, block.diag = F, vd = T, rnd = F)\n##D coeff.smp[n,] <- coeff.tmp\n##D }\n##D beta.regfac.vd <- colMeans(coeff.smp[(nsmp/2+1):nsmp, 1:K])\n##D gamma.regfac.vd <- colMeans(coeff.smp[(nsmp/2+1):nsmp, K+1:K])\n##D # comparing dglm and RegressionFactory results\n##D # neither beta's nor gamma's will match exactly\n##D cbind(beta, beta.dglm, beta.regfac.vd)\n##D cbind(gamma, gamma.dglm, gamma.regfac.vd)\n##D \n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"fbase2.inverse.gaussian.log.log","snippet":"### Name: fbase2.inverse.gaussian.log.log\n### Title: Double-Parameter Base Log-likelihood Function for\n### Inverse-Gaussian GLM\n### Aliases: fbase2.inverse.gaussian.log.log\n\n### ** Examples\n\n## Not run: \n##D # we use this library for univariate slice sampling\n##D # of multivariate distributions\n##D library(MfUSampler)\n##D library(dglm)\n##D \n##D # simulating data according to assumed generative model\n##D # we assume log link functions for both mean and dispersion\n##D # (shape parameter is inverse of dispersion)\n##D N <- 10000\n##D K <- 5\n##D X <- cbind(1,matrix(runif(N*(K-1), min=-0.5, max=+0.5), ncol=K-1))\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D gamma <- runif(K, min=-0.5, max=+0.5)\n##D mean.vec <- exp(X %*% beta)\n##D dispersion.vec <- exp(X %*% gamma)\n##D y <- rinvgauss(N, mean = mean.vec, dispersion = dispersion.vec)\n##D \n##D # model estimation using dglm package\n##D reg.dglm <- dglm(y~X-1, dformula = ~X-1, family=inverse.gaussian(link=\"log\"), dlink = \"log\")\n##D beta.dglm <- reg.dglm$coefficients\n##D gamma.dglm <- reg.dglm$dispersion.fit$coefficients\n##D \n##D # model estimation using RegressionFactory\n##D # (with univariate slice sampling)\n##D # defining the log-likelihood using the expander framework\n##D # assuming same covariates for both slots, hence we set Z=X\n##D # slice sampler does not need 
derivatives, hence we set fgh=0\n##D loglike.inverse.gaussian <- function(coeff, X, y) {\n##D regfac.expand.2par(coeff, X=X, Z=X, y=y, fbase2=fbase2.inverse.gaussian.log.log, fgh=0)\n##D }\n##D nsmp <- 100\n##D coeff.smp <- array(NA, dim=c(nsmp, 2*K)) \n##D coeff.tmp <- rep(0.1, 2*K)\n##D for (n in 1:nsmp) {\n##D coeff.tmp <- MfU.Sample(coeff.tmp, f=loglike.inverse.gaussian, X=X, y=y)\n##D coeff.smp[n,] <- coeff.tmp\n##D }\n##D beta.slice <- colMeans(coeff.smp[(nsmp/2+1):nsmp, 1:K])\n##D gamma.slice <- colMeans(coeff.smp[(nsmp/2+1):nsmp, K+1:K])\n##D \n##D # compare results\n##D cbind(beta.dglm, beta.slice)\n##D cbind(gamma.dglm, gamma.slice)\n##D \n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"regfac.expand.1par","snippet":"### Name: regfac.expand.1par\n### Title: Expander Function for Single-Parameter Base Distributions\n### Aliases: regfac.expand.1par\n\n### ** Examples\n\n## Not run: \n##D library(sns)\n##D # simulating logistic regression data\n##D N <- 1000 # number of observations\n##D K <- 10 # number of variables\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D Xbeta <- X%*%beta\n##D y <- 1*(runif(N)<1/(1+exp(-Xbeta)))\n##D beta.est <- rep(0,K)\n##D # run sns in non-stochastic mode, i.e. Newton-Raphson optimization\n##D for (i in 1:10) {\n##D beta.est <- sns(beta.est, regfac.expand.1par, rnd=F, X=X, y=y\n##D , fbase1=fbase1.binomial.logit)\n##D }\n##D # use glm to estimate beta and compare\n##D beta.est.glm <- glm(y~X-1, family=\"binomial\")$coefficients\n##D cbind(beta.est, beta.est.glm)\n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"regfac.expand.2par","snippet":"### Name: regfac.expand.2par\n### Title: Expander Function for Two-Parameter Base Distributions\n### Aliases: regfac.expand.2par\n\n### ** Examples\n\n## Not run: \n##D library(dglm)\n##D library(sns)\n##D \n##D # defining log-likelihood function\n##D loglike.linreg <- function(coeff, X, y) {\n##D regfac.expand.2par(coeff = coeff, X = X, Z = X, y = y\n##D , fbase2 = fbase2.gaussian.identity.log, fgh = 2, block.diag = T)\n##D }\n##D \n##D # simulating data according to generative model\n##D N <- 1000 # number of observations\n##D K <- 5 # number of covariates\n##D X <- matrix(runif(N*K, min=-0.5, max=+0.5), ncol=K)\n##D beta <- runif(K, min=-0.5, max=+0.5)\n##D gamma <- runif(K, min=-0.5, max=+0.5)\n##D mean.vec <- X%*%beta\n##D sd.vec <- exp(X%*%gamma)\n##D y <- rnorm(N, mean.vec, sd.vec)\n##D \n##D # estimation using dglm\n##D est.dglm <- dglm(y~X-1, dformula = ~X-1, family = \"gaussian\", dlink = \"log\")\n##D beta.dglm <- est.dglm$coefficients\n##D gamma.dglm <- est.dglm$dispersion.fit$coefficients\n##D \n##D # estimation using RegressionFactory\n##D coeff.tmp <- rep(0, 2*K)\n##D for (n in 1:10) {\n##D coeff.tmp <- sns(coeff.tmp, fghEval=loglike.linreg\n##D , X=X, y=y, rnd = F)\n##D }\n##D beta.regfac.vd <- coeff.tmp[1:K]\n##D gamma.regfac.vd <- coeff.tmp[K+1:K]\n##D \n##D # comparing dglm and RegressionFactory results\n##D # neither beta's nor gamma's will match exactly\n##D cbind(beta.dglm, beta.regfac.vd)\n##D cbind(gamma.dglm, gamma.regfac.vd)\n## End(Not run)\n\n\n"} {"package":"RegressionFactory","topic":"regfac.merge","snippet":"### Name: regfac.merge\n### Title: Utility Function for Adding Two Functions and Their Derivatives\n### Aliases: regfac.merge\n\n### ** Examples\n\n# constructing the log-posterior for Bayesian logistic regression\n# log-likelihood\nloglike.logistic <- function(beta, X, y, fgh) {\n 
regfac.expand.1par(beta, X, y, fbase1.binomial.logit, fgh, n=1)\n}\n# log-prior\nlogprior.logistic <- function(beta, mu.beta, sd.beta, fgh) {\n f <- sum(dnorm(beta, mu.beta, sd.beta, log=TRUE))\n if (fgh==0) return (f)\n g <- -(beta-mu.beta)/sd.beta^2\n if (fgh==1) return (list(f=f, g=g))\n #h <- diag(rep(-1/sd.beta^2,length(beta)))\n h <- diag(-1/sd.beta^2)\n return (list(f=f, g=g, h=h))\n}\n# adding log-likelihood and log-prior according to Bayes rule\nlogpost.logistic <- function(beta, X, y, mu.beta, sd.beta, fgh) {\n ret.loglike <- loglike.logistic(beta, X, y, fgh)\n ret.logprior <- logprior.logistic(beta, mu.beta, sd.beta, fgh)\n regfac.merge(ret.loglike,ret.logprior, fgh=fgh)\n}\n\n\n\n"} {"package":"rgl2gltf","topic":"Gltf","snippet":"### Name: Gltf\n### Title: R6 Class for glTF file objects\n### Aliases: Gltf\n\n### ** Examples\n\n\n## ------------------------------------------------\n## Method `Gltf$print`\n## ------------------------------------------------\n\n## No test: \nsamples <- \"https://raw.githubusercontent.com/KhronosGroup/glTF-Sample-Models/master/2.0\"\ngltf <- readGLB(paste0(samples, \"/2CylinderEngine/glTF-Binary/2CylinderEngine.glb?raw=true\"))\ngltf$print(names = \"meshes\")\n## End(No test)\n\n\n"} {"package":"rgl2gltf","topic":"as.gltf","snippet":"### Name: as.gltf\n### Title: Produce glTF objects\n### Aliases: as.gltf as.gltf.mesh3d as.gltf.rglscene as.gltf.default\n\n### ** Examples\n\ncube <- rgl::rotate3d(rgl::cube3d(col = \"red\"), -pi/10, 1,0,0)\ngltf <- as.gltf(cube)\nrgl::plot3d(gltf)\ngltf$closeBuffers()\n\n\n"} {"package":"rgl2gltf","topic":"findEntry","snippet":"### Name: findEntry\n### Title: Find a component of a recursive object\n### Aliases: findEntry namePattern hasClass\n\n### ** Examples\n\nx <- list( a = list( b = list(c(d=\"A\", e=\"B\"), 1L, 1:3)))\nlocations <- findEntry(x, namePattern(\"e\"))\nlocations\n\n#This shows how the result can be used:\nx[[locations[[1]]]]\nexpr <- paste0(c(\"x\", names(locations[[1]])), collapse = \"\")\nexpr\neval(parse(text=expr))\n\nfindEntry(x, hasClass(\"integer\"))\n\n\n"} {"package":"rgl2gltf","topic":"gltfWidget","snippet":"### Name: gltfWidget\n### Title: Create a widget for a glTF display.\n### Aliases: gltfWidget\n\n### ** Examples\n\nif ((interactive() || rgl::in_pkgdown_example()) && requireNamespace(\"manipulateWidget\")) {\n gltf <- readGLB(system.file(\"glb/RiggedSimple.glb\", package = \"rgl2gltf\"))\n gltfWidget(gltf)\n}\n\n\n\n"} {"package":"rgl2gltf","topic":"playgltf","snippet":"### Name: playgltf\n### Title: Play an animated glTF object.\n### Aliases: playgltf showNodes\n\n### ** Examples\n\n## No test: \nif (interactive() && !rgl::in_pkgdown_example()) {\n# This example is fast enough using the \"whole\" method:\n\ngltf1 <- readGLB(system.file(\"glb/RiggedSimple.glb\", package = \"rgl2gltf\"))\nplaygltf(gltf1, start = 0, stop = 3, method = \"whole\")\n\n# It looks terrible using the \"rigid\" method, because some triangles\n# need to be deformed:\n\nplaygltf(gltf1, start = 0, stop = 3, method = \"rigid\")\n\n# This example is too slow using anything but \"rigid\", but it's fine there:\n\nsamples <- \"https://raw.githubusercontent.com/KhronosGroup/glTF-Sample-Models/master/2.0\"\ngltf2 <- readGLB(paste0(samples, \"/BrainStem/glTF-Binary/BrainStem.glb?raw=true\"))\nplaygltf(gltf2, start = 0, stop = 2, speed = 0.25, method = \"rigid\")\n}\n## End(No test)\n\n\n"} {"package":"rgl2gltf","topic":"readGLB","snippet":"### Name: readGLB\n### Title: Read a GLB file.\n### Aliases: readGLB\n\n### ** 
Examples\n\n## No test: \n# This web page has lots of sample files\n\nsamples <- "https://raw.githubusercontent.com/KhronosGroup/glTF-Sample-Models/master/2.0"\n\n# Get one of them: an avocado\n\ngltf <- readGLB(paste0(samples, "/Avocado/glTF-Binary/Avocado.glb?raw=true"))\n\nif (interactive())\n rgl::plot3d(gltf)\n\nif (rgl::in_pkgdown_example())\n gltfWidget(gltf)\n\ngltf$closeBuffers()\n## End(No test)\n\n\n"} {"package":"rgl2gltf","topic":"readglTF","snippet":"### Name: readglTF\n### Title: Read a glTF file\n### Aliases: readglTF\n\n### ** Examples\n\n## No test: \n\n# This web page has lots of sample files\n\nsamples <- "https://raw.githubusercontent.com/KhronosGroup/glTF-Sample-Models/master/2.0"\nfilename <- tempfile(fileext = ".gltf")\n\n# Get one of them: a 2 cylinder engine. We need both parts\n# to be able to view it, though only the .gltf part is\n# needed for readglTF()\n\ndownload.file(paste0(samples, "/2CylinderEngine/glTF/2CylinderEngine.gltf"),\n destfile = filename)\ndownload.file(paste0(samples, "/2CylinderEngine/glTF/2CylinderEngine0.bin?raw=true"),\n destfile = file.path(tempdir(), "2CylinderEngine0.bin"),\n mode = "wb")\n\ngltf <- readglTF(filename)\ngltf\n\n# gltf files contain references to other files using\n# relative paths, so we can only use them from their\n# own directory\nolddir <- setwd(dirname(filename))\nrgl::plot3d(gltf)\nsetwd(olddir)\n\n## End(No test)\n\n\n"} {"package":"rgl2gltf","topic":"setPBRshaders","snippet":"### Name: setPBRshaders\n### Title: Set shaders for physically based rendering.\n### Aliases: setPBRshaders\n\n### ** Examples\n\n## No test: \n# This web page has lots of sample files\n\nsamples <- "https://raw.githubusercontent.com/KhronosGroup/glTF-Sample-Models/master/2.0"\n\n# Get one of them: the normal-tangent test model\n\ngltf <- readGLB(paste0(samples, "/NormalTangentTest/glTF-Binary/NormalTangentTest.glb?raw=true"))\ngltfMat <- gltf$getMaterial(0)\nscene <- as.rglscene(gltf)\nid <- scene$objects[[1]]$id\nscene <- setPBRshaders(gltf, gltfMat, id, scene)\ncat(scene$objects[[1]]$userFragmentShader)\n## End(No test)\n\n\n"} {"package":"rgl2gltf","topic":"showTags","snippet":"### Name: showTags\n### Title: Debugging tool: show tags for objects in rgl scene\n### Aliases: showTags\n\n### ** Examples\n\nexample("plot3d", package = "rgl")\nshowTags()\n\n\n"} {"package":"rgl2gltf","topic":"writeglTF","snippet":"### Name: writeglTF\n### Title: Write a glTF or GLB file.\n### Aliases: writeglTF writeGLB\n\n### ** Examples\n\nfilename <- tempfile(fileext = ".glb")\nwriteGLB(as.gltf(rgl::cube3d(col = "red")), filename)\n\n\n"} {"package":"rddensity","topic":"rdbwdensity","snippet":"### Name: rdbwdensity\n### Title: Bandwidth Selection for Manipulation Testing\n### Aliases: rdbwdensity\n\n### ** Examples\n\n# Generate a random sample\nset.seed(42)\nx <- rnorm(2000, mean = -0.5)\n\n# Bandwidth selection\nsummary(rdbwdensity(X = x, vce="jackknife"))\n\n\n\n"} {"package":"rddensity","topic":"rddensity","snippet":"### Name: rddensity\n### Title: Manipulation Testing Using Local Polynomial Density Estimation\n### Aliases: rddensity\n\n### ** Examples\n\n### Continuous Density\nset.seed(42)\nx <- rnorm(2000, mean = -0.5)\nrdd <- rddensity(X = x, vce = "jackknife")\nsummary(rdd)\n\n### Bandwidth selection using rdbwdensity()\nrddbw <- rdbwdensity(X = x, vce = "jackknife")\nsummary(rddbw)\n\n### Plotting using rdplotdensity()\n# 1. 
From -2 to 2 with 25 evaluation points at each side\nplot1 <- rdplotdensity(rdd, x, plotRange = c(-2, 2), plotN = 25)\n\n# 2. Plotting a uniform confidence band\nset.seed(42) # fix the seed for simulating critical values\nplot2 <- rdplotdensity(rdd, x, plotRange = c(-2, 2), plotN = 25, CIuniform = TRUE)\n\n### Density discontinuity at 0\nx[x > 0] <- x[x > 0] * 2\nrdd2 <- rddensity(X = x, vce = \"jackknife\")\nsummary(rdd2)\nplot3 <- rdplotdensity(rdd2, x, plotRange = c(-2, 2), plotN = 25)\n\n\n\n"} {"package":"rddensity","topic":"rdplotdensity","snippet":"### Name: rdplotdensity\n### Title: Density Plotting for Manipulation Testing\n### Aliases: rdplotdensity\n\n### ** Examples\n\n# Generate a random sample with a density discontinuity at 0\nset.seed(42)\nx <- rnorm(2000, mean = -0.5)\nx[x > 0] <- x[x > 0] * 2\n\n# Estimation\nrdd <- rddensity(X = x)\nsummary(rdd)\n\n# Density plot (from -2 to 2 with 25 evaluation points at each side)\nplot1 <- rdplotdensity(rdd, x, plotRange = c(-2, 2), plotN = 25)\n\n# Plotting a uniform confidence band\nset.seed(42) # fix the seed for simulating critical values\nplot3 <- rdplotdensity(rdd, x, plotRange = c(-2, 2), plotN = 25, CIuniform = TRUE)\n\n\n\n"} {"package":"Boruta","topic":"Boruta","snippet":"### Name: Boruta\n### Title: Feature selection with the Boruta algorithm\n### Aliases: Boruta Boruta.default Boruta.formula\n\n### ** Examples\n\nset.seed(777)\n\n#Boruta on the \"small redundant XOR\" problem; read ?srx for details\ndata(srx)\nBoruta(Y~.,data=srx)->Boruta.srx\n\n#Results summary\nprint(Boruta.srx)\n\n#Result plot\nplot(Boruta.srx)\n\n#Attribute statistics\nattStats(Boruta.srx)\n\n#Using alternative importance source, rFerns\nBoruta(Y~.,data=srx,getImp=getImpFerns)->Boruta.srx.ferns\nprint(Boruta.srx.ferns)\n\n#Verbose\nBoruta(Y~.,data=srx,doTrace=2)->Boruta.srx\n\n## Not run: \n##D #Boruta on the iris problem extended with artificial irrelevant features\n##D #Generate said features\n##D iris.extended<-data.frame(iris,apply(iris[,-5],2,sample))\n##D names(iris.extended)[6:9]<-paste(\"Nonsense\",1:4,sep=\"\")\n##D #Run Boruta on this data\n##D Boruta(Species~.,data=iris.extended,doTrace=2)->Boruta.iris.extended\n##D #Nonsense attributes should be rejected\n##D print(Boruta.iris.extended)\n## End(Not run)\n\n## Not run: \n##D #Boruta on the HouseVotes84 data from mlbench\n##D library(mlbench); data(HouseVotes84)\n##D na.omit(HouseVotes84)->hvo\n##D #Takes some time, so be patient\n##D Boruta(Class~.,data=hvo,doTrace=2)->Bor.hvo\n##D print(Bor.hvo)\n##D plot(Bor.hvo)\n##D plotImpHistory(Bor.hvo)\n## End(Not run)\n## Not run: \n##D #Boruta on the Ozone data from mlbench\n##D library(mlbench); data(Ozone)\n##D library(randomForest)\n##D na.omit(Ozone)->ozo\n##D Boruta(V4~.,data=ozo,doTrace=2)->Bor.ozo\n##D cat('Random forest run on all attributes:\\n')\n##D print(randomForest(V4~.,data=ozo))\n##D cat('Random forest run only on confirmed attributes:\\n')\n##D print(randomForest(ozo[,getSelectedAttributes(Bor.ozo)],ozo$V4))\n## End(Not run)\n## Not run: \n##D #Boruta on the Sonar data from mlbench\n##D library(mlbench); data(Sonar)\n##D #Takes some time, so be patient\n##D Boruta(Class~.,data=Sonar,doTrace=2)->Bor.son\n##D print(Bor.son)\n##D #Shows important bands\n##D plot(Bor.son,sort=FALSE)\n## End(Not run)\n\n\n"} {"package":"Boruta","topic":"attStats","snippet":"### Name: attStats\n### Title: Extract attribute statistics\n### Aliases: attStats\n\n### ** Examples\n\n## Not run: \n##D library(mlbench); data(Sonar)\n##D #Takes some time, so be 
patient\n##D Boruta(Class~.,data=Sonar,doTrace=2)->Bor.son\n##D print(Bor.son)\n##D stats<-attStats(Bor.son)\n##D print(stats)\n##D plot(normHits~meanImp,col=stats$decision,data=stats)\n## End(Not run)\n\n\n"} {"package":"Boruta","topic":"decohereTransdapter","snippet":"### Name: decohereTransdapter\n### Title: Decohere transdapter\n### Aliases: decohereTransdapter\n\n### ** Examples\n\nset.seed(777)\n# SRX data only contains multivariate interactions\ndata(srx)\n# Decoherence transform removes them all,\n# leaving no confirmed features\nBoruta(Y~.,data=srx,getImp=decohereTransdapter())\n\n\n"} {"package":"Boruta","topic":"getImpLegacyRf","snippet":"### Name: getImpLegacyRf\n### Title: randomForest importance adapters\n### Aliases: getImpLegacyRf getImpLegacyRfZ getImpLegacyRfGini\n### getLegacyImpRfRaw getImpLegacyRfRaw\n\n### ** Examples\n\nset.seed(777)\n#Add some nonsense attributes to iris dataset by shuffling original attributes\niris.extended<-data.frame(iris,apply(iris[,-5],2,sample))\nnames(iris.extended)[6:9]<-paste(\"Nonsense\",1:4,sep=\"\")\n#Run Boruta on this data\nBoruta(Species~.,getImp=getImpLegacyRfZ,\n data=iris.extended,doTrace=2)->Boruta.iris.extended\n#Nonsense attributes should be rejected\nprint(Boruta.iris.extended)\n\n\n"} {"package":"Boruta","topic":"getSelectedAttributes","snippet":"### Name: getSelectedAttributes\n### Title: Extract names of the selected attributes\n### Aliases: getSelectedAttributes\n\n### ** Examples\n\n## Not run: \n##D data(iris)\n##D #Takes some time, so be patient\n##D Boruta(Species~.,data=iris,doTrace=2)->Bor.iris\n##D print(Bor.iris)\n##D print(getSelectedAttributes(Bor.iris))\n## End(Not run)\n\n\n"} {"package":"Boruta","topic":"imputeTransdapter","snippet":"### Name: imputeTransdapter\n### Title: Impute transdapter\n### Aliases: imputeTransdapter\n\n### ** Examples\n\n## Not run: \n##D set.seed(777)\n##D data(srx)\n##D srx_na<-srx\n##D # Randomly punch 25 holes in the SRX data\n##D holes<-25\n##D holes<-cbind(\n##D sample(nrow(srx),holes,replace=TRUE),\n##D sample(ncol(srx),holes,replace=TRUE)\n##D )\n##D srx_na[holes]<-NA\n##D # Use impute transdapter to mitigate them with internal imputation\n##D Boruta(Y~.,data=srx_na,getImp=imputeTransdapter(getImpRfZ))\n## End(Not run)\n\n\n"} {"package":"Boruta","topic":"plot.Boruta","snippet":"### Name: plot.Boruta\n### Title: Plot Boruta object\n### Aliases: plot.Boruta\n\n### ** Examples\n\n## Not run: \n##D library(mlbench); data(HouseVotes84)\n##D na.omit(HouseVotes84)->hvo\n##D #Takes some time, so be patient\n##D Boruta(Class~.,data=hvo,doTrace=2)->Bor.hvo\n##D print(Bor.hvo)\n##D plot(Bor.hvo)\n## End(Not run)\n\n\n"} {"package":"Boruta","topic":"plotImpHistory","snippet":"### Name: plotImpHistory\n### Title: Plot Boruta object as importance history\n### Aliases: plotImpHistory\n\n### ** Examples\n\n## Not run: \n##D library(mlbench); data(Sonar)\n##D #Takes some time, so be patient\n##D Boruta(Class~.,data=Sonar,doTrace=2)->Bor.son\n##D print(Bor.son)\n##D plotImpHistory(Bor.son)\n## End(Not run)\n\n\n"} {"package":"cereal","topic":"cereal_decode","snippet":"### Name: cereal_decode\n### Title: Convert a JSON-serialized prototype to a vctrs prototype\n### Aliases: cereal_decode\n\n### ** Examples\n\ncereal_decode(structure(list(), class = \"cereal_integer\"))\ncereal_decode(structure(list(), class = \"cereal_Date\"))\n\n\n\n"} {"package":"cereal","topic":"cereal_details","snippet":"### Name: cereal_details\n### Title: Find needed details for vctrs prototype\n### Aliases: 
cereal_details\n\n### ** Examples\n\ncereal_details(factor(letters[1:5], labels = \"letter\"))\ncereal_details(factor(LETTERS[3:1], ordered = TRUE))\ncereal_details(as.POSIXct(\"2023-01-01\", tz = \"America/New_York\"))\n\n\n"} {"package":"cereal","topic":"cereal_encode","snippet":"### Name: cereal_encode\n### Title: Encode a vector as JSON\n### Aliases: cereal_encode\n\n### ** Examples\n\ncereal_encode(1:10)\ncereal_encode(Sys.Date())\ncereal_encode(sample(letters, 5))\ncereal_encode(factor(letters[1:5], labels = \"letter\"))\ncereal_encode(factor(LETTERS[3:1], ordered = TRUE))\n\n## you can encode a ptype as well:\nptype <- vctrs::vec_ptype(factor(LETTERS[3:1], ordered = TRUE))\n## but \"example\" is NULL:\ncereal_encode(ptype)\n\n\n\n"} {"package":"cereal","topic":"cereal_to_json","snippet":"### Name: cereal_to_json\n### Title: Serialize and deserialize the prototype of a data frame to JSON\n### Aliases: cereal_to_json cereal_from_json\n\n### ** Examples\n\n\ndf <- tibble::tibble(\n a = 1,\n b = 2L,\n c = Sys.Date(),\n d = as.POSIXct(\"2019-01-01\", tz = \"America/New_York\"),\n e = \"x\",\n f = factor(\"blue\", levels = c(\"blue\", \"green\", \"red\")),\n g = ordered(\"small\", levels = c(\"small\", \"medium\", \"large\"))\n)\n\njson <- cereal_to_json(df)\njson\n\nstr(cereal_from_json(json))\n## same as:\nstr(vctrs::vec_ptype(df))\n\n\n\n"} {"package":"unpivotr","topic":"as_cells","snippet":"### Name: as_cells\n### Title: Tokenize data frames into a tidy 'melted' structure\n### Aliases: as_cells\n\n### ** Examples\n\nx <- data.frame(a = c(10, 20),\n b = c(\"foo\", \"bar\"),\n stringsAsFactors = FALSE)\nx\nas_cells(x)\nas_cells(x, row_names = TRUE)\nas_cells(x, col_names = TRUE)\n\n# 'list' columns are undisturbed\ny <- data.frame(a = c(\"a\", \"b\"), stringsAsFactors = FALSE)\ny$b <- list(1:2, 3:4)\ny\nas_cells(y)\n\n# Factors are preserved by being wrapped in lists so that their levels don't\n# conflict. Blanks are NULLs.\nz <- data.frame(x = factor(c(\"a\", \"b\")),\n y = factor(c(\"c\", \"d\"), ordered = TRUE))\nas_cells(z)\nas_cells(z)$fct\nas_cells(z)$ord\n\n# HTML tables can be extracted from the output of xml2::read_html(). These\n# are returned as a list of tables, similar to rvest::html_table(). 
The\n# value of each cell is its standalone HTML string, which can contain\n# anything -- even another table.\n\ncolspan <- system.file("extdata", "colspan.html", package = "unpivotr")\nrowspan <- system.file("extdata", "rowspan.html", package = "unpivotr")\nnested <- system.file("extdata", "nested.html", package = "unpivotr")\n\n## Not run: \n##D browseURL(colspan)\n##D browseURL(rowspan)\n##D browseURL(nested)\n## End(Not run)\n\nas_cells(xml2::read_html(colspan))\nas_cells(xml2::read_html(rowspan))\nas_cells(xml2::read_html(nested))\n\n\n"} {"package":"unpivotr","topic":"behead","snippet":"### Name: behead\n### Title: Strip a level of headers from a pivot table\n### Aliases: behead behead_if\n\n### ** Examples\n\n# A simple table with a row of headers\n(x <- data.frame(a = 1:2, b = 3:4))\n\n# Make a tidy representation of each cell\n(cells <- as_cells(x, col_names = TRUE))\n\n# Strip the cells in row 1 (the original headers) and use them as data\nbehead(cells, "N", foo)\n\n# More complex example: pivot table with several layers of headers\n(x <- purpose$`up-left left-up`)\n\n# Make a tidy representation\ncells <- as_cells(x)\nhead(cells)\ntail(cells)\n\n# Strip the headers and make them into data\ntidy <-\n cells %>%\n behead("up-left", Sex) %>%\n behead("up", `Sense of purpose`) %>%\n behead("left-up", `Highest qualification`) %>%\n behead("left", `Age group (Life-stages)`) %>%\n dplyr::mutate(count = as.integer(chr)) %>%\n dplyr::select(-row, -col, -data_type, -chr)\nhead(tidy)\n\n# Check against the provided 'tidy' version of the data.\ndplyr::anti_join(tidy, purpose$Tidy)\n\n# The provided 'tidy' data is missing a row for Male 15-24-year-olds with a\n# postgraduate qualification and a sense of purpose between 0 and 6. That\n# seems to have been an oversight by Statistics New Zealand.\n\ncells <- tibble::tribble(\n ~X1, ~adult, ~juvenile,\n "LION", 855, 677,\n "male", 496, 322,\n "female", 359, 355,\n "TIGER", 690, 324,\n "male", 381, 222,\n "female", 309, 102\n )\ncells <- as_cells(cells, col_names = TRUE)\n\ncells %>%\n behead_if(chr == toupper(chr), direction = "left-up", name = "species") %>%\n behead("left", "sex") %>%\n behead("up", "age") %>%\n dplyr::select(species, sex, age, population = dbl)\n\n\n"} {"package":"unpivotr","topic":"enhead","snippet":"### Name: enhead\n### Title: Join data cells to headers\n### Aliases: enhead\n\n### ** Examples\n\nlibrary(dplyr)\n# Load some pivoted data\n(x <- purpose$`up-left left-up`)\n# Make a tidy representation\ncells <- as_cells(x)\ncells <- cells[!is.na(cells$chr), ]\nhead(cells)\n# Select the cells containing the values\ndata_cells <-\n filter(cells, row >= 3, col >= 3) %>%\n transmute(row, col, count = as.integer(chr))\nhead(data_cells)\n# Select the headers\nqualification <-\n filter(cells, col == 1) %>%\n select(row, col, qualification = chr)\nage <-\n filter(cells, col == 2) %>%\n select(row, col, age = chr)\ngender <-\n filter(cells, row == 1) %>%\n select(row, col, gender = chr)\nsatisfaction <-\n filter(cells, row == 2) %>%\n select(row, col, satisfaction = chr)\n# From each data cell, search for the nearest one of each of the headers\ndata_cells %>%\n enhead(gender, "up-left") %>%\n enhead(satisfaction, "up") %>%\n enhead(qualification, "left-up") %>%\n enhead(age, "left") %>%\n select(-row, -col)\n\n# The `drop` argument controls what happens when, for some cells, there is no\n# header in the given direction. 
When `drop = TRUE` (the default), cells that\n# can't be joined to a header are dropped. Otherwise they are kept.\nenhead(data_cells, gender, \"up\")\nenhead(data_cells, gender, \"up\", drop = FALSE)\n\n\n"} {"package":"unpivotr","topic":"isolate_sentinels","snippet":"### Name: isolate_sentinels\n### Title: Move sentinel values into a separate column leaving NA behind\n### Aliases: isolate_sentinels\n\n### ** Examples\n\nx <- data.frame(name = c(\"Matilda\", \"Nicholas\", \"Olivia\", \"Paul\"),\n score = c(10, \"confidential\", \"N/A\", 12),\n stringsAsFactors = FALSE)\nx\nisolate_sentinels(x, score, c(\"confidential\", \"N/A\"))\nisolate_sentinels(x, score, c(\"confidential\", \"N/A\"), \"flag\")\n\n\n"} {"package":"unpivotr","topic":"justify","snippet":"### Name: justify\n### Title: Align one set of cells with another set\n### Aliases: justify\n\n### ** Examples\n\nheader_cells <- tibble::tibble(row = c(1L, 1L, 1L, 1L),\n col = c(3L, 5L, 8L, 10L),\n value = LETTERS[1:4])\ncorner_cells <- tibble::tibble(row = c(2L, 2L, 2L, 2L),\n col = c(1L, 4L, 6L, 9L))\njustify(header_cells, corner_cells)\n\n\n"} {"package":"unpivotr","topic":"merge_cells","snippet":"### Name: merge_cells\n### Title: Merge cell values into a single cell by rows or columns\n### Aliases: merge_cells merge_rows merge_cols\n\n### ** Examples\n\n x <- tibble::tribble(\n~row, ~col, ~data_type, ~chr,\n 1, 1, \"chr\", \"Katy\",\n 2, 1, \"chr\", \"Perry\",\n 3, 1, \"chr\", \"a\",\n 4, 1, \"chr\", \"b\",\n 5, 1, \"chr\", \"c\",\n 2, 2, \"chr\", \"Adele\",\n 3, 2, \"chr\", \"d\",\n 4, 2, \"chr\", \"e\",\n 5, 2, \"chr\", \"f\",\n 1, 3, \"chr\", \"Ariana\",\n 2, 3, \"chr\", \"Grande\",\n 3, 3, \"chr\", \"g\",\n 4, 3, \"chr\", \"h\",\n 5, 3, \"chr\", \"i\"\n)\nrectify(x)\ny <- merge_rows(x, 1:2, chr)\nrectify(y)\nz <- merge_cols(x, 1:2, chr)\nrectify(z)\n\n\n"} {"package":"unpivotr","topic":"pack","snippet":"### Name: pack\n### Title: Pack cell values from separate columns per data type into one\n### list-column\n### Aliases: pack unpack\n\n### ** Examples\n\n# A normal data frame\nw <- data.frame(foo = 1:2,\n bar = c(\"a\", \"b\"),\n stringsAsFactors = FALSE)\nw\n\n# The same data, represented by one row per cell, with integer values in the\n# `int` column and character values in the `chr` column.\nx <- as_cells(w)\nx\n\n# pack() and unpack() are complements\npack(x)\nunpack(pack(x))\n\n# Drop non-data columns from a wide data frame of cells from tidyxl\nif (require(tidyxl)) {\n cells <- tidyxl::xlsx_cells(system.file(\"extdata\", \"purpose.xlsx\", package = \"unpivotr\"))\n cells\n\n pack(cells) %>%\n dplyr::select(row, col, value) %>%\n unpack()\n}\n\n\n"} {"package":"unpivotr","topic":"partition","snippet":"### Name: partition\n### Title: Divide a grid of cells into partitions containing individual\n### tables\n### Aliases: partition partition_dim\n\n### ** Examples\n\n# The `purpose` dataset, represented in four summary tables\nmultiples <- purpose$small_multiples\nrectify(multiples, character, numeric)\n\n# The same thing in its raw 'melted' form that can be filtered\nmultiples\n\n# First, find the cells that mark a corner of each table\ncorners <-\n dplyr::filter(multiples,\n !is.na(character),\n !(character %in% c(\"Sex\", \"Value\", \"Female\", \"Male\")))\n\n# Then find out which cells fall into which partition\npartition(multiples, corners)\n\n# You can also use bottom-left corners (or top-right or bottom-right)\nbl_corners <- dplyr::filter(multiples, character == \"Male\")\npartition(multiples, bl_corners, align = 
\"bottom_left\")\n\n# To complete the grid even when not all corners are supplied, use `strict`\nbl_corners <- bl_corners[-1, ]\npartition(multiples, bl_corners, align = \"bottom_left\")\npartition(multiples, bl_corners, align = \"bottom_left\", strict = FALSE)\n# Given a set of cells in rows 1 to 10, partition them at the 3rd, 5th and 7th\n# rows.\npartition_dim(1:10, c(3, 5, 7))\n\n# Given a set of cells in columns 1 to 10, partition them at the 3rd, 5th and\n# 7th column. This example is exactly the same as the previous one, to show\n# that the function works the same way on columns as rows.\npartition_dim(1:10, c(3, 5, 7))\n\n# Given a set of cells in rows 1 to 10, partition them at the 3rd, 5th and\n# 7th rows, aligned to the bottom of the group.\npartition_dim(1:10, c(3, 5, 7), bound = \"lower\")\n\n# Non-integer row/column numbers and cutpoints can be used, even though they\n# make no sense in the context of partioning grids of cells. They are\n# rounded towards zero first.\npartition_dim(1:10 - .5, c(3, 5, 7))\npartition_dim(1:10, c(3, 5, 7) + 1.5)\n\n\n"} {"package":"unpivotr","topic":"rectify","snippet":"### Name: rectify\n### Title: Display cells as though in a spreadsheet\n### Aliases: rectify print.cell_grid\n\n### ** Examples\n\nx <- data.frame(name = c(\"Matilda\", \"Nicholas\"),\n score = c(14L, 10L),\n stringsAsFactors = FALSE)\n\n# This is the original form of the table, which is easy to read.\nx\n\n# This is the 'tidy' arrangement that is difficult for humans to read (but\n# easy for computers)\ny <- as_cells(x, col_names = TRUE)\ny\n\n# rectify() projects the cells as a spreadsheet again, for humans to read.\nrectify(y)\n\n# You can choose to use a particular column of the data\nrectify(y, values = chr)\nrectify(y, values = int)\n\n# You can also show which row or which column each cell came from, which\n# helps with understanding what this function does.\nrectify(y, values = row)\nrectify(y, values = col)\n\n# Empty rows and columns up to the first occupied cell are dropped, but the\n# row and column names reflect the original row and column numbers.\ny$row <- y$row + 5\ny$col <- y$col + 5\nrectify(y)\n\n# Supply named functions to format cell values for display.\nrectify(y, formatters = list(chr = toupper, int = ~ . 
* 10))\n#\n# Print in the browser or in the RStudio viewer pane\n## Not run: \n##D z <- rectify(y)\n##D print(z, \"browser\")\n##D print(z, \"rstudio\")\n## End(Not run)\n\n\n"} {"package":"unpivotr","topic":"spatter","snippet":"### Name: spatter\n### Title: Spread key-value pairs of mixed types across multiple columns\n### Aliases: spatter\n\n### ** Examples\n\n# A tidy representation of cells of mixed data types\nx <- data.frame(stringsAsFactors = FALSE,\n row = c(1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L),\n col = c(1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L),\n data_type = c(\"character\", \"character\", \"character\", \"numeric\", \"character\",\n \"numeric\", \"character\", \"numeric\"),\n character = c(\"Name\", \"Age\", \"Matilda\", NA, \"Nicholas\", NA, \"Olivia\", NA),\n numeric = c(NA, NA, NA, 1, NA, 3, NA, 5))\nx\n\n# How it would look in a spreadsheet\nrectify(x)\n\n# How it looks after treating the cells in row 1 as headers\ny <- behead(x, \"N\", header)\ny$col <- NULL # Drop the 'col' column\ny\n\n# At this point you might want to do tidyr::spread(), but it won't work because\n# you want to use both the `character` and `numeric` columns as the values.\ntidyr::spread(y, header, numeric)\ntidyr::spread(y, header, character)\nspatter(y, header)\n\n# The difference between spatter() and tidyr::spread() is that spatter()\n# needs to know which data-type to use for each cell beneath the headers. By\n# default, it looks at the `data_type` column to decide, but you can change\n# that with the `types` argument.\ny %>%\n dplyr::select(-data_type, -numeric) %>%\n dplyr::mutate(data_type_2 = \"character\") %>%\n spatter(header, types = data_type_2)\n\n# Alternatively you can name one specific column to use for the cell values.\ny %>%\n dplyr::mutate(foo = letters[1:6]) %>%\n dplyr::select(header, row, foo) %>%\n spatter(header, values = foo)\n\n# The column used for the values is consumed before the spread occurs. If\n# it's necessary for demarking the rows, then make a copy of it first,\n# otherwise you'll get an error like \"Duplicate identifiers for rows ...\"\ny %>%\n dplyr::mutate(row2 = row) %>%\n dplyr::select(row, header, row2) %>%\n spatter(header, values = row2)\n\n# Like tidyr::spread(), you need to discard extraneous columns beforehand.\n# Otherwise you can get more rows out than you want.\ny$extra <- 11:16\nspatter(y, header)\n\n# pack() is an easy way to keep just the columns you need, without knowing\n# in advance which data-type columns you need. This examples adds a new\n# column, which is then removed by the pack-unpack sequence without having to\n# mention it by name.\nx$extra <- 11:18\nx %>%\n pack() %>%\n dplyr::select(row, col, value) %>%\n unpack()\n\n# spatter() automatically converts data types so that they can coexist in the\n# same column. Ordered factors in particular will always be coerced to\n# unordered factors.\n\n# You can control data type conversion by supplying custom functions, named\n# by the data type of the cells they are to convert (look at the `data_type`\n# column). 
If your custom functions aren't sufficient to avoid the need for\n# coercion, then they will be overridden.\nspatter(y, header,\n formatters = list(character = ~ toupper(.), numeric = as.complex))\n\n\n"} {"package":"unpivotr","topic":"tidy_table","snippet":"### Name: tidy_table\n### Title: Tokenize data frames into a tidy 'melted' structure\n### Aliases: tidy_table\n\n### ** Examples\n\nx <- data.frame(a = c(10, 20),\n b = c("foo", "bar"),\n stringsAsFactors = FALSE)\nx\ntidy_table(x)\ntidy_table(x, row_names = TRUE)\ntidy_table(x, col_names = TRUE)\n\n# 'list' columns are undisturbed\ny <- data.frame(a = c("a", "b"), stringsAsFactors = FALSE)\ny$b <- list(1:2, 3:4)\ny\ntidy_table(y)\n\n# Factors are preserved by being wrapped in lists so that their levels don't\n# conflict. Blanks are NULLs.\nz <- data.frame(x = factor(c("a", "b")),\n y = factor(c("c", "d"), ordered = TRUE))\ntidy_table(z)\ntidy_table(z)$fct\ntidy_table(z)$ord\n\n# HTML tables can be extracted from the output of xml2::read_html(). These\n# are returned as a list of tables, similar to rvest::html_table(). The\n# value of each cell is its standalone HTML string, which can contain\n# anything -- even another table.\n\ncolspan <- system.file("extdata", "colspan.html", package = "unpivotr")\nrowspan <- system.file("extdata", "rowspan.html", package = "unpivotr")\nnested <- system.file("extdata", "nested.html", package = "unpivotr")\n\n## Not run: \n##D browseURL(colspan)\n##D browseURL(rowspan)\n##D browseURL(nested)\n## End(Not run)\n\ntidy_table(xml2::read_html(colspan))\ntidy_table(xml2::read_html(rowspan))\ntidy_table(xml2::read_html(nested))\n\n\n"} {"package":"BoolFilter","topic":"BKF","snippet":"### Name: BKF\n### Title: Boolean Kalman Filter\n### Aliases: BKF\n\n### ** Examples\n\ndata(p53net_DNAdsb0)\n\nobsModel = list(type = 'NB', \n s = 10.875, \n mu = 0.01, \n delta = c(2, 2, 2, 2), \n phi = c(3, 3, 3, 3))\n\n#Simulate a network with Negative-Binomial observation model\ndata <- simulateNetwork(p53net_DNAdsb0, n.data = 100, p = 0.02, obsModel)\n \n#Derive the optimal estimate of the network using a BKF approach\nResults <- BKF(data$Y, p53net_DNAdsb0, p = 0.02, obsModel)\n\n\n\n"} {"package":"BoolFilter","topic":"BKS","snippet":"### Name: BKS\n### Title: Boolean Kalman Smoother\n### Aliases: BKS\n\n### ** Examples\n\ndata(p53net_DNAdsb0)\n\nobsModel = list(type = 'Bernoulli', q = 0.02)\n\n#Simulate a network with Bernoulli observation noise\ndata <- simulateNetwork(p53net_DNAdsb0, n.data = 100, p = 0.02, obsModel)\n \n#Derive the optimal estimate of state of the network using the BKS algorithm\nResults <- BKS(data$Y, p53net_DNAdsb0, p = 0.02, obsModel)\n\n\n\n"} {"package":"BoolFilter","topic":"BoolFilter-package","snippet":"### Name: BoolFilter-package\n### Title: Optimal Estimation of Partially-Observed Boolean Dynamical\n### Systems\n### Aliases: BoolFilter-package BoolFilter\n\n### ** Examples\n\n\ndata(p53net_DNAdsb0)\n \n#Simulate data from a Bernoulli observation model\ndata <- simulateNetwork(p53net_DNAdsb0, n.data = 100, p = 0.02,\n obsModel = list(type = "Bernoulli",\n q = 0.02))\n \n#Derive an estimate of the network using a BKF approach\nResults <- BKF(data$Y, p53net_DNAdsb0, .02,\n obsModel = list(type = "Bernoulli",\n q = 0.02))\n \n#View network approximation vs. 
correct trajectory\nplotTrajectory(Results$Xhat,\n labels = p53net_DNAdsb0$genes,\n dataset2 = data$X,\n compare = TRUE)\n\n\n\n"} {"package":"BoolFilter","topic":"MMAE","snippet":"### Name: MMAE\n### Title: Multiple Model Adaptive Estimation\n### Aliases: MMAE\n\n### ** Examples\n\n\n#load potential networks\ndata(p53net_DNAdsb0)\ndata(p53net_DNAdsb1)\n\nnet1 <- p53net_DNAdsb0\nnet2 <- p53net_DNAdsb1\n\n#define observation model\nobservation = list(type = 'NB', s = 10.875, mu = 0.01, delta = c(2, 2, 2, 2), phi = c(3, 3, 3, 3))\n\n#simulate data using one of the networks and a given 'p'\ndata <- simulateNetwork(net1, n.data = 100, p = 0.02, obsModel = observation)\n \n#run MMAE to determine model selection and parameter estimation\nMMAE(data, net=c("net1","net2"), p=c(0.02,0.1,0.15), threshold=0.8, obsModel = observation)\n\n\n\n\n"} {"package":"BoolFilter","topic":"SIR_BKF","snippet":"### Name: SIR_BKF\n### Title: Particle Filter\n### Aliases: SIR_BKF\n\n### ** Examples\n\n## No test: \ndata(cellcycle)\n\nobsModel = list(type = 'Gaussian', \n model = c(mu0 = 1, sigma0 = 2, mu1 = 5, sigma1 = 2))\n\n#generate data from Gaussian observation model for the\n#10-gene Mammalian Cell Cycle Network\ndata <- simulateNetwork(cellcycle, n.data = 100, p = 0.02, obsModel)\n\n#perform SIR-BKF algorithm\nResults <- SIR_BKF(data$Y, N = 1000, alpha = 0.95, cellcycle, p = 0.02, obsModel)\n \n## End(No test) \n\n\n\n"} {"package":"BoolFilter","topic":"melanoma","snippet":"### Name: melanoma\n### Title: Melanoma Regulatory Network\n### Aliases: melanoma\n\n### ** Examples\n\ndata(melanoma)\n\ndata <- simulateNetwork(melanoma, n.data = 100, p = .02,\n obsModel = list(type = 'Bernoulli', \n q = 0.05))\n\n\n"} {"package":"BoolFilter","topic":"p53net_DNAdsb0","snippet":"### Name: p53net_DNAdsb0\n### Title: p53 Negative-Feedback Gene Regulatory Boolean Network\n### Aliases: p53net_DNAdsb0\n\n### ** Examples\n\ndata(p53net_DNAdsb0)\n\ndata <- simulateNetwork(p53net_DNAdsb0, n.data = 100, p = .02,\n obsModel = list(type = 'Bernoulli', \n q = 0.05))\n\n\n"} {"package":"BoolFilter","topic":"p53net_DNAdsb1","snippet":"### Name: p53net_DNAdsb1\n### Title: p53 Negative-Feedback Gene Regulatory Boolean Network\n### Aliases: p53net_DNAdsb1\n\n### ** Examples\n\ndata(p53net_DNAdsb1)\n\ndata <- simulateNetwork(p53net_DNAdsb1, n.data = 100, p = .02,\n obsModel = list(type = 'Bernoulli', \n q = 0.05))\n\n\n"} {"package":"BoolFilter","topic":"plotTrajectory","snippet":"### Name: plotTrajectory\n### Title: Plot state variables of Boolean Regulatory Systems\n### Aliases: plotTrajectory\n\n### ** Examples\n\ndata(p53net_DNAdsb1)\n\ndata <- simulateNetwork(p53net_DNAdsb1, n.data = 100, p = 0.02,\n obsModel = list(type = 'Bernoulli',\n q = 0.05))\n\n\nplotTrajectory(data$X, \n labels = p53net_DNAdsb1$genes)\n \n \n#View both (original state trajectory and observation) datasets overlaid\nplotTrajectory(data$X, \n labels = p53net_DNAdsb1$genes,\n dataset2 = data$Y,\n compare = TRUE)\n\n\n\n"} {"package":"BoolFilter","topic":"simulateNetwork","snippet":"### Name: simulateNetwork\n### Title: Simulate Boolean Network\n### Aliases: simulateNetwork\n\n### ** Examples\n\ndata(p53net_DNAdsb1)\n\n#generate data from Poisson observation model\ndataPoisson <- simulateNetwork(p53net_DNAdsb1, n.data = 100, p = 0.02, \n obsModel = list(type = 'Poisson',\n s = 10.875, \n mu = 0.01, \n delta = c(2,2,2,2)))\n\n#generate data from Bernoulli observation model\ndataBernoulli <- simulateNetwork(p53net_DNAdsb1, n.data = 100, p = 0.02, \n 
obsModel = list(type = 'Bernoulli',\n q = 0.05))\n\n\n"} {"package":"docopulae","topic":"Defficiency","snippet":"### Name: Defficiency\n### Title: D Efficiency\n### Aliases: Defficiency\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"DerivLogf","snippet":"### Name: DerivLogf\n### Title: Build Derivative Function for Log f\n### Aliases: DerivLogf Deriv2Logf\n\n### ** Examples\n\n## see examples for param\n## mind the gain regarding runtime compared to numDeriv\n\n\n\n"} {"package":"docopulae","topic":"Dsensitivity","snippet":"### Name: Dsensitivity\n### Title: D Sensitivity\n### Aliases: Dsensitivity\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"Wynn","snippet":"### Name: Wynn\n### Title: Wynn\n### Aliases: Wynn\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"buildf","snippet":"### Name: buildf\n### Title: Build probability density or mass Function\n### Aliases: buildf\n\n### ** Examples\n\n## No test: \n## for an actual use case see examples for param\n\nlibrary(copula)\nlibrary(mvtnorm)\n\n## build bivariate normal\nmargins = function(y, theta) {\n mu = c(theta$mu1, theta$mu2)\n cbind(dnorm(y, mean=mu, sd=1), pnorm(y, mean=mu, sd=1))\n}\ncopula = normalCopula()\n\n# args: function, copula object, parNames\nf1 = buildf(margins, TRUE, copula, parNames='alpha1')\nf1 # uses theta[['alpha1']] as copula parameter\n\n## evaluate and plot\ntheta = list(mu1=2, mu2=-3, alpha1=0.4)\n\ny1 = seq(0, 4, length.out=51)\ny2 = seq(-5, -1, length.out=51)\nv1 = outer(y1, y2, function(z1, z2) apply(cbind(z1, z2), 1, f1, theta))\nstr(v1)\ncontour(y1, y2, v1, main='f1', xlab='y1', ylab='y2')\n\n## compare with bivariate normal from mvtnorm\ncopula@parameters = theta$alpha1\nv = outer(y1, y2, function(yy1, yy2)\n dmvnorm(cbind(yy1, yy2), mean=c(theta$mu1, theta$mu2),\n sigma=getSigma(copula)))\nall.equal(v1, v)\n\n\n## build bivariate pdf with normal margins and Clayton copula\nmargins = list(list(pdf=quote(dnorm(y[1], theta$mu1, 1)),\n cdf=quote(pnorm(y[1], theta$mu1, 1))),\n list(pdf=quote(dnorm(y[2], theta$mu2, 1)),\n cdf=quote(pnorm(y[2], theta$mu2, 1))))\ncopula = claytonCopula()\n\n# args: list, copula object, parNames\nf2 = buildf(margins, TRUE, copula, list(alpha='alpha1'))\nf2\n\n## evaluate and plot\ntheta = list(mu1=2, mu2=-3, alpha1=2)\n\ny1 = seq(0, 4, length.out=51)\ny2 = seq(-5, -1, length.out=51)\nv2 = outer(y1, y2, function(z1, z2) apply(cbind(z1, z2), 1, f2, theta))\nstr(v2)\ncontour(y1, y2, v2, main='f2', xlab='y1', ylab='y2')\n\n## build alternatives\ncexpr = substituteDirect(copula@exprdist$pdf,\n list(alpha=quote(theta$alpha1)))\n# args: list, expression\nf3 = buildf(margins, TRUE, cexpr) # equivalent to f2\nf3\n\nmargins = 
function(y, theta) {\n mu = c(theta$mu1, theta$mu2)\n cbind(dnorm(y, mean=mu, sd=1), pnorm(y, mean=mu, sd=1))\n}\n# args: function, copula object, parNames\nf4 = buildf(margins, TRUE, copula, 'alpha1')\nf4\n\ncpdf = function(u, theta) {\n copula@parameters = theta$alpha1\n dCopula(u, copula)\n}\n# args: function, function\nf5 = buildf(margins, TRUE, cpdf) # equivalent to f4\nf5\n\n# args: function, copula object\ncopula@parameters = 2\nf6 = buildf(margins, TRUE, copula)\nf6 # uses copula@parameters\n\ncpdf = function(u, theta) dCopula(u, copula)\n# args: function, function\nf7 = buildf(margins, TRUE, cpdf) # equivalent to f6\nf7\n\n## compare all\nvv = lapply(list(f3, f4, f5, f6, f7), function(f)\n outer(y1, y2, function(z1, z2) apply(cbind(z1, z2), 1, f, theta)))\nsapply(vv, all.equal, v2)\n## End(No test)\n\n\n"} {"package":"docopulae","topic":"design","snippet":"### Name: design\n### Title: Design\n### Aliases: design\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"fisherI","snippet":"### Name: fisherI\n### Title: Fisher Information\n### Aliases: fisherI\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"getM","snippet":"### Name: getM\n### Title: Get Fisher Information\n### Aliases: getM\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"integrateA","snippet":"### Name: integrateA\n### Title: Integrate Alternative\n### Aliases: integrateA\n\n### ** Examples\n\nf = function(x) ifelse(x < 0, cos(x), sin(x))\n#curve(f(x), -1, 1)\ntry(integrate(f, -1, 1, subdivisions=1)$value)\nintegrateA(f, -1, 1, subdivisions=1)$value\nintegrateA(f, -1, 1, subdivisions=2)$value\nintegrateA(f, -1, 1, subdivisions=3)$value\n\n\n"} {"package":"docopulae","topic":"nint_expandSpace","snippet":"### Name: nint_expandSpace\n### Title: Expand Space\n### Aliases: nint_expandSpace\n\n### ** Examples\n\ns = nint_space(list(nint_intvDim(1, 2),\n nint_intvDim(3, 4)),\n list(nint_intvDim(-Inf, 0),\n nint_gridDim(c(0)),\n nint_intvDim(0, Inf))\n )\ns\nnint_expandSpace(s)\n\n\n"} {"package":"docopulae","topic":"nint_integrate","snippet":"### Name: nint_integrate\n### Title: Integrate\n### Aliases: nint_integrate\n\n### ** Examples\n\n## discrete\n## a) scatter\ns = nint_space(nint_scatDim(1:3),\n nint_scatDim(c(0, 2, 5)))\ns\n## (1, 0), (2, 2), (3, 5)\nnint_integrate(function(x) abs(x[1] - x[2]), s) # 1 + 0 + 2 == 3\n\n## b) grid\ns = nint_space(nint_gridDim(1:3),\n nint_gridDim(c(0, 2, 5)))\ns\n## (1, 0), (1, 2), (1, 5), (2, 0), ..., (3, 2), (3, 5)\nnint_integrate(function(x) ifelse(sum(x) < 5, 1, 0), s) # 5\n\n\n## continuous\n## c)\ns = nint_space(nint_intvDim(1, 3),\n nint_intvDim(1, Inf))\ns\nnint_integrate(function(x) 1/x[2]**2, s) # 2\n\n## d) infinite, no transform\ns = nint_space(nint_intvDim(-Inf, Inf))\nnint_integrate(sin, s) # 0\n\n## e) infinite, transform\ns = nint_space(nint_intvDim(-Inf, Inf),\n nint_intvDim(-Inf, Inf))\n## probability integral transform\ntt = nint_transform(function(x) prod(dnorm(x)), s, list(list(\n dIdcs=1:2,\n g=function(x) pnorm(x),\n giDg=function(y) { t1 = qnorm(y); list(t1, dnorm(t1)) })))\ntt$space\nnint_integrate(tt$f, tt$space) # 1\n\n\n## functionally dependent\n## f) area of triangle\ns = nint_space(nint_intvDim(0, 1),\n nint_funcDim(function(x) nint_intvDim(x[1]/2, 1 - x[1]/2)) )\ns\nnint_integrate(function(x) 1, s) # 0.5\n\n## g) area of circle\ns = nint_space(\n nint_intvDim(-1, 1),\n nint_funcDim(function(x) nint_intvDim( c(-1, 1) * sin(acos(x[1])) 
))\n)\ns\nnint_integrate(function(x) 1, s) # pi\n\n## h) volume of sphere\ns = nint_space(s[[1]],\n s[[2]],\n nint_funcDim(function(x) {\n r = sin(acos(x[1]))\n nint_intvDim(c(-1, 1) * r*cos(asin(x[2] / r)))\n }) )\ns\nnint_integrate(function(x) 1, s) # 4*pi/3\n\n\n"} {"package":"docopulae","topic":"nint_integrateNCube","snippet":"### Name: nint_integrateNCube\n### Title: Integrate Hypercube\n### Aliases: nint_integrateNCube nint_integrateNCube_integrate\n### nint_integrateNCube_cubature nint_integrateNCube_SparseGrid\n\n### ** Examples\n\n## integrate with defaults (stats::integrate)\nnint_integrate(sin, nint_space(nint_intvDim(pi/4, 3*pi/4)))\n\n\ndfltNCube = nint_integrateNCube\n\n## prepare for integrateA\nncube = function(f, lowerLimit, upperLimit, ...) {\n cat('using integrateA\\n')\n integrateA(f, lowerLimit, upperLimit, ..., subdivisions=2)\n}\nncube = nint_integrateNCube_integrate(ncube)\nunlockBinding('nint_integrateNCube', environment(nint_integrate))\nassign('nint_integrateNCube', ncube, envir=environment(nint_integrate))\n\n## integrate with integrateA\nnint_integrate(sin, nint_space(nint_intvDim(pi/4, 3*pi/4)))\n\n\n## prepare for cubature\nncube = function(f, lowerLimit, upperLimit, ...) {\n cat('using cubature\\n')\n r = cubature::adaptIntegrate(f, lowerLimit, upperLimit, ..., maxEval=1e3)\n return(r$integral)\n}\nunlockBinding('nint_integrateNCube', environment(nint_integrate))\nassign('nint_integrateNCube', ncube, envir=environment(nint_integrate))\n\n## integrate with cubature\nnint_integrate(sin, nint_space(nint_intvDim(pi/4, 3*pi/4)))\n\n\n## prepare for SparseGrid\nncube = function(dimension) {\n cat('using SparseGrid\\n')\n SparseGrid::createIntegrationGrid('GQU', dimension, 7)\n}\nncube = nint_integrateNCube_SparseGrid(ncube)\nunlockBinding('nint_integrateNCube', environment(nint_integrate))\nassign('nint_integrateNCube', ncube, envir=environment(nint_integrate))\n\n## integrate with SparseGrid\nnint_integrate(sin, nint_space(nint_intvDim(pi/4, 3*pi/4)))\n\n\nassign('nint_integrateNCube', dfltNCube, envir=environment(nint_integrate))\n\n\n"} {"package":"docopulae","topic":"nint_integrateNFunc","snippet":"### Name: nint_integrateNFunc\n### Title: Integrate N Function\n### Aliases: nint_integrateNFunc nint_integrateNFunc_recursive\n\n### ** Examples\n\ndfltNFunc = nint_integrateNFunc\n\n## area of circle\ns = nint_space(\n nint_intvDim(-1, 1),\n nint_funcDim(function(x) nint_intvDim(c(-1, 1) * sin(acos(x[1])) ))\n)\nnint_integrate(function(x) 1, s) # pi\n## see nint_integrate's examples for more sophisticated integrals\n\n\n## prepare for custom recursive implementation\nusing = TRUE\nnfunc = nint_integrateNFunc_recursive(\n function(f, lowerLimit, upperLimit, ...) {\n if (using) { # this function is called many times\n using <<- FALSE\n cat('using integrateA\\n')\n }\n integrateA(f, lowerLimit, upperLimit, ..., subdivisions=1)$value\n }\n)\nunlockBinding('nint_integrateNFunc', environment(nint_integrate))\nassign('nint_integrateNFunc', nfunc, envir=environment(nint_integrate))\n\n## integrate with custom recursive implementation\nnint_integrate(function(x) 1, s) # pi\n\n\n## prepare for custom solution\nf = function(f, funcs, x0, i0, ...) 
{\n # add sophisticated code here\n print(list(f=f, funcs=funcs, x0=x0, i0=i0, ...))\n stop('do something')\n}\nunlockBinding('nint_integrateNFunc', environment(nint_integrate))\nassign('nint_integrateNFunc', f, envir=environment(nint_integrate))\n\n## integrate with custom solution\ntry(nint_integrate(function(x) 1, s))\n\n\nassign('nint_integrateNFunc', dfltNFunc, envir=environment(nint_integrate))\n\n\n"} {"package":"docopulae","topic":"nint_space","snippet":"### Name: nint_space\n### Title: Space\n### Aliases: nint_space\n\n### ** Examples\n\ns = nint_space(nint_gridDim(seq(1, 3, 0.9)),\n nint_scatDim(seq(2, 5, 0.8)),\n nint_intvDim(-Inf, Inf),\n nint_funcDim(function(x) nint_intvDim(0, x[1])),\n list(nint_gridDim(c(0, 10)),\n list(nint_intvDim(1, 7)))\n )\ns\n\n\n"} {"package":"docopulae","topic":"nint_tanTransform","snippet":"### Name: nint_tanTransform\n### Title: Tangent Transform\n### Aliases: nint_tanTransform\n\n### ** Examples\n\nmu = 1e0\nsigma = mu/3\nf = function(x) dnorm(x, mean=mu, sd=sigma)\nspace = nint_space(nint_intvDim(-Inf, Inf))\n\ntt = nint_transform(f, space, list(nint_tanTransform(0, 1, dIdcs=1)))\ntt$space\nff = Vectorize(tt$f); curve(ff(x), tt$space[[1]][1], tt$space[[1]][2])\n\nnint_integrate(tt$f, tt$space) # should return 1\n\n# same with larger mu\nmu = 1e4\nsigma = mu/3\nf = function(x) dnorm(x, mean=mu, sd=sigma)\n\ntt = nint_transform(f, space, list(nint_tanTransform(0, 1, dIdcs=1)))\nff = Vectorize(tt$f); curve(ff(x), tt$space[[1]][1], tt$space[[1]][2])\n\ntry(nint_integrate(tt$f, tt$space)) # integral is probably divergent\n\n# same with different transformation\ntt = nint_transform(f, space, list(nint_tanTransform(mu, sigma, dIdcs=1)))\nff = Vectorize(tt$f); curve(ff(x), tt$space[[1]][1], tt$space[[1]][2])\n\nnint_integrate(tt$f, tt$space) # should return 1\n\n\n"} {"package":"docopulae","topic":"nint_transform","snippet":"### Name: nint_transform\n### Title: Transform Integral\n### Aliases: nint_transform\n\n### ** Examples\n\nlibrary(mvtnorm)\nlibrary(SparseGrid)\n\ndfltNCube = nint_integrateNCube\n\n\n## 1D, normal pdf\nmu = 137\nsigma = mu/6\nf = function(x) dnorm(x, mean=mu, sd=sigma)\nspace = nint_space(nint_intvDim(-Inf, Inf))\n\ntt = nint_transform(f, space,\n list(nint_tanTransform(mu + 3, sigma*1.01, dIdcs=1)))\ntt$space\nff = Vectorize(tt$f); curve(ff(x), tt$space[[1]][1], tt$space[[1]][2])\n\nnint_integrate(tt$f, tt$space) # returns 1\n\n\n## 2D, normal pdf\n\n## prepare for SparseGrid\nncube = function(dimension)\n SparseGrid::createIntegrationGrid('GQU', dimension, 7) # rather sparse!\nncube = nint_integrateNCube_SparseGrid(ncube)\nunlockBinding('nint_integrateNCube', environment(nint_integrate))\nassign('nint_integrateNCube', ncube, envir=environment(nint_integrate))\n\nmu = c(1, 2)\nsigma = matrix(c(1, 0.7,\n 0.7, 2), nrow=2)\nf = function(x) {\n if (all(is.infinite(x))) # dmvnorm returns NaN in this case\n return(0)\n return(dmvnorm(x, mean=mu, sigma=sigma))\n}\n\n# plot f\nx1 = seq(-1, 3, length.out=51); x2 = seq(-1, 5, length.out=51)\ny = outer(x1, x2, function(x1, x2) apply(cbind(x1, x2), 1, f))\ncontour(x1, x2, y, xlab='x[1]', ylab='x[2]', main='f')\n\nspace = nint_space(nint_intvDim(-Inf, Inf),\n nint_intvDim(-Inf, Inf))\n\ntt = nint_transform(f, space,\n list(nint_tanTransform(mu, diag(sigma), dIdcs=1:2)))\ntt$space\n\n# plot tt$f\nx1 = seq(tt$space[[1]][1], tt$space[[1]][2], length.out=51)\nx2 = seq(tt$space[[2]][1], tt$space[[2]][2], length.out=51)\ny = outer(x1, x2, function(x1, x2) apply(cbind(x1, x2), 1, tt$f))\ncontour(x1, 
x2, y, xlab='x[1]', ylab='x[2]', main='tt$f')\n\nnint_integrate(tt$f, tt$space) # doesn't return 1\n# tan transform is inaccurate here\n\n# probability integral transform\ndsigma = diag(sigma)\nt1 = list(g=function(x) pnorm(x, mean=mu, sd=dsigma),\n giDg=function(y) {\n x = qnorm(y, mean=mu, sd=dsigma)\n list(x, dnorm(x, mean=mu, sd=dsigma))\n },\n dIdcs=1:2)\n\ntt = nint_transform(f, space, list(t1))\n\n# plot tt$f\nx1 = seq(tt$space[[1]][1], tt$space[[1]][2], length.out=51)\nx2 = seq(tt$space[[2]][1], tt$space[[2]][2], length.out=51)\ny = outer(x1, x2, function(x1, x2) apply(cbind(x1, x2), 1, tt$f))\ncontour(x1, x2, y, xlab='x[1]', ylab='x[2]', main='tt$f')\n\nnint_integrate(tt$f, tt$space) # returns almost 1\n\n\n## 2D, half sphere\nf = function(x) sqrt(1 - x[1]^2 - x[2]^2)\nspace = nint_space(nint_intvDim(-1, 1),\n nint_funcDim(function(x)\n nint_intvDim(c(-1, 1)*sqrt(1 - x[1]^2))))\n\n# plot f\nx = seq(-1, 1, length.out=51)\ny = outer(x, x, function(x1, x2) apply(cbind(x1, x2), 1, f))\npersp(x, x, y, theta=45, phi=45, xlab='x[1]', ylab='x[2]', zlab='f')\n\ntt = nint_transform(f, space, list())\ntt$space\n\n# plot tt$f\nx1 = seq(tt$space[[1]][1], tt$space[[1]][2], length.out=51)\nx2 = seq(tt$space[[2]][1], tt$space[[2]][2], length.out=51)\ny = outer(x1, x2, function(x1, x2) apply(cbind(x1, x2), 1, tt$f))\npersp(x1, x2, y, theta=45, phi=45, xlab='x[1]', ylab='x[2]', zlab='tt$f')\n\nnint_integrate(tt$f, tt$space) # returns almost 4/3*pi / 2\n\n\n## 2D, constrained normal pdf\nf = function(x) prod(dnorm(x, 0, 1))\nspace = nint_space(nint_intvDim(-Inf, Inf),\n nint_funcDim(function(x) nint_intvDim(-Inf, x[1]^2)))\n\ntt = nint_transform(f, space, list(nint_tanTransform(0, 1, dIdcs=1:2)))\n\n# plot tt$f\nx1 = seq(tt$space[[1]][1], tt$space[[1]][2], length.out=51)\nx2 = seq(tt$space[[2]][1], tt$space[[2]][2], length.out=51)\ny = outer(x1, x2, function(x1, x2) apply(cbind(x1, x2), 1, tt$f))\npersp(x1, x2, y, theta=45, phi=45, xlab='x[1]', ylab='x[2]', zlab='tt$f')\n\nnint_integrate(tt$f, tt$space) # Mathematica returns 0.716315\n\n\nassign('nint_integrateNCube', dfltNCube, envir=environment(nint_integrate))\n\n\n"} {"package":"docopulae","topic":"nint_validateSpace","snippet":"### Name: nint_validateSpace\n### Title: Validate Space\n### Aliases: nint_validateSpace\n\n### ** Examples\n\n## valid\ns = nint_space()\ns\nnint_validateSpace(s)\n\ns = nint_space(nint_intvDim(-1, 1))\ns\nnint_validateSpace(s)\n\n## -1001\ns = nint_space(1)\ns\nnint_validateSpace(s)\n\n## -1002\ns = nint_space(list(nint_scatDim(c(1, 2)), nint_scatDim(c(1, 2, 3))))\ns\nnint_validateSpace(s)\n\ns = nint_space(nint_scatDim(c(1, 2)),\n nint_scatDim(c(1, 2, 3)))\ns\nnint_validateSpace(s)\n\n## -1003\nnint_validateSpace(1)\nnint_validateSpace(list(nint_space())) # valid\nnint_validateSpace(list(1))\n\n## -1004\ns1 = nint_space(nint_gridDim(1:3),\n nint_scatDim(c(0, 1)))\ns2 = nint_space(s1[[1]])\ns1 # 2D\ns2 # 1D\nnint_validateSpace(list(s1, s2))\n\n\n"} {"package":"docopulae","topic":"numDerivLogf","snippet":"### Name: numDerivLogf\n### Title: Build Derivative Function for Log f\n### Aliases: numDerivLogf numDeriv2Logf\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"param","snippet":"### Name: param\n### Title: Parametric Model\n### Aliases: param\n\n### ** Examples\n\n## No test: \nlibrary(copula)\n\n\ndfltNCube = nint_integrateNCube\n\n## prepare for SparseGrid integration\nncube = function(dimension) {\n SparseGrid::createIntegrationGrid('GQU', dimension, 3)\n}\nncube = 
nint_integrateNCube_SparseGrid(ncube)\nunlockBinding('nint_integrateNCube', environment(nint_integrate))\nassign('nint_integrateNCube', ncube, envir=environment(nint_integrate))\n\n\n## general settings\nnumDeriv = FALSE\n\n\n## build pdf, derivatives\netas = function(theta) with(theta, {\n xx = x^(0:4)\n c(c(beta1, beta2, beta3) %*% xx[c(1, 2, 3)], # x^c(0, 1, 2)\n c(beta4, beta5, beta6) %*% xx[c(2, 4, 5)]) # x^c(1, 3, 4)\n})\n\ncopula = claytonCopula()\nalphas = c('alpha')\n\nparNames = c(paste('beta', 1:6, sep=''), alphas)\n\nif (numDeriv) {\n margins = function(y, theta, ...) {\n e = etas(theta)\n cbind(dnorm(y, mean=e, sd=1), pnorm(y, mean=e, sd=1))\n }\n f = buildf(margins, TRUE, copula, parNames=alphas)\n\n d2logf = numDeriv2Logf(f)\n\n} else {\n es = list(\n eta1=quote(theta$beta1 + theta$beta2*theta$x + theta$beta3*theta$x^2),\n eta2=quote(theta$beta4*theta$x + theta$beta5*theta$x^3 + theta$beta6*theta$x^4))\n\n margins = list(list(pdf=substitute(dnorm(y[1], mean=eta1, sd=1), es),\n cdf=substitute(pnorm(y[1], mean=eta1, sd=1), es)),\n list(pdf=substitute(dnorm(y[2], mean=eta2, sd=1), es),\n cdf=substitute(pnorm(y[2], mean=eta2, sd=1), es)))\n pn = as.list(alphas); names(pn) = alphas # map parameter to variable\n f = buildf(margins, TRUE, copula, parNames=pn)\n\n cat('building derivatives ...')\n tt = system.time(d2logf <- Deriv2Logf(f, parNames))\n cat('\\n')\n print(tt)\n}\n\nf\nstr(d2logf)\n\n\n## param\nmodel = function(theta) {\n integrand = function(y, theta, i, j)\n -d2logf(y, theta, i, j) * f(y, theta)\n\n yspace = nint_space(nint_intvDim(-Inf, Inf),\n nint_intvDim(-Inf, Inf))\n\n fisherIf = function(x) {\n theta$x = x\n\n ## probability integral transform\n e = etas(theta)\n\n tt = nint_transform(integrand, yspace, list(list(\n dIdcs=1:2,\n g=function(y) pnorm(y, mean=e, sd=1),\n giDg=function(z) {\n t1 = qnorm(z, mean=e, sd=1)\n list(t1, dnorm(t1, mean=e, sd=1))\n }\n )))\n\n fisherI(tt$f, theta, parNames, tt$space)\n }\n\n return(param(fisherIf, 1))\n}\n\ntheta = list(beta1=1, beta2=1, beta3=1,\n beta4=1, beta5=1, beta6=1,\n alpha=iTau(copula, 0.5), x=0)\nm = model(theta)\n\n## update.param\nsystem.time(m <- update(m, matrix(seq(0, 1, length.out=101), ncol=1)))\n\n## find D-optimal design\nD = Dsensitivity(defaults=list(x=m$x, desx=m$x, mod=m))\n\nd <- Wynn(D, 7.0007, maxIter=1e4)\nd$tag$Wynn$tolBreak\n\ndev.new(); plot(d, sensTol=7, main='d')\n\ngetM(m, d)\n\nrd = reduce(d, 0.05)\ncbind(x=rd$x, w=rd$w)\n\ndev.new(); plot(rd, main='rd')\n\ntry(getM(m, rd))\nm2 = update(m, rd)\ngetM(m2, rd)\n\n## find Ds-optimal design\ns = c(alphas, 'beta1', 'beta2', 'beta3')\nDs = Dsensitivity(A=s, defaults=list(x=m$x, desx=m$x, mod=m))\n\nds <- Wynn(Ds, 4.0004, maxIter=1e4)\nds$tag$Wynn$tolBreak\n\ndev.new(); plot(reduce(ds, 0.05), sensTol=4, main='ds')\n\n## create custom design\nn = 4\nd2 = design(x=matrix(seq(0, 1, length.out=n), ncol=1), w=rep(1/n, n))\n\nm = update(m, d2)\ndev.new(); plot(d2, sensx=d$x, sens=D(x=d$x, desx=d2$x, desw=d2$w, mod=m),\n sensTol=7, main='d2')\n\n## compare designs\nDefficiency(ds, d, m)\nDefficiency(d, ds, m, A=s) # Ds-efficiency\nDefficiency(d2, d, m)\nDefficiency(d2, ds, m) # D-efficiency\n\n## end with nice plot\ndev.new(); plot(rd, main='rd')\n\n\nassign('nint_integrateNCube', dfltNCube, envir=environment(nint_integrate))\n## End(No test)\n\n\n"} {"package":"docopulae","topic":"plot.desigh","snippet":"### Name: plot.desigh\n### Title: Plot Design\n### Aliases: plot.desigh\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} 
{"package":"docopulae","topic":"reduce","snippet":"### Name: reduce\n### Title: Reduce Design\n### Aliases: reduce\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"docopulae","topic":"rowmatch","snippet":"### Name: rowmatch\n### Title: Row Matching\n### Aliases: rowmatch\n\n### ** Examples\n\na = as.matrix(expand.grid(as.double(2:3), as.double(3:6)))\na = a[sample(nrow(a)),]\na\n\nb = as.matrix(expand.grid(as.double(3:4), as.double(2:5)))\nb = b[sample(nrow(b)),]\nb\n\ni = rowmatch(a, b)\ni\nb[na.omit(i),] # matching rows\na[is.na(i),] # non matching rows\n\n\n"} {"package":"docopulae","topic":"roworder","snippet":"### Name: roworder\n### Title: Matrix Ordering Permutation\n### Aliases: roworder\n\n### ** Examples\n\nx = expand.grid(1:3, 1:2, 3:1)\nx = x[sample(seq1(1, nrow(x)), nrow(x)),]\nx\n\nord = roworder(x)\nord\n\nx[ord,]\n\n\n"} {"package":"docopulae","topic":"seq1","snippet":"### Name: seq1\n### Title: Sequence Generation\n### Aliases: seq1\n\n### ** Examples\n\nseq1(1, 3)\nseq1(3, 1) # different from seq\nseq(3, 1)\n3:1\n\nseq1(5, 1, -3)\n\n\n"} {"package":"docopulae","topic":"update.param","snippet":"### Name: update.param\n### Title: Update Parametric Model\n### Aliases: update.param\n\n### ** Examples\n\n## see examples for param\n\n\n\n"} {"package":"mmr","topic":"mm","snippet":"### Name: mm\n### Title: Matrix Multiplication\n### Aliases: mm %mm%\n\n### ** Examples\n\n\nx <- data.frame(a=c(1,2,3), b=c(5,6,7))\ny <- c(2,2)\nmm(x, y)\n\n\nx <- data.frame(a=c(1,2,3), b=c(5,6,7))\ny <- c(2,2)\nx %mm% y\n\n\n\n"} {"package":"adabag","topic":"Ensemble_ranking_IW","snippet":"### Name: Ensemble_ranking_IW\n### Title: Ensemble methods for ranking data: Item-Weighted Boosting and\n### Bagging Algorithms\n### Aliases: Ensemble_ranking_IW\n\n### ** Examples\n\n## Not run: \n##D # Load simulated ranking data\n##D data(simulatedRankingData)\n##D x <- simulatedRankingData$x\n##D y <- simulatedRankingData$y\n##D \n##D # Prepare the data with item weights\n##D dati <- prep_data(y, x, iw = c(2, 5, 5, 2))\n##D \n##D # Divide the data into training and test sets\n##D set.seed(12345)\n##D samp <- sample(nrow(dati))\n##D l <- length(dati[, 1])\n##D sub <- sample(1:l, 2 * l / 3)\n##D data_sub1 <- dati[sub, ]\n##D data_test1 <- dati[-sub, ]\n##D \n##D # Apply ensemble ranking with AdaBoost.M1\n##D boosting_1 <- Ensemble_ranking_IW(\n##D Label ~ .,\n##D data = data_sub1,\n##D iw = c(2, 5, 5, 2),\n##D mfinal = 3,\n##D coeflearn = \"Breiman\",\n##D control = rpart.control(maxdepth = 4, cp = -1),\n##D algo = \"boosting\",\n##D bin = FALSE\n##D )\n##D \n##D # Evaluate the performance\n##D test_boosting1 <- errorevol_ranking_vector_IW(boosting_1, \n##D newdata = data_test1, iw=c(2,5,5,2), squared = FALSE)\n##D test_boosting1.1 <- errorevol_ranking_vector_IW(boosting_1, \n##D newdata = data_sub1, iw=c(2,5,5,2), squared = FALSE)\n##D \n##D # Plot the error evolution\n##D plot.errorevol(test_boosting1, test_boosting1.1)\n##D \n## End(Not run)\n\n\n"} {"package":"adabag","topic":"MarginOrderedPruning.Bagging","snippet":"### Name: MarginOrderedPruning.Bagging\n### Title: MarginOrderedPruning.Bagging\n### Aliases: MarginOrderedPruning.Bagging\n### Keywords: tree classif\n\n### ** Examples\n\n## mlbench package should be loaded\nlibrary(mlbench)\ndata(Satellite)\n## Separate data into 3 parts: training set, pruning set and test set\nind <- sample(3, nrow(Satellite), replace = TRUE, prob=c(0.3, 0.2,0.5))\n\n## create bagging with training set\n#increase mfinal in your own execution of this 
example to see \n#the real usefulness of this function\nSatellite.bagging<-bagging(classes~.,data=Satellite[ind==1,],mfinal=3)\n#Satellite.bagging.pred<-predict(Satellite.bagging,Satellite[ind==3,])\n\n##pruning bagging\nSatellite.bagging.pruning<-MarginOrderedPruning.Bagging(Satellite.bagging,\nSatellite[ind==1,],Satellite[ind==2,])\n#Satellite.bagging.pruning.pred<-predict(Satellite.bagging.pruning$prunedBagging,\n#Satellite[ind==3,])\n\n## create bagging with training and pruning set\n#This example has been hidden to keep execution time <5s \n#Satellite.bagging2<-bagging(classes~.,data=Satellite[ind!=3,],25)\n#Satellite.bagging2.pred<-predict(Satellite.bagging2,Satellite[ind==3,])\n\n\n\n"} {"package":"adabag","topic":"adabag-package","snippet":"### Name: adabag-package\n### Title: Applies Multiclass AdaBoost.M1, SAMME and Bagging\n### Aliases: adabag-package adabag\n### Keywords: tree classif\n\n### ** Examples\n\n## rpart library should be loaded\ndata(iris)\niris.adaboost <- boosting(Species~., data=iris, boos=TRUE,\n\tmfinal=3)\nimportanceplot(iris.adaboost)\n\nsub <- c(sample(1:50, 35), sample(51:100, 35), sample(101:150, 35))\niris.bagging <- bagging(Species ~ ., data=iris[sub,], mfinal=3)\n#Predicting with labeled data\niris.predbagging<-predict.bagging(iris.bagging, newdata=iris[-sub,])\niris.predbagging\n#Predicting with unlabeled data\niris.predbagging<- predict.bagging(iris.bagging, newdata=iris[-sub,-5])\niris.predbagging\n\n\n"} {"package":"adabag","topic":"autoprune","snippet":"### Name: autoprune\n### Title: Builds automatically a pruned tree of class 'rpart'\n### Aliases: autoprune\n### Keywords: tree classif\n\n### ** Examples\n\n## rpart library should be loaded\nlibrary(rpart)\ndata(iris)\niris.prune<-autoprune(Species~., data=iris)\niris.prune\n\n## Comparing the test error of rpart and autoprune\nlibrary(mlbench)\ndata(BreastCancer)\nl <- length(BreastCancer[,1])\nsub <- sample(1:l,2*l/3)\n\nBC.rpart <- rpart(Class~.,data=BreastCancer[sub,-1],cp=-1, maxdepth=5)\nBC.rpart.pred <- predict(BC.rpart,newdata=BreastCancer[-sub,-1],type=\"class\")\ntb <-table(BC.rpart.pred,BreastCancer$Class[-sub])\ntb\n1-(sum(diag(tb))/sum(tb))\n\n\nBC.prune<-autoprune(Class~.,data=BreastCancer[,-1],subset=sub)\nBC.rpart.pred <- predict(BC.prune,newdata=BreastCancer[-sub,-1],type=\"class\")\ntb <-table(BC.rpart.pred,BreastCancer$Class[-sub])\ntb\n1-(sum(diag(tb))/sum(tb))\n\n\n\n\n\n"} {"package":"adabag","topic":"bagging","snippet":"### Name: bagging\n### Title: Applies the Bagging algorithm to a data set\n### Aliases: bagging\n### Keywords: tree classif\n\n### ** Examples\n\n## rpart library should be loaded\n#This example has been hidden to keep execution time <5s\n#library(rpart)\n#data(iris)\n#iris.bagging <- bagging(Species~., data=iris, mfinal=10)\n\n# Data Vehicle (four classes)\nlibrary(rpart)\nlibrary(mlbench)\ndata(Vehicle)\nl <- length(Vehicle[,1])\nsub <- sample(1:l,2*l/3)\nVehicle.bagging <- bagging(Class ~.,data=Vehicle[sub, ],mfinal=5, \n\tcontrol=rpart.control(maxdepth=5, minsplit=15))\n#Using the pruning option\nVehicle.bagging.pred <- predict.bagging(Vehicle.bagging,newdata=Vehicle[-sub, ], newmfinal=3)\nVehicle.bagging.pred$confusion\nVehicle.bagging.pred$error\n\n\n\n\n"} {"package":"adabag","topic":"bagging.cv","snippet":"### Name: bagging.cv\n### Title: Runs v-fold cross validation with Bagging\n### Aliases: bagging.cv\n### Keywords: tree classif\n\n### ** Examples\n\n## rpart library should be loaded\nlibrary(rpart)\ndata(iris)\niris.baggingcv <- 
bagging.cv(Species ~ ., v=2, data=iris, mfinal=3,\ncontrol=rpart.control(cp=0.01))\niris.baggingcv[-1]\n\n\n## rpart and mlbench libraries should be loaded\n## Data Vehicle (four classes) \n#This example has been hidden to keep execution time <5s\n#data(Vehicle)\n#Vehicle.bagging.cv <- bagging.cv(Class ~.,data=Vehicle,v=5,mfinal=10, \n#control=rpart.control(maxdepth=5))\n#Vehicle.bagging.cv[-1]\n\n\n\n"} {"package":"adabag","topic":"boosting","snippet":"### Name: boosting\n### Title: Applies the AdaBoost.M1 and SAMME algorithms to a data set\n### Aliases: boosting adaboost.M1\n### Keywords: tree classif\n\n### ** Examples\n\n\n## rpart library should be loaded\ndata(iris)\niris.adaboost <- boosting(Species~., data=iris, boos=TRUE, mfinal=3)\niris.adaboost\n\n\n## Data Vehicle (four classes) \nlibrary(mlbench)\ndata(Vehicle)\nl <- length(Vehicle[,1])\nsub <- sample(1:l,2*l/3)\nmfinal <- 3 \nmaxdepth <- 5\n\nVehicle.rpart <- rpart(Class~.,data=Vehicle[sub,],maxdepth=maxdepth)\nVehicle.rpart.pred <- predict(Vehicle.rpart,newdata=Vehicle[-sub, ],type=\"class\")\ntb <- table(Vehicle.rpart.pred,Vehicle$Class[-sub])\nerror.rpart <- 1-(sum(diag(tb))/sum(tb))\ntb\nerror.rpart\n\nVehicle.adaboost <- boosting(Class ~.,data=Vehicle[sub, ],mfinal=mfinal, coeflearn=\"Zhu\",\n\tcontrol=rpart.control(maxdepth=maxdepth))\nVehicle.adaboost.pred <- predict.boosting(Vehicle.adaboost,newdata=Vehicle[-sub, ])\nVehicle.adaboost.pred$confusion\nVehicle.adaboost.pred$error\n\n#comparing error evolution in training and test set\nerrorevol(Vehicle.adaboost,newdata=Vehicle[sub, ])->evol.train\nerrorevol(Vehicle.adaboost,newdata=Vehicle[-sub, ])->evol.test\n\nplot.errorevol(evol.test,evol.train)\n\n\n\n\n"} {"package":"adabag","topic":"boosting.cv","snippet":"### Name: boosting.cv\n### Title: Runs v-fold cross validation with AdaBoost.M1 or SAMME\n### Aliases: boosting.cv\n### Keywords: tree classif\n\n### ** Examples\n\n\n## rpart library should be loaded\ndata(iris)\niris.boostcv <- boosting.cv(Species ~ ., v=2, data=iris, mfinal=5, \ncontrol=rpart.control(cp=0.01))\niris.boostcv[-1]\n\n## rpart and mlbench libraries should be loaded\n## Data Vehicle (four classes) \n#This example has been hidden to fulfill execution time <5s \n#data(Vehicle)\n#Vehicle.boost.cv <- boosting.cv(Class ~.,data=Vehicle,v=5, mfinal=10, coeflearn=\"Zhu\",\n#control=rpart.control(maxdepth=5))\n#Vehicle.boost.cv[-1]\n\n\n\n\n\n"} {"package":"adabag","topic":"errorevol","snippet":"### Name: errorevol\n### Title: Shows the error evolution of the ensemble\n### Aliases: errorevol\n### Keywords: tree classif\n\n### ** Examples\n\n\nlibrary(mlbench)\ndata(BreastCancer)\nl <- length(BreastCancer[,1])\nsub <- sample(1:l,2*l/3)\ncntrl <- rpart.control(maxdepth = 3, minsplit = 0, cp = -1)\n\nBC.adaboost <- boosting(Class ~.,data=BreastCancer[sub,-1],mfinal=5, control=cntrl)\nBC.adaboost.pred <- predict.boosting(BC.adaboost,newdata=BreastCancer[-sub,-1])\n\nerrorevol(BC.adaboost,newdata=BreastCancer[-sub,-1])->evol.test\nerrorevol(BC.adaboost,newdata=BreastCancer[sub,-1])->evol.train\n\nplot.errorevol(evol.test,evol.train)\nabline(h=min(evol.test[[1]]), col=\"red\",lty=2,lwd=2)\nabline(h=min(evol.train[[1]]), col=\"blue\",lty=2,lwd=2)\n\n\n\n\n"} {"package":"adabag","topic":"errorevol_ranking_vector_IW","snippet":"### Name: errorevol_ranking_vector_IW\n### Title: Calculate the error evolution and final predictions of an\n### item-weighted ensemble for rankings\n### Aliases: errorevol_ranking_vector_IW\n\n### ** Examples\n\n## Not run: \n##D # Load 
simulated ranking data\n##D data(simulatedRankingData)\n##D x <- simulatedRankingData$x\n##D y <- simulatedRankingData$y\n##D \n##D # Prepare the data with item weights\n##D dati <- prep_data(y, x, iw = c(2, 5, 5, 2))\n##D \n##D # Divide the data into training and test sets\n##D set.seed(12345)\n##D samp <- sample(nrow(dati))\n##D l <- length(dati[, 1])\n##D sub <- sample(1:l, 2 * l / 3)\n##D data_sub1 <- dati[sub, ]\n##D data_test1 <- dati[-sub, ]\n##D \n##D # Apply ensemble ranking with AdaBoost.M1\n##D boosting_1 <- Ensemble_ranking_IW(\n##D Label ~ .,\n##D data = data_sub1,\n##D iw = c(2, 5, 5, 2),\n##D mfinal = 3,\n##D coeflearn = \"Breiman\",\n##D control = rpart.control(maxdepth = 4, cp = -1),\n##D algo = \"boosting\",\n##D bin = FALSE\n##D )\n##D \n##D # Evaluate the performance\n##D test_boosting1 <- errorevol_ranking_vector_IW(boosting_1, \n##D newdata = data_test1, iw=c(2,5,5,2), squared = FALSE)\n##D test_boosting1.1 <- errorevol_ranking_vector_IW(boosting_1, \n##D newdata = data_sub1, iw=c(2,5,5,2), squared = FALSE)\n##D \n##D # Plot the error evolution\n##D plot.errorevol(test_boosting1, test_boosting1.1)\n##D \n## End(Not run)\n\n\n"} {"package":"adabag","topic":"importanceplot","snippet":"### Name: importanceplot\n### Title: Plots the variables relative importance\n### Aliases: importanceplot\n### Keywords: tree classif\n\n### ** Examples\n\n#Examples\n#Iris example\nlibrary(rpart)\ndata(iris)\nsub <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))\niris.adaboost <- boosting(Species ~ ., data=iris[sub,], mfinal=3)\nimportanceplot(iris.adaboost)\n\n#Examples with bagging\n#iris.bagging <- bagging(Species ~ ., data=iris[sub,], mfinal=5)\n#importanceplot(iris.bagging, horiz=TRUE, cex.names=.6)\n\n\n\n"} {"package":"adabag","topic":"margins","snippet":"### Name: margins\n### Title: Calculates the margins\n### Aliases: margins\n### Keywords: tree classif\n\n### ** Examples\n\n\n#Iris example\nlibrary(rpart)\ndata(iris)\nsub <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))\niris.adaboost <- boosting(Species ~ ., data=iris[sub,], mfinal=3)\nmargins(iris.adaboost,iris[sub,])->iris.margins # training set\nplot.margins(iris.margins)\n\n# test set\niris.predboosting<- predict.boosting(iris.adaboost, newdata=iris[-sub,])\nmargins(iris.predboosting,iris[-sub,])->iris.predmargins \nplot.margins(iris.predmargins,iris.margins)\n\n#Examples with bagging\niris.bagging <- bagging(Species ~ ., data=iris[sub,], mfinal=3)\nmargins(iris.bagging,iris[sub,])->iris.bagging.margins # training set\n\niris.predbagging<- predict.bagging(iris.bagging, newdata=iris[-sub,])\nmargins(iris.predbagging,iris[-sub,])->iris.bagging.predmargins # test set\npar(bg=\"lightyellow\")\nplot.margins(iris.bagging.predmargins,iris.bagging.margins)\n\n\n\n\n"} {"package":"adabag","topic":"plot.errorevol","snippet":"### Name: plot.errorevol\n### Title: Plots the error evolution of the ensemble\n### Aliases: plot.errorevol\n### Keywords: tree classif\n\n### ** Examples\n\ndata(iris)\ntrain <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))\n\ncntrl<-rpart.control(maxdepth=1)\n#increase mfinal in your own execution of this example to see \n#the real usefulness of this function\niris.adaboost <- boosting(Species ~ ., data=iris[train,], mfinal=10, control=cntrl)\n\n#Error evolution along the iterations in training set \nerrorevol(iris.adaboost,iris[train,])->evol.train\nplot.errorevol(evol.train)\n\n#comparing error evolution in training and test 
set\nerrorevol(iris.adaboost,iris[-train,])->evol.test\nplot.errorevol(evol.test, evol.train)\n\n# See the help of the functions errorevol and boosting \n# for more examples of the use of the error evolution\n\n\n\n"} {"package":"adabag","topic":"plot.margins","snippet":"### Name: plot.margins\n### Title: Plots the margins of the ensemble\n### Aliases: plot.margins\n### Keywords: tree classif\n\n### ** Examples\n\nlibrary(mlbench)\ndata(BreastCancer)\nl <- length(BreastCancer[,1])\nsub <- sample(1:l,2*l/3)\ncntrl <- rpart.control(maxdepth = 3, minsplit = 0, cp = -1)\n\nBC.adaboost <- boosting(Class ~.,data=BreastCancer[sub,-1],mfinal=5, control=cntrl)\nBC.adaboost.pred <- predict.boosting(BC.adaboost,newdata=BreastCancer[-sub,-1])\n\nBC.margins<-margins(BC.adaboost,BreastCancer[sub,-1]) # training set\nBC.predmargins<-margins(BC.adaboost.pred,BreastCancer[-sub,-1]) # test set\nplot.margins(BC.predmargins,BC.margins)\n\n\n\n\n"} {"package":"adabag","topic":"predict.bagging","snippet":"### Name: predict.bagging\n### Title: Predicts from a fitted bagging object\n### Aliases: predict.bagging\n### Keywords: tree classif\n\n### ** Examples\n\n#library(rpart)\n#data(iris)\n#sub <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))\n#iris.bagging <- bagging(Species ~ ., data=iris[sub,], mfinal=5)\n#iris.predbagging<- predict.bagging(iris.bagging, newdata=iris[-sub,])\n#iris.predbagging\n\n## rpart and mlbench libraries should be loaded\nlibrary(rpart)\nlibrary(mlbench)\ndata(BreastCancer)\nl <- length(BreastCancer[,1])\nsub <- sample(1:l,2*l/3)\nBC.bagging <- bagging(Class ~.,data=BreastCancer[,-1],mfinal=5, \ncontrol=rpart.control(maxdepth=3))\nBC.bagging.pred <- predict.bagging(BC.bagging,newdata=BreastCancer[-sub,-1])\nBC.bagging.pred$prob\nBC.bagging.pred$confusion\nBC.bagging.pred$error\n\n\n\n\n"} {"package":"adabag","topic":"predict.boosting","snippet":"### Name: predict.boosting\n### Title: Predicts from a fitted boosting object\n### Aliases: predict.boosting\n### Keywords: tree classif\n\n### ** Examples\n\n## rpart library should be loaded\n#This example has been hidden to keep execution time <5s \n#library(rpart)\n#data(iris)\n#sub <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))\n#iris.adaboost <- boosting(Species ~ ., data=iris[sub,], mfinal=10)\n#iris.predboosting<- predict.boosting(iris.adaboost, newdata=iris[-sub,])\n#iris.predboosting$prob\n\n## rpart and mlbench libraries should be loaded\n## Comparing the test error of rpart and adaboost.M1\nlibrary(rpart)\nlibrary(mlbench)\ndata(BreastCancer)\nl <- length(BreastCancer[,1])\nsub <- sample(1:l,2*l/3)\n\nBC.rpart <- rpart(Class~.,data=BreastCancer[sub,-1], maxdepth=3)\nBC.rpart.pred <- predict(BC.rpart,newdata=BreastCancer[-sub,-1],type=\"class\")\ntb <-table(BC.rpart.pred,BreastCancer$Class[-sub])\nerror.rpart <- 1-(sum(diag(tb))/sum(tb))\ntb\nerror.rpart\n\nBC.adaboost <- boosting(Class ~.,data=BreastCancer[,-1],mfinal=10, coeflearn=\"Freund\", \nboos=FALSE, control=rpart.control(maxdepth=3))\n\n#Using the pruning option\nBC.adaboost.pred <- predict.boosting(BC.adaboost,newdata=BreastCancer[-sub,-1], newmfinal=10)\nBC.adaboost.pred$confusion\nBC.adaboost.pred$error\n\n\n\n\n\n\n"} {"package":"adabag","topic":"prep_data","snippet":"### Name: prep_data\n### Title: Prepare Ranking Data for Item-Weighted Ensemble Algorithm\n### Aliases: prep_data\n\n### ** Examples\n\n # Prepare item-weighted ranking data\n y <- matrix(c(1, 2, 3, 4, 2, 3, 1, 4, 4, 1, 3, 2, 2, 3, 1, 4), nrow = 4, ncol = 4, byrow = 
TRUE)\n x <- matrix(c(0.5, 0.8, 1.2, 0.7, 1.1, 0.9, 0.6, 1.3, 0.4, 1.5, 0.7, 0.9), nrow = 4, ncol = 3)\n iw <- c(2, 5, 5, 2)\n dati <- prep_data(y, x, iw)\n\n\n"} {"package":"ndjson","topic":"flatten","snippet":"### Name: flatten\n### Title: Flatten a character vector of individual JSON lines into a\n### 'data.table'\n### Aliases: flatten\n\n### ** Examples\n\nflatten('{\"top\":{\"next\":{\"final\":1,\"end\":true},\"another\":\"yes\"},\"more\":\"no\"}')\n\n\n"} {"package":"ndjson","topic":"stream_in","snippet":"### Name: stream_in\n### Title: Stream in & flatten an ndjson file into a 'data.table'\n### Aliases: stream_in\n\n### ** Examples\n\nf <- system.file(\"extdata\", \"test.json\", package=\"ndjson\")\nnrow(stream_in(f))\n\ngzf <- system.file(\"extdata\", \"testgz.json.gz\", package=\"ndjson\")\nnrow(stream_in(gzf))\n\n\n"} {"package":"ndjson","topic":"validate","snippet":"### Name: validate\n### Title: Validate ndjson file\n### Aliases: validate\n\n### ** Examples\n\nf <- system.file(\"extdata\", \"test.json\", package=\"ndjson\")\nvalidate(f)\n\ngzf <- system.file(\"extdata\", \"testgz.json.gz\", package=\"ndjson\")\nvalidate(gzf)\n\n\n"} {"package":"jackknifeKME","topic":"jackknifeKME","snippet":"### Name: jackknifeKME\n### Title: Jackknife estimates of Kaplan-Meier estimators or integrals\n### Aliases: jackknifeKME\n### Keywords: jackknife bias\n\n### ** Examples\n\n#For full data typically used for AFT models (using imputeYn (2015) package). \n#For mean lifetime estimator.\ndata<-data(n=100, p=4, r=0, b1=c(2,2,3,3), sig=1, Cper=0)\nkme1<-jackknifeKME(data$x,data$y, data$delta, method=\"condMean\",estimator = 1)\n## No test: \nkme1\n## End(No test)\n\n#Estimates are for mean lifetime estimators. Data contain only status and survival time. \ndata2<-simdata(n = 100,lambda = 2.04) \ndata2$delta[length(data2$delta)]<-0 \nkme2<-jackknifeKME(, data2$Y, data2$delta, method=\"PDQ\",estimator = 1)\n## No test: \nkme2\n## End(No test)\n\n#Estimates are for Kaplan-Meier 2nd order F-moment.\ndata3<-simdata(n = 100,lambda = 2.04) \ndata3$delta[length(data3$delta)]<-0 \nkme3<-jackknifeKME(, data3$Y, data3$delta, method=\"PDQ\",estimator = 2)\n## No test: \nkme3\n## End(No test)\n\n\n"} {"package":"jackknifeKME","topic":"kmweight","snippet":"### Name: kmweight\n### Title: Compute Kaplan-Meier weights\n### Aliases: kmweight\n\n### ** Examples\n\n#Using simdata function and considering censoring level at 50%.\ndata<-simdata(n = 100,lambda = 2.04) \nkmw<-kmweight(data$Y, data$delta)\nkmw\n\n\n"} {"package":"jackknifeKME","topic":"kmweight.corr","snippet":"### Name: kmweight.corr\n### Title: Compute corrected Kaplan-Meier weights for jackknifing\n### Aliases: kmweight.corr\n\n### ** Examples\n\n#Using simdata function. 
Censoring level is 50%.\ndata1<-simdata(n = 100,lambda = 2.04) \nkmwc<-kmweight.corr(data1$Y, data1$delta)\nkmwc\n\n\n"} {"package":"jackknifeKME","topic":"simdata","snippet":"### Name: simdata\n### Title: Generating survival data\n### Aliases: simdata\n\n### ** Examples\n\n#For Cper = 30%.\ndata<-simdata(n = 100,lambda = 3.48)\ndata \n\n#For Cper = 50%.\ndata2<-simdata(n = 100,lambda = 2.04) \ndata2\n\n#For Cper = 80%.\ndata3<-simdata(n = 100,lambda = 0.87) \ndata3\n\n\n"} {"package":"FinancialInstrument","topic":"C2M","snippet":"### Name: C2M\n### Title: Month-to-Code and Code-to-Month\n### Aliases: C2M M2C M2C\n\n### ** Examples\n\nC2M()\nC2M(\"M\")\nC2M()[6]\nM2C()\nM2C(\"Sep\")\nM2C()[9]\n\n\n"} {"package":"FinancialInstrument","topic":"CompareInstrumentFiles","snippet":"### Name: CompareInstrumentFiles\n### Title: Compare Instrument Files\n### Aliases: CompareInstrumentFiles\n\n### ** Examples\n\n## Not run: \n##D #backup current .instrument environment\n##D bak <- as.list(FinancialInstrument:::.instrument, all.names=TRUE) \n##D old.wd <- getwd()\n##D tmpdir <- tempdir()\n##D setwd(tmpdir)\n##D rm_instruments(keep=FALSE)\n##D # create some instruments and save\n##D stock(c(\"SPY\", \"DIA\", \"GLD\"), currency(\"USD\"))\n##D saveInstruments(\"MyInstruments1\")\n##D # make some changes\n##D rm_stocks(\"GLD\")\n##D stock(\"QQQ\", \"USD\")\n##D instrument_attr(\"SPY\", \"description\", \"S&P ETF\")\n##D saveInstruments(\"MyInstruments2\")\n##D CompareInstrumentFiles(\"MyInstruments1\", \"MyInstruments2\")\n##D #Clean up\n##D setwd(old.wd)\n##D reloadInstruments(bak)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"FinancialInstrument-package","snippet":"### Name: FinancialInstrument-package\n### Title: Construct, manage and store contract specifications for trading\n### Aliases: FinancialInstrument-package FinancialInstrument\n### Keywords: package\n\n### ** Examples\n\n## Not run: \n##D # Construct instruments for several different asset classes\n##D # Define a currency and some stocks\n##D require(\"FinancialInstrument\")\n##D currency(c(\"USD\", \"EUR\")) # define some currencies\n##D stock(c(\"SPY\", \"LQD\", \"IBM\", \"GS\"), currency=\"USD\") # define some stocks\n##D exchange_rate(\"EURUSD\") # define an exchange rate\n##D \n##D ls_stocks() #get the names of all the stocks\n##D ls_instruments() # all instruments\n##D \n##D getInstrument(\"IBM\")\n##D update_instruments.yahoo(ls_stocks())\n##D update_instruments.TTR(ls_stocks()) # doesn't update ETFs\n##D update_instruments.masterDATA(ls_stocks()) # only updates ETFs\n##D getInstrument(\"SPY\")\n##D \n##D ## Compare instruments with all.equal.instrument method\n##D all.equal(getInstrument(\"USD\"), getInstrument(\"USD\"))\n##D all.equal(getInstrument(\"USD\"), getInstrument(\"EUR\"))\n##D all.equal(getInstrument(\"SPY\"), getInstrument(\"LQD\"))\n##D \n##D ## Search for the tickers of instruments that contain words\n##D find.instrument(\"computer\") #IBM\n##D find.instrument(\"bond\") #LQD\n##D \n##D ## Find only the ETFs; update_instruments.masterDATA added a \"Fund.Type\" field\n##D ## to the ETFs, but not to the stocks\n##D ls_instruments_by(\"Fund.Type\") # all instruments that have a \"Fund.Type\" field\n##D \n##D # build data.frames with instrument attributes\n##D buildHierarchy(ls_stocks(), \"Name\", \"type\", \"avg.volume\")\n##D \n##D ## before defining a derivative, must define the root (can define the underlying \n##D ## in the same step)\n##D future(\"ES\", \"USD\", multiplier=50, tick_size=0.25, \n##D 
underlying_id=synthetic(\"SPX\", \"USD\", src=list(src='yahoo', name='^GSPC')))\n##D \n##D # above, in addition to defining the future root \"ES\", we defined an instrument \n##D # named \"SPX\". Using the \"src\" argument causes setSymbolLookup to be called.\n##D # Using the \"src\" arg as above is the same as \n##D # setSymbolLookup(SPX=list(src='yahoo', name='^GSPC'))\n##D getSymbols(\"SPX\") # this now works even though the Symbol used by \n##D # getSymbols.yahoo is \"^GSPC\", not \"SPX\"\n##D \n##D ## Back to the futures; we can define a future_series\n##D future_series(\"ES_U2\", identifiers=list(other=\"ESU2\"))\n##D # identifiers are not necessary, but they allow for the instrument to be found \n##D # by more than one name\n##D getInstrument(\"ESU2\") #this will find the instrument even though the primary_id \n##D #is \"ES_U2\"\n##D # can also add identifiers later\n##D add.identifier(\"ES_U2\", inhouse=\"ES_U12\")\n##D \n##D # can add an arbitrary field with instrument_attr\n##D instrument_attr(\"ES_U2\", \"description\", \"S&P 500 e-mini\")\n##D getInstrument(\"ES_U2\")\n##D \n##D option_series.yahoo(\"GS\") # define a bunch of options on \"GS\"\n##D # option root was automatically created\n##D getInstrument(\".GS\")\n##D # could also find \".GS\" by looking for \"GS\", but specifying type\n##D getInstrument(\"GS\", type='option')\n##D \n##D # if you do not know what type of instrument you need to define, try\n##D instrument.auto(\"ESM3\")\n##D getInstrument(\"ESM3\")\n##D instrument.auto(\"USDJPY\")\n##D getInstrument(\"USDJPY\")\n##D \n##D instrument.auto(\"QQQ\") #doesn't work as well on ambiguous tickers \n##D getInstrument(\"QQQ\")\n##D \n##D # Some functions that make it easier to work with futures\n##D M2C() # Month To Code\n##D M2C()[5]\n##D M2C(\"may\")\n##D C2M() # Code To Month\n##D C2M(\"J\")\n##D C2M()[7]\n##D MC2N(\"G\") # Month Code to Numeric\n##D MC2N(\"H,K,M\")\n##D \n##D parse_id(\"ES_U3\")\n##D parse_id(\"EURUSD\")\n##D \n##D next.future_id(\"ES_U2\")\n##D next.future_id(\"ZC_H2\", \"H,K,N,U,Z\")\n##D prev.future_id(\"CL_H2\", 1:12)\n##D \n##D sort_ids(ls_instruments()) # sort by expiration date, then alphabetically for \n##D # things that don't expire.\n##D \n##D format_id(\"ES_U2\", \"CYY\")\n##D format_id(\"ES_U2\", \"CYY\", sep=\"\")\n##D format_id(\"ES_U2\", \"MMMYY\")\n##D \n##D ## Saving the instrument environment to disk\n##D tmpdir <- tempdir()\n##D saveInstruments(\"MyInstruments.RData\", dir=tmpdir)\n##D rm_instruments(keep.currencies=FALSE)\n##D ls_instruments() #NULL\n##D loadInstruments(\"MyInstruments.RData\", dir=tmpdir)\n##D ls_instruments()\n##D unlink(tmpdir, recursive=TRUE)\n##D \n##D #build a spread:\n##D fn_SpreadBuilder(getSymbols(c(\"IBM\", \"SPY\"), src='yahoo'))\n##D head(IBM.SPY)\n##D getInstrument(\"IBM.SPY\")\n##D \n##D # alternatively, define a spread, then build it\n##D spread(members=c(\"IBM\", \"GS\", \"SPY\"), memberratio=c(1, -2, 1))\n##D buildSpread(\"IBM.GS.SPY\") #Since we hadn't yet downloaded \"GS\", buildSpread \n##D #downloaded it temporarily\n##D chartSeries(IBM.GS.SPY)\n##D \n##D ## fn_SpreadBuilder will return as many columns as it can \n##D ## (Bid, Ask, Mid, or Op, Cl, Ad), but only works on 2 instrument spreads\n##D ## buildSpread works with any number of legs, but returns a single price column\n##D \n##D getFX(\"EUR/USD\", from=Sys.Date()-499) # download exchange rate from Oanda\n##D \n##D IBM.EUR <- redenominate(\"IBM\", \"EUR\") #price IBM in EUR instead of dollars\n##D chartSeries(IBM, subset='last 500 
days', TA=NULL)\n##D addTA(Ad(IBM.EUR), on=1, col='red')\n##D \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"FindCommonInstrumentAttributes","snippet":"### Name: FindCommonInstrumentAttributes\n### Title: Find attributes that more than one instrument have in common\n### Aliases: FindCommonInstrumentAttributes\n\n### ** Examples\n\n## Not run: \n##D ibak <- as.list(FinancialInstrument:::.instrument, all.names=TRUE)\n##D Symbols <- c(\"SPY\", \"AAPL\")\n##D define_stocks(Symbols, addIBslot=FALSE)\n##D update_instruments.SPDR(\"SPY\")\n##D update_instruments.TTR(\"AAPL\", exchange=\"NASDAQ\")\n##D FindCommonInstrumentAttributes(Symbols)\n##D FindCommonInstrumentAttributes(c(Symbols, \"USD\"))\n##D reloadInstruments(ibak)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"Notionalize","snippet":"### Name: Notionalize\n### Title: Convert price series to/from notional value\n### Aliases: Notionalize Denotionalize\n\n### ** Examples\n\n## Not run: \n##D source(\"http://tinyurl.com/download-tblox\")\n##D getSymbols(\"CL\", src='tblox')\n##D define_futures.tblox()\n##D tail(Notionalize(CL, \"CL\"))\n##D tail(Denotionalize(Notionalize(CL), \"CL\"))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"to_secBATV","snippet":"### Name: to_secBATV\n### Title: Convert tick data to one-second data\n### Aliases: to_secBATV alltick2sec\n\n### ** Examples\n\n## Not run: \n##D getSymbols(\"CLU1\")\n##D system.time(xsec <- to_secBATV(CLU1))\n##D convert.log <- alltick2sec()\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"add.defined.by","snippet":"### Name: add.defined.by\n### Title: Add a source to the defined.by field of an 'instrument'\n### Aliases: add.defined.by\n\n### ** Examples\n\n## Not run: \n##D update_instruments.TTR(\"GS\")\n##D getInstrument(\"GS\")$defined.by #TTR\n##D add.defined.by(\"GS\", \"gsee\", \"demo\")\n##D add.defined.by(\"GS\", \"gsee;demo\") #same\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"add.identifier","snippet":"### Name: add.identifier\n### Title: Add an identifier to an 'instrument'\n### Aliases: add.identifier\n\n### ** Examples\n\n## Not run: \n##D stock(\"XXX\", currency(\"USD\"))\n##D add.identifier(\"XXX\", yahoo=\"^XXX\") \n##D getInstrument(\"^XXX\")\n##D add.identifier(\"^XXX\", \"x3\")\n##D all.equal(getInstrument(\"x3\"), getInstrument(\"XXX\")) #TRUE\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"all.equal.instrument","snippet":"### Name: all.equal.instrument\n### Title: instrument all.equal method\n### Aliases: all.equal.instrument\n### Keywords: internal utilities\n\n### ** Examples\n\n## Not run: \n##D currency(\"USD\")\n##D stock(\"SPY\", \"USD\", validExchanges=c(\"SMART\", \"ARCA\", \"BATS\", \"BEX\"))\n##D stock(\"DIA\", \"USD\", validExchanges=c(\"SMART\", \"ARCA\", \"ISLAND\"), \n##D ExtraField=\"something\")\n##D \n##D all.equal(getInstrument(\"SPY\"), getInstrument(\"DIA\"))\n##D all.equal(getInstrument(\"SPY\"), getInstrument(\"DIA\"), char.n=5)\n##D all.equal(getInstrument(\"SPY\"), getInstrument(\"DIA\"), char.n=5, collapse=NULL)\n##D \n##D all.equal(getInstrument(\"DIA\"), getInstrument(\"USD\"))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"buildHierarchy","snippet":"### Name: buildHierarchy\n### Title: Construct a hierarchy of instruments useful for aggregation\n### Aliases: buildHierarchy\n\n### ** Examples\n\n## Not run: \n##D # rm_instruments(keep.currencies=FALSE)\n##D ## Define some stocks\n##D 
update_instruments.TTR(c(\"XOM\", \"IBM\", \"CVX\", \"WMT\", \"GE\"), exchange=\"NYSE\")\n##D \n##D buildHierarchy(ls_instruments(), \"type\")\n##D buildHierarchy(ls_stocks(), c(\"Name\", \"Sector\"))\n##D buildHierarchy(ls_stocks(), \"Industry\", \"MarketCap\")\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"buildRatio","snippet":"### Name: buildRatio\n### Title: construct price ratios of 2 instruments\n### Aliases: buildRatio\n\n### ** Examples\n\n\n## Not run: \n##D syms <- c(\"SPY\",\"DIA\")\n##D getSymbols(syms)\n##D rat <- buildRatio(syms)\n##D summary(rat)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"buildSpread","snippet":"### Name: buildSpread\n### Title: Construct a price/level series for pre-defined multi-leg spread\n### instrument\n### Aliases: buildSpread buildBasket\n\n### ** Examples\n\n## Not run: \n##D currency(\"USD\")\n##D stock(\"SPY\",\"USD\",1)\n##D stock(\"DIA\",\"USD\",1)\n##D getSymbols(c(\"SPY\",\"DIA\")) \n##D \n##D spread(\"SPYDIA\", \"USD\", c(\"SPY\",\"DIA\"),c(1,-1)) #define it.\n##D buildSpread('SPYDIA') #build it.\n##D head(SPYDIA)\n##D \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"expires","snippet":"### Name: expires\n### Title: extract the correct expires value from an 'instrument'\n### Aliases: expires\n\n### ** Examples\n\n## Not run: \n##D instr <- instrument(\"FOO_U1\", currency=currency(\"USD\"), multiplier=1,\n##D expires=c(\"2001-09-01\", \"2011-09-01\", \"2021-09-01\"), \n##D assign_i=FALSE)\n##D #Last value of expires that's not after Sys.Date\n##D expires(instr) \n##D # First value of expires that hasn't already passed.\n##D expires(instr, expired=FALSE)\n##D # last value that's not after 2011-01-01\n##D expires(instr, Date=\"2011-01-01\") \n##D # first value that's not before 2011-01-01\n##D expires(instr, Date=\"2011-01-01\", expired=FALSE) \n##D \n##D ## expires.character\n##D expires(\"FOO_U1\") # warning that FOO_U1 is not defined\n##D instrument(\"FOO_U1\", currency=currency(\"USD\"), multiplier=1,\n##D expires=c(\"2001-09-01\", \"2011-09-01\", \"2021-09-01\"), \n##D assign_i=TRUE)\n##D expires(\"FOO_U1\")\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"find.instrument","snippet":"### Name: find.instrument\n### Title: Find the primary_ids of instruments that contain certain strings\n### Aliases: find.instrument\n\n### ** Examples\n\n## Not run: \n##D instruments.bak <- as.list(FinancialInstrument:::.instrument, all.names=TRUE)\n##D rm_instruments(keep.currencies=FALSE)\n##D currency(\"USD\")\n##D stock(\"SPY\", \"USD\", description=\"S&P 500 ETF\")\n##D stock(\"DIA\", \"USD\", description=\"DJIA ETF\")\n##D stock(c(\"AA\", \"AXP\", \"BA\", \"BAC\", \"CAT\"), \"USD\", members.of='DJIA')\n##D stock(\"BMW\", currency(\"EUR\"))\n##D find.instrument(\"ETF\")\n##D find.instrument(\"DJIA\") \n##D find.instrument(\"DJIA\", \"members.of\")\n##D find.instrument(\"USD\")\n##D find.instrument(\"EUR\")\n##D find.instrument(\"EUR\", Symbols=ls_stocks())\n##D find.instrument(\"USD\", \"type\")\n##D \n##D ## Can be combined with buildHierarchy\n##D buildHierarchy(find.instrument(\"ETF\"), \"type\", \"description\")\n##D \n##D ## Cleanup. 
Restore previous instrument environment\n##D rm_instruments(); rm_currencies()\n##D loadInstruments(instruments.bak)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"fn_SpreadBuilder","snippet":"### Name: fn_SpreadBuilder\n### Title: Calculate prices of a spread from 2 instruments.\n### Aliases: fn_SpreadBuilder\n\n### ** Examples\n\n## Not run: \n##D currency(\"USD\")\n##D stock(\"SPY\", \"USD\")\n##D stock(\"DIA\", \"USD\")\n##D getSymbols(c(\"SPY\",\"DIA\"))\n##D \n##D #can call with names of instrument/xts objects\n##D fSB <- fn_SpreadBuilder(\"SPY\",\"DIA\") \n##D fSB2 <- fn_SpreadBuilder(SPY,DIA) # or you can pass xts objects\n##D \n##D #assuming you first somehow calculated the ratio to be a constant 1.1\n##D fSB3 <- fn_SpreadBuilder(\"SPY\",\"DIA\",1.1) \n##D head(fSB)\n##D \n##D # Call fn_SpreadBuilder with vector of 2 instrument names\n##D # in 1 arg instead of using both prod1 and prod2.\n##D fSB4 <- fn_SpreadBuilder(c(\"SPY\",\"DIA\"))\n##D #download data and plot the closing values of a spread in one line\n##D chartSeries(Cl(fn_SpreadBuilder(getSymbols(c(\"SPY\",\"DIA\")),auto.assign=FALSE)))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"format_id","snippet":"### Name: format_id\n### Title: format an id\n### Aliases: format_id\n\n### ** Examples\n\nformat_id('U1', format='MMMYY', parse='suffix')\nformat_id('ES_JUN2011', format='CYY', parse='id')\nformat_id(\"SPY_20110826P129\",\"opt2\")\n#several at once\nid3 <- c('VX_aug1','ES_U1', 'VX_U11')\nformat_id(id3,'MMMYY')\nformat_id(id3,'CYY')\nformat_id(id3,'CY',sep=\"\")\n\n\n"} {"package":"FinancialInstrument","topic":"getInstrument","snippet":"### Name: getInstrument\n### Title: Primary accessor function for getting objects of class\n### 'instrument'\n### Aliases: getInstrument\n\n### ** Examples\n\n## Not run: \n##D option('..VX', multiplier=100, \n##D underlying_id=future('.VX',multiplier=1000, \n##D underlying_id=synthetic('VIX', currency(\"USD\"))))\n##D \n##D getInstrument(\"VIX\")\n##D getInstrument('VX') #returns the future\n##D getInstrument(\"VX\",type='option')\n##D getInstrument('..VX') #finds the option\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"getSymbols.FI","snippet":"### Name: getSymbols.FI\n### Title: getSymbols method for loading data from split files\n### Aliases: getSymbols.FI\n\n### ** Examples\n\n## Not run: \n##D getSymbols(\"SPY\", src='yahoo')\n##D dir.create(\"tmpdata\")\n##D saveSymbols.common(\"SPY\", base_dir=\"tmpdata\")\n##D rm(\"SPY\")\n##D getSymbols(\"SPY\", src='FI', dir=\"tmpdata\", split_method='common')\n##D unlink(\"tmpdata/SPY\", recursive=TRUE)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":".get_rate","snippet":"### Name: .get_rate\n### Title: get an exchange rate series\n### Aliases: .get_rate\n\n### ** Examples\n\n\n## Not run: \n##D EURUSD <- getSymbols(\"EURUSD=x\",src='yahoo',auto.assign=FALSE)\n##D USDEUR <- .get_rate(\"USD\",\"EUR\")\n##D head(USDEUR)\n##D head(EURUSD)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"instrument.auto","snippet":"### Name: instrument.auto\n### Title: Create an instrument based on name alone\n### Aliases: instrument.auto\n\n### ** Examples\n\n## Not run: \n##D instrument.auto(\"CL_H1.U1\")\n##D getInstrument(\"CL_H1.U1\") #guaranteed_spread\n##D \n##D instrument.auto(\"ES_H1.YM_H1\")\n##D getInstrument(\"ES_H1.YM_H1\") #synthetic\n##D \n##D currency(c(\"USD\",\"EUR\"))\n##D instrument.auto(\"EURUSD\")\n##D getInstrument(\"EURUSD\") #made an 
exchange_rate\n##D \n##D instrument.auto(\"VX_H11\") #no root future defined yet!\n##D getInstrument(\"VX_H11\") #couldn't find future, didn't make future_series\n##D future(\"VX\",\"USD\",1000,underlying_id=synthetic(\"SPX\",\"USD\")) #make the root \n##D instrument.auto(\"VX_H11\") #and try again\n##D getInstrument(\"VX_H11\") #made a future_series\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"instrument.table","snippet":"### Name: instrument.table\n### Title: Create data.frame with attributes of all instruments\n### Aliases: instrument.table\n\n### ** Examples\n\n\n## Not run: \n##D currency('USD')\n##D stock('GM','USD',exchange='NYSE')\n##D stock('XOM','USD',description='Exxon Mobil')\n##D instrument.table()\n##D #Usually, currencies will not have as many attribute levels\n##D #as other instruments, so you may want to exclude them from the table.\n##D it <- instrument.table(exclude=\"USD|GM\", attrs.of = \"XOM\") #columns created based on XOM instrument\n##D #it <- instrument.table(exclude=c('USD','GM'), attrs.of = \"XOM\") #same thing\n##D it <- instrument.table(exclude='tick_size|description|exchange')\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"instrument_attr","snippet":"### Name: instrument_attr\n### Title: Add or change an attribute of an instrument\n### Aliases: instrument_attr\n\n### ** Examples\n\n## Not run: \n##D currency(\"USD\")\n##D stock(\"SPY\",\"USD\")\n##D instrument_attr(\"USD\",\"description\",\"U.S. Dollar\")\n##D instrument_attr(\"SPY\", \"description\", \"An ETF\")\n##D getInstrument(\"USD\")\n##D getInstrument(\"SPY\")\n##D \n##D #Call with value=NULL to remove an attribute\n##D instrument_attr(\"SPY\", \"description\", NULL)\n##D getInstrument(\"SPY\")\n##D \n##D instrument_attr(\"SPY\",\"primary_id\",\"SPX\") #move/rename it\n##D instrument_attr(\"SPX\",\"type\",\"synthetic\") #re-class\n##D instrument_attr(\"SPX\",\"src\",list(src='yahoo',name='^GSPC')) #setSymbolLookup\n##D getSymbols(\"SPX\") #knows where to look because the last line called setSymbolLookup\n##D getInstrument(\"SPX\")\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"load.instruments","snippet":"### Name: load.instruments\n### Title: load instrument metadata into the .instrument environment\n### Aliases: load.instruments\n\n### ** Examples\n\n## Not run: \n##D load.instruments(system.file('data/currencies.csv.gz',package='FinancialInstrument'))\n##D load.instruments(system.file('data/root_contracts.csv.gz',package='FinancialInstrument'))\n##D load.instruments(system.file('data/future_series.csv.gz',package='FinancialInstrument'))\n##D \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_by_currency","snippet":"### Name: ls_by_currency\n### Title: shows or removes instruments of given currency denomination(s)\n### Aliases: ls_by_currency rm_by_currency ls_USD ls_AUD ls_GBP ls_CAD\n### ls_EUR ls_JPY ls_CHF ls_HKD ls_SEK ls_NZD rm_by_currency ls_USD\n### ls_AUD ls_GBP ls_CAD ls_EUR ls_JPY ls_CHF ls_HKD ls_SEK ls_NZD\n\n### ** Examples\n\n\n## Not run: \n##D #First create instruments\n##D currency(c('USD','CAD','GBP'))\n##D stock(c('CM','CNQ'),'CAD')\n##D stock(c('BET','BARC'),'GBP')\n##D stock(c('SPY','DIA'),'USD')\n##D \n##D #now the examples\n##D ls_by_currency(c('CAD','GBP'))\n##D \n##D ls_USD()\n##D ls_CAD()\n##D \n##D #2 ways to remove all instruments of a currency\n##D rm_instruments(ls_USD()) \n##D #rm_instruments(ls_GBP(),keep.currencies=FALSE)\n##D rm_by_currency( ,'CAD') \n##D #rm_by_currency( ,'CAD', 
keep.currencies=FALSE)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_by_expiry","snippet":"### Name: ls_by_expiry\n### Title: list or remove instruments by expiration date\n### Aliases: ls_by_expiry rm_by_expiry rm_by_expiry\n\n### ** Examples\n\n\n## Not run: \n##D ls_by_expiry('20110917')\n##D ls_by_expiry('20110917',ls_options())\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_expiries","snippet":"### Name: ls_expiries\n### Title: show unique expiration dates of instruments\n### Aliases: ls_expiries ls_expires ls_expires\n\n### ** Examples\n\n\n## Not run: \n##D option_series.yahoo('SPY')\n##D option_series.yahoo('DIA',NULL)\n##D ls_expiries()\n##D \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_instruments","snippet":"### Name: ls_instruments\n### Title: List or Remove instrument objects\n### Aliases: ls_instruments ls_stocks ls_options ls_option_series\n### ls_futures ls_future_series ls_currencies ls_non_currencies\n### ls_exchange_rates ls_FX ls_bonds ls_funds ls_spreads\n### ls_guaranteed_spreads ls_synthetics ls_derivatives ls_non_derivatives\n### ls_calls ls_puts rm_instruments rm_stocks rm_options rm_option_series\n### rm_futures rm_future_series rm_currencies rm_exchange_rates rm_FX\n### rm_bonds rm_funds rm_spreads rm_synthetics rm_derivatives\n### rm_non_derivatives ls_stocks ls_options ls_option_series ls_futures\n### ls_future_series ls_currencies ls_non_currencies ls_exchange_rates\n### ls_FX ls_bonds ls_funds ls_spreads ls_guaranteed_spreads\n### ls_synthetics ls_ICS ls_ICS_roots ls_derivatives ls_non_derivatives\n### ls_calls ls_puts rm_instruments rm_stocks rm_options rm_option_series\n### rm_futures rm_future_series rm_currencies rm_exchange_rates rm_FX\n### rm_bonds rm_funds rm_spreads rm_synthetics rm_derivatives\n### rm_non_derivatives\n\n### ** Examples\n\n\n## Not run: \n##D #rm_instruments(keep.currencies=FALSE) #remove everything from .instrument\n##D \n##D # First, create some instruments\n##D currency(c(\"USD\", \"EUR\", \"JPY\"))\n##D #stocks\n##D stock(c(\"S\", \"SE\", \"SEE\", \"SPY\"), 'USD')\n##D synthetic(\"SPX\", \"USD\", src=list(src='yahoo', name='^GSPC'))\n##D #derivatives\n##D option('.SPY', 'USD', multiplier=100, underlying_id='SPY')\n##D option_series(root_id=\"SPY\", expires='2011-06-18', callput='put', strike=130)\n##D option_series(root_id=\"SPY\", expires='2011-09-17', callput='put', strike=130)\n##D option_series(root_id=\"SPY\", expires='2011-06-18', callput='call', strike=130)\n##D future('ES', 'USD', multiplier=50, expires='2011-09-16', underlying_id=\"SPX\")\n##D option('.ES','USD',multiplier=1, expires='2011-06',strike=1350, right='C', underlying_id='ES')\n##D \n##D # Now, the examples\n##D ls_instruments() #all instruments\n##D ls_instruments(\"SE\") #only the one stock\n##D ls_instruments(\"S\", match=FALSE) #anything with \"S\" in name\n##D \n##D ls_currencies()\n##D ls_stocks() \n##D ls_options() \n##D ls_futures() \n##D ls_derivatives()\n##D ls_puts()\n##D ls_non_derivatives()\n##D #ls_by_expiry('20110618',ls_puts()) #put options that expire on Jun 18th, 2011\n##D #ls_puts(ls_by_expiry('20110618')) #same thing\n##D \n##D rm_options('SPY_110618C130')\n##D rm_futures()\n##D ls_instruments()\n##D #rm_instruments('EUR') #Incorrect\n##D rm_instruments('EUR', keep.currencies=FALSE) #remove the currency\n##D rm_currencies('JPY') #or remove currency like this\n##D ls_currencies()\n##D ls_instruments()\n##D \n##D rm_instruments() #remove all but currencies\n##D 
rm_currencies()\n##D \n##D option_series.yahoo('DIA')\n##D ls_instruments_by('underlying_id','DIA') #underlying_id must exactly match 'DIA'\n##D ls_derivatives('DIA',match=FALSE) #primary_ids that contain 'DIA'\n##D rm_instruments()\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_instruments_by","snippet":"### Name: ls_instruments_by\n### Title: Subset names of instruments\n### Aliases: ls_instruments_by\n\n### ** Examples\n\n\n## Not run: \n##D stock(c(\"GOOG\",\"INTC\"),currency(\"USD\"))\n##D synthetic(\"SnP\",\"USD\",src=list(name='^GSPC',src='yahoo'))\n##D ls_instruments_by('type','stock')\n##D ls_instruments_by(\"name\",NULL,in.slot='src')\n##D ls_instruments_by('src',NULL)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_strikes","snippet":"### Name: ls_strikes\n### Title: show strike prices of defined options\n### Aliases: ls_strikes\n\n### ** Examples\n\n\n## Not run: \n##D option_series.yahoo('SPY')\n##D ls_strikes(ls_options('SPY'))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"ls_underlyings","snippet":"### Name: ls_underlyings\n### Title: show names of underlyings\n### Aliases: ls_underlyings\n\n### ** Examples\n\n\n## Not run: \n##D ls_underlyings()\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"make_spread_id","snippet":"### Name: make_spread_id\n### Title: Construct a primary_id for a 'spread' 'instrument' from the\n### primary_ids of its members\n### Aliases: make_spread_id\n\n### ** Examples\n\nids <- c('VX_aug1','VX_U11')\nmake_spread_id(ids, format='CY')\nmake_spread_id(ids, format=FALSE)\nmake_spread_id(c(\"VIX_JAN11\",\"VIX_FEB11\"),root='VX',format='CY')\n\n\n"} {"package":"FinancialInstrument","topic":"month_cycle2numeric","snippet":"### Name: month_cycle2numeric\n### Title: coerce month_cycle to a numeric vector\n### Aliases: month_cycle2numeric MC2N\n\n### ** Examples\n\nMC2N(\"H,M,U,Z\") # from single string\nMC2N(c(\"H\",\"M\",\"U\",\"Z\")) # from single vector\nMC2N(\"h\", \"M\", \"u\", \"Z\") # from multiple strings\nMC2N(c(\"F\",\"G\"), \"H\", c(\"X\",\"Z\")) # from multiple vectors\nmonth_cycle2numeric(\"Mar,jun,SEP,dEc\") \nmonth_cycle2numeric(\"Mar\", \"jun\", \"SEP\", \"dEc\")\nMC2N(\"March,june,sep,decem\")\nMC2N(\"March, june, sep, decem\") #spaces between commas are ok\nmonth_cycle2numeric(\"3,6,9,12\")\nmonth_cycle2numeric(seq(3,12,3))\n\n\n"} {"package":"FinancialInstrument","topic":"next.future_id","snippet":"### Name: next.future_id\n### Title: Get the primary_id of the next-to-expire (previously expiring)\n### future_series instrument\n### Aliases: next.future_id prev.future_id\n\n### ** Examples\n\nnext.future_id(\"ES_Z1\",\"H,M,U,Z\", format=NULL) \nnext.future_id(\"VIXAUG11\", 1:12, root='VIX', format=NULL)\nnext.future_id(\"YM_Q11\", seq(3,12,3)) #gives a warning about 'Q' not being part of month_cycle\n\n\n"} {"package":"FinancialInstrument","topic":"option_series.yahoo","snippet":"### Name: option_series.yahoo\n### Title: constructor for series of options using yahoo data\n### Aliases: option_series.yahoo\n\n### ** Examples\n\n## Not run: \n##D option_series.yahoo('SPY') #only nearby calls and puts\n##D option_series.yahoo('DIA', Exp=NULL) #all chains\n##D ls_instruments()\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"parse_id","snippet":"### Name: parse_id\n### Title: Parse a primary_id\n### Aliases: parse_id\n\n### ** Examples\n\nparse_id(\"ES_Z11\")\nparse_id(\"CLZ1\")\nparse_id(\"SPY_111217C130\")\n\n\n"} 
{"package":"FinancialInstrument","topic":"parse_suffix","snippet":"### Name: parse_suffix\n### Title: parse a suffix_id\n### Aliases: parse_suffix\n\n### ** Examples\n\nparse_suffix(\"U11\")\nparse_suffix(\"110917C125\")\n\n\n"} {"package":"FinancialInstrument","topic":"redenominate","snippet":"### Name: redenominate\n### Title: Redenominate (change the base of) an instrument\n### Aliases: redenominate\n\n### ** Examples\n\n\n## Not run: \n##D require(quantmod)\n##D EURUSD <- getSymbols(\"EURUSD=x\",src='yahoo',auto.assign=FALSE)\n##D GLD <- getSymbols(\"GLD\", src='yahoo', auto.assign=FALSE)\n##D GLD.EUR <- redenominate(GLD,\"EUR\",\"USD\") #can call with xts object\n##D \n##D currency(\"USD\")\n##D stock(\"GLD\",\"USD\")\n##D GLD.EUR <- redenominate('GLD','EUR') #can also call with instrument name\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"saveInstruments","snippet":"### Name: saveInstruments\n### Title: Save and Load all instrument definitions\n### Aliases: saveInstruments loadInstruments loadInstruments\n### reloadInstruments\n\n### ** Examples\n\n## Not run: \n##D stock(\"SPY\", currency(\"USD\"), 1)\n##D tmpdir <- tempdir()\n##D saveInstruments(\"MyInstruments.RData\", dir=tmpdir)\n##D rm_instruments(keep.currencies=FALSE)\n##D loadInstruments(\"MyInstruments.RData\", dir=tmpdir)\n##D # write .R file that can be sourced\n##D saveInstruments(\"MyInstruments.R\", dir=tmpdir)\n##D rm_instruments(keep.currencies=FALSE)\n##D loadInstruments(\"MyInstruments.R\", dir=tmpdir)\n##D #source(file=paste(tmpdir, \"MyInstruments.R\", sep=\"/\")) # same\n##D unlink(tmpdir, recursive=TRUE) \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"saveSymbols.days","snippet":"### Name: saveSymbols.days\n### Title: Save data to disk\n### Aliases: saveSymbols.days saveSymbols.common\n\n### ** Examples\n\n## Not run: \n##D getSymbols(\"SPY\", src='yahoo')\n##D dir.create(\"tmpdata\")\n##D saveSymbols.common(\"SPY\", base_dir=\"tmpdata\")\n##D rm(\"SPY\")\n##D getSymbols(\"SPY\", src='FI', dir=\"tmpdata\", split_method='common')\n##D unlink(\"tmpdata/SPY\", recursive=TRUE)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"future_series","snippet":"### Name: future_series\n### Title: Constructors for series contracts\n### Aliases: future_series option_series bond_series option_series\n### bond_series\n\n### ** Examples\n\n## Not run: \n##D currency(\"USD\")\n##D future(\"ES\",\"USD\",multiplier=50, tick_size=0.25)\n##D future_series('ES_U1')\n##D future_series(root_id='ES',suffix_id='Z11')\n##D stock('SPY','USD')\n##D option('.SPY','USD',multiplier=100,underlying_id='SPY')\n##D #can use either .SPY or SPY for the root_id. 
\n##D #it will find the one that contains the option specs.\n##D option_series('SPY_110917C125', expires='2011-09-16')\n##D option_series(root_id='SPY',suffix_id='111022P125')\n##D option_series(root_id='.SPY',suffix_id='111119C130')\n##D #multiple series instruments at once.\n##D future_series(c(\"ES_H12\",\"ES_M12\"))\n##D option_series(c(\"SPY_110917C115\",\"SPY_110917P115\"))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"sort_ids","snippet":"### Name: sort_ids\n### Title: sort primary_ids of instruments\n### Aliases: sort_ids\n\n### ** Examples\n\n## Not run: \n##D ids <- c(\"ES_U11\",'GLD','SPY',\"YM_Jun11\",'DIA','VX_V10')\n##D sort_ids(ids)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"synthetic","snippet":"### Name: synthetic\n### Title: synthetic instrument constructors\n### Aliases: synthetic synthetic.instrument synthetic spread\n### guaranteed_spread butterfly spread butterfly guaranteed_spread\n### ICS_root ICS\n\n### ** Examples\n\n\n## Not run: \n##D stock('SPY','USD',1)\n##D stock('DIA','USD',1)\n##D spread('SPY.DIA','USD',c('SPY','DIA'),c(1,-1))\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"update_instruments.iShares","snippet":"### Name: update_instruments.iShares\n### Title: update iShares and SPDR ETF metadata\n### Aliases: update_instruments.iShares update_instruments.SPDR\n\n### ** Examples\n\n## Not run: \n##D stock(\"IWC\", currency(\"USD\"))\n##D update_instruments.iShares(\"IWC\")\n##D getInstrument(\"IWC\")\n##D \n##D Symbols <- stock(c(\"SPY\", \"JNK\"), currency(\"USD\"))\n##D update_instruments.SPDR(Symbols)\n##D buildHierarchy(c(\"SPY\", \"JNK\"), \"Name\")\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"update_instruments.instrument","snippet":"### Name: update_instruments.instrument\n### Title: Update instruments with metadata from another instrument.\n### Aliases: update_instruments.instrument\n\n### ** Examples\n\n## Not run: \n##D #rm_instruments()\n##D currency(\"USD\")\n##D synthetic(\"SPX\", \"USD\", identifiers=list(yahoo=\"GSPC\"),\n##D tick_size=0.01,\n##D liquidHours=\"T08:30:00/T15:00:00\", \n##D extraField='something else', \n##D assign_i=TRUE)\n##D stock(\"SPY\", \"USD\", liquidHours=\"\", assign_i=TRUE)\n##D all.equal(getInstrument(\"SPX\"), getInstrument(\"SPY\"))\n##D getInstrument(\"SPY\")\n##D ## update SPY metadata based on the metadata of SPX\n##D ## Only attributes that == \"\" are updated by default\n##D update_instruments.instrument(\"SPY\", \"SPX\", assign_i=FALSE) #liquidHours\n##D update_instruments.instrument(\"SPY\", \"SPX\", create.new=TRUE,\n##D ignore=c(\"identifiers\", \"type\"), \n##D assign_i=FALSE)\n##D # Although you probably do NOT want to, this will\n##D # copy everything new -- including identifiers and type!\n##D update_instruments.instrument(\"SPY\", \"SPX\", create.new=TRUE, ignore=NULL, \n##D assign_i=FALSE) \n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"update_instruments.masterDATA","snippet":"### Name: update_instruments.masterDATA\n### Title: Update instrument metadata for ETFs\n### Aliases: update_instruments.masterDATA update_instruments.md\n\n### ** Examples\n\n## Not run: \n##D stock(s <- c(\"SPY\", \"DIA\"), currency(\"USD\"))\n##D update_instruments.masterDATA(s)\n##D buildHierarchy(s, \"Name\", \"Fund.Type\", \"defined.by\")\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"update_instruments.morningstar","snippet":"### Name: update_instruments.morningstar\n### Title: Update instrument metadata 
for ETFs\n### Aliases: update_instruments.morningstar update_instruments.ms\n\n### ** Examples\n\n## Not run: \n##D ## backup .instrument environment\n##D ibak <- as.list(FinancialInstrument:::.instrument) \n##D rm_instruments()\n##D stock(s <- c(\"SPY\", \"USO\", \"LQD\"), currency(\"USD\"))\n##D update_instruments.morningstar(s)\n##D instrument.table(s)\n##D ## cleanup and restore instrument environment\n##D rm_instruments(keep.currencies=FALSE)\n##D loadInstruments(ibak)\n## End(Not run)\n\n\n"} {"package":"FinancialInstrument","topic":"update_instruments.yahoo","snippet":"### Name: update_instruments.yahoo\n### Title: updates instrument metadata with data from yahoo\n### Aliases: update_instruments.yahoo update_instruments.TTR\n### update_instruments.TTR\n\n### ** Examples\n\n## Not run: \n##D \t\n##D \tstock('GS',currency('USD'))\n##D update_instruments.yahoo('GS')\n##D \tgetInstrument('GS')\n##D update_instruments.TTR('GS')\n##D getInstrument('GS')\n## End(Not run)\n\n\n"} {"package":"cgrcusum","topic":"bercusum","snippet":"### Name: bercusum\n### Title: Risk-adjusted Bernoulli CUSUM\n### Aliases: bercusum\n\n### ** Examples\n\nvarsanalysis <- c(\"age\", \"sex\", \"BMI\")\nexprfitber <- as.formula(paste(\"(entrytime <= 365) & (censorid == 1)~\",\n paste(varsanalysis, collapse='+')))\nsurgerydat$instance <- surgerydat$Hosp_num\nglmmodber <- glm(exprfitber, data = surgerydat, family = binomial(link = \"logit\"))\nbercus <- bercusum(data = subset(surgerydat, Hosp_num == 14), glmmod = glmmodber,\n followup = 100, theta = log(2))\nplot(bercus)\n\n\n"} {"package":"cgrcusum","topic":"bkcusum","snippet":"### Name: bkcusum\n### Title: Continuous time BK-CUSUM\n### Aliases: bkcusum\n\n### ** Examples\n\nrequire(survival)\ntdat <- subset(surgerydat, Hosp_num == 14)\ntcbaseh <- function(t) chaz_exp(t, lambda = 0.01)\nvarsanalysis <- c(\"age\", \"sex\", \"BMI\")\nexprfit <- as.formula(paste(\"Surv(survtime, censorid) ~\" ,paste(varsanalysis, collapse='+')))\ntcoxmod <- coxph(exprfit, data= surgerydat)\n#Alternatively, cbaseh can be left empty when specifying coxphmod through coxph()\nbk <- bkcusum(data = tdat, theta = log(2), coxphmod = tcoxmod, cbaseh = tcbaseh, pb = TRUE)\nplot(bk)\n\n\n"} {"package":"cgrcusum","topic":"calc_risk","snippet":"### Name: calc_risk\n### Title: Calculate the Cox risk associated with the covariates of the\n### individual\n### Aliases: calc_risk\n\n### ** Examples\n\ncrdat <- data.frame(age = rnorm(10, 40, 5), BMI = rnorm(10, 24, 3))\ncrlist <- list(formula = as.formula(\"~age + BMI\"), coefficients = c(\"age\"= 0.02, \"BMI\"= 0.009))\ncalc_risk(crdat, crlist)\n\n\n"} {"package":"cgrcusum","topic":"cgr_helper","snippet":"### Name: cgr_helper\n### Title: Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)\n### helper - single time point\n### Aliases: cgr_helper\n\n### ** Examples\n\n#TO-DO\n\n\n"} {"package":"cgrcusum","topic":"cgr_helper_mat","snippet":"### Name: cgr_helper_mat\n### Title: Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)\n### helper - matrix formulation of the problem\n### Aliases: cgr_helper_mat\n\n### ** Examples\n\n## Not run: \n##D require(survival)\n##D tdat <- subset(surgerydat, Hosp_num == 1)\n##D tdat$otime <- tdat$entrytime + tdat$survtime\n##D tcbaseh <- function(t) chaz_exp(t, lambda = 0.01)\n##D varsanalysis <- c(\"age\", \"sex\", \"BMI\")\n##D exprfit <- as.formula(paste(\"Surv(survtime, censorid) ~\" ,paste(varsanalysis, collapse='+')))\n##D tcoxmod <- coxph(exprfit, data= surgerydat)\n##D #Alternatively, cbaseh can 
be left empty when specifying coxphmod through coxph()\n##D cgrv1 <- cgr_helper_mat(data = tdat, ctimes = unique(tdat$entrytime + tdat$survtime),\n##D coxphmod = tcoxmod, cbaseh = tcbaseh, displaypb = TRUE)\n## End(Not run)\n\n\n"} {"package":"cgrcusum","topic":"cgr_helper_mat_2","snippet":"### Name: cgr_helper_mat_2\n### Title: Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)\n### helper - matrix formulation of the problem - version 2\n### Aliases: cgr_helper_mat_2\n\n### ** Examples\n\n## Not run: \n##D require(survival)\n##D tdat <- subset(surgerydat, Hosp_num == 1)\n##D tdat$otime <- tdat$entrytime + tdat$survtime\n##D tcbaseh <- function(t) chaz_exp(t, lambda = 0.01)\n##D varsanalysis <- c(\"age\", \"sex\", \"BMI\")\n##D exprfit <- as.formula(paste(\"Surv(survtime, censorid) ~\" ,paste(varsanalysis, collapse='+')))\n##D tcoxmod <- coxph(exprfit, data= surgerydat)\n##D #Alternatively, cbaseh can be left empty when specifying coxphmod through coxph()\n##D cgr2 <- cgr_helper_mat_2(data = tdat, ctimes = unique(tdat$entrytime + tdat$survtime),\n##D coxphmod = tcoxmod, cbaseh = tcbaseh, displaypb = TRUE)\n## End(Not run)\n\n\n"} {"package":"cgrcusum","topic":"cgr_helper_mat_3","snippet":"### Name: cgr_helper_mat_3\n### Title: Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)\n### helper - matrix formulation of the problem - version 3\n### Aliases: cgr_helper_mat_3\n\n### ** Examples\n\n## Not run: \n##D require(survival)\n##D tdat <- subset(surgerydat, Hosp_num == 1)\n##D tdat$otime <- tdat$entrytime + tdat$survtime\n##D tcbaseh <- function(t) chaz_exp(t, lambda = 0.01)\n##D varsanalysis <- c(\"age\", \"sex\", \"BMI\")\n##D exprfit <- as.formula(paste(\"Surv(survtime, censorid) ~\" ,paste(varsanalysis, collapse='+')))\n##D tcoxmod <- coxph(exprfit, data= surgerydat)\n##D #Alternatively, cbaseh can be left empty when specifying coxphmod through coxph()\n##D cgr3 <- cgr_helper_mat_3(data = tdat, ctimes = unique(tdat$entrytime + tdat$survtime),\n##D coxphmod = tcoxmod, cbaseh = tcbaseh, displaypb = TRUE)\n## End(Not run)\n\n\n"} {"package":"cgrcusum","topic":"cgrcusum","snippet":"### Name: cgrcusum\n### Title: Continuous time Generalized Rapid response CUSUM (CGR-CUSUM)\n### Aliases: cgrcusum\n\n### ** Examples\n\nrequire(survival)\ntdat <- subset(surgerydat, Hosp_num == 1)\ntcbaseh <- function(t) chaz_exp(t, lambda = 0.01)\nvarsanalysis <- c(\"age\", \"sex\", \"BMI\")\nexprfit <- as.formula(paste(\"Surv(survtime, censorid) ~\" ,paste(varsanalysis, collapse='+')))\ntcoxmod <- coxph(exprfit, data= surgerydat)\n#Alternatively, cbaseh can be left empty when specifying coxphmod through coxph()\ncgr <- cgrcusum(data = tdat, coxphmod = tcoxmod, cbaseh = tcbaseh, pb = TRUE)\nplot(cgr)\n\n\n"} {"package":"cgrcusum","topic":"funnelplot","snippet":"### Name: funnelplot\n### Title: Risk-adjusted funnel plot\n### Aliases: funnelplot\n\n### ** Examples\n\nvarsanalysis <- c(\"age\", \"sex\", \"BMI\")\nexprfitfunnel <- as.formula(paste(\"(entrytime <= 365) & (censorid == 1)~\",\n paste(varsanalysis, collapse='+')))\nsurgerydat$instance <- surgerydat$Hosp_num\nglmmodfun <- glm(exprfitfunnel, data = surgerydat, family = binomial(link = \"logit\"))\nfunnel <- funnelplot(data = surgerydat, ctime = 3*365, glmmod = glmmodfun, followup = 100)\nplot(funnel)\n\n\n"} {"package":"cgrcusum","topic":"gen_arriv_times","snippet":"### Name: gen_arriv_times\n### Title: Generate arrival times according to a Poisson point process\n### Aliases: gen_arriv_times\n\n### ** Examples\n\ngen_arriv_times(psi = 
0.3, t = 5)\n\n\n"} {"package":"cgrcusum","topic":"gen_surv_times","snippet":"### Name: gen_surv_times\n### Title: Generate survival times\n### Aliases: gen_surv_times\n\n### ** Examples\n\ngen_surv_times(invchaz = function(t) inv_chaz_exp(t, lambda = 0.01), data = 5)\n\n\n"} {"package":"cgrcusum","topic":"runlength","snippet":"### Name: runlength\n### Title: Determine run length of a CUSUM chart\n### Aliases: runlength runlength.cgrcusum runlength.bkcusum\n### runlength.bercusum\n\n### ** Examples\n\nvarsanalysis <- c(\"age\", \"sex\", \"BMI\")\nexprfitber <- as.formula(paste(\"(entrytime <= 365) & (censorid == 1)~\",\n paste(varsanalysis, collapse='+')))\nsurgerydat$instance <- surgerydat$Hosp_num\nglmmodber <- glm(exprfitber, data = surgerydat, family = binomial(link = \"logit\"))\nbercus <- bercusum(data = subset(surgerydat, Hosp_num == 14), glmmod = glmmodber,\n followup = 100, theta = log(2))\nrunlength(bercus, h = 2)\n\n\n"} {"package":"RolWinMulCor","topic":"rolwinmulcor_1win","snippet":"### Name: rolwinmulcor_1win\n### Title: Estimate the Rolling Window Correlation for the multi-variate\n### case to plot its outputs as a single one window\n### Aliases: rolwinmulcor_1win rolwinmulcor_1win\n### Keywords: rolwincor_multivariate rolwincor_multivariate_simply_red\n\n### ** Examples\n\n # Testing the function rolwinmulcor_1win (multi-variate case)\n # Estimates the outputs to generate Fig. 6 in Polanco-Martinez (2020). \n test_rolwinmulcor_1win <- rolwinmulcor_1win(YX_ecological_data, widthwin=61,\n Align=\"center\", pvalcorectmethod=\"BH\")\n\n\n"} {"package":"RolWinMulCor","topic":"rolwinmulcor_heatmap","snippet":"### Name: rolwinmulcor_heatmap\n### Title: Estimate the Rolling Window Correlation for the multi-variate\n### case to plot the results as a heat map\n### Aliases: rolwinmulcor_heatmap rolwinmulcor_heatmap\n### Keywords: rolwincor_multivariate rolwincor_multivariate_heatmap\n\n### ** Examples\n\n## No test: \n# Testing the function rolwinmulcor_heatmap\ntest_rolwinmulcor_heatmap <- rolwinmulcor_heatmap(YX_ecological_data, \n typewidthwin=\"PARTIAL\", widthwin_1=11, widthwin_N=101, \n Align=\"center\", pvalcorectmethod=\"BH\")\n## End(No test)\n\n\n"} {"package":"qrmdata","topic":"OIL_Brent","snippet":"### Name: commodities\n### Title: Commodity Data\n### Aliases: OIL_Brent GOLD\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"OIL_Brent\")\ndata(\"GOLD\")\n\n\n"} {"package":"qrmdata","topic":"crypto","snippet":"### Name: crypto\n### Title: Cryptocurrency Prices in USD\n### Aliases: crypto\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"crypto\")\nstr(crypto)\nlibrary(xts)\nplot.zoo(crypto, main = \"Cryptocurrencies in USD\", xlab = \"Time\")\n\n\n"} {"package":"qrmdata","topic":"SP_defaults","snippet":"### Name: default\n### Title: Standard & Poor's Default Data\n### Aliases: SP_defaults\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"SP_defaults\")\n\n\n"} {"package":"qrmdata","topic":"CAD_USD","snippet":"### Name: fx\n### Title: Foreign Exchange Rate Data\n### Aliases: CAD_USD GBP_USD EUR_USD CHF_USD JPY_USD CNY_USD CAD_GBP\n### USD_GBP EUR_GBP CHF_GBP JPY_GBP CNY_GBP\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"CAD_USD\")\ndata(\"GBP_USD\")\ndata(\"EUR_USD\")\ndata(\"CHF_USD\")\ndata(\"JPY_USD\")\ndata(\"CNY_USD\")\ndata(\"CAD_GBP\")\ndata(\"USD_GBP\")\ndata(\"EUR_GBP\")\ndata(\"CHF_GBP\")\ndata(\"JPY_GBP\")\ndata(\"CNY_GBP\")\n\n\n"} {"package":"qrmdata","topic":"ZCB_CAD","snippet":"### Name: interest_rates\n### Title: Interest-Rate Data\n### Aliases: 
ZCB_CAD ZCB_USD\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"ZCB_CAD\")\ndata(\"ZCB_USD\")\nmat <- as.matrix(ZCB_USD['2015-01-01/2015-12-31',])\ndf <- data.frame(Day = rep(1:nrow(mat), each = ncol(mat)),\n Maturity = rep(1:ncol(mat), nrow(mat)),\n Value = as.vector(t(mat)))\nlattice::wireframe(Value ~ Day * Maturity, data = df,\n alpha.regions = 0.5,\n scales = list(arrows = FALSE, col = \"black\"),\n par.settings = list(axis.line = list(col = \"transparent\")))\n\n\n"} {"package":"qrmdata","topic":"fire","snippet":"### Name: losses\n### Title: Loss Datasets\n### Aliases: fire DNB\n### Keywords: datasets\n\n### ** Examples\n\nlibrary(xts)\n## Danish fire losses\ndata(\"fire\")\nstr(fire)\nstopifnot(inherits(fire, \"xts\"), length(fire) == 2167)\nplot.zoo(fire, ylab = \"Fire insurance claim\")\n\n## Largest 1% of simulated DNB losses\ndata(\"DNB\")\nstopifnot(dim(DNB) == c(25000, 3))\n\n\n"} {"package":"qrmdata","topic":"SP500","snippet":"### Name: stock_indices\n### Title: Stock Index Data\n### Aliases: SP500 DJ NASDAQ FTSE SMI EURSTOXX CAC DAX CSI HSI SSEC NIKKEI\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"SP500\")\ndata(\"DJ\")\ndata(\"NASDAQ\")\ndata(\"FTSE\")\ndata(\"SMI\")\ndata(\"EURSTOXX\")\ndata(\"CAC\")\ndata(\"DAX\")\ndata(\"CSI\")\ndata(\"HSI\")\ndata(\"SSEC\")\ndata(\"NIKKEI\")\n\n\n"} {"package":"qrmdata","topic":"SP500_const","snippet":"### Name: stock_indices_constituents\n### Title: Stock Index Constituents Data\n### Aliases: SP500_const SP500_const_info DJ_const FTSE_const EURSTX_const\n### HSI_const\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"SP500_const\")\ndata(\"DJ_const\")\ndata(\"FTSE_const\")\ndata(\"EURSTX_const\")\ndata(\"HSI_const\")\n\n\n"} {"package":"qrmdata","topic":"RSHCQ","snippet":"### Name: stock_data\n### Title: (Single) Stock Data\n### Aliases: RSHCQ\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"RSHCQ\")\n\n\n"} {"package":"qrmdata","topic":"VIX","snippet":"### Name: volatility\n### Title: Volatility Index\n### Aliases: VIX\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"VIX\")\n\n\n"} {"package":"devtools","topic":"check_man","snippet":"### Name: check_man\n### Title: Check documentation, as R CMD check does.\n### Aliases: check_man\n\n### ** Examples\n\n## Not run: \n##D check_man(\"mypkg\")\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"dev_mode","snippet":"### Name: dev_mode\n### Title: Activate and deactivate development mode.\n### Aliases: dev_mode\n\n### ** Examples\n\n## Not run: \n##D dev_mode()\n##D dev_mode()\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"dev_sitrep","snippet":"### Name: dev_sitrep\n### Title: Report package development situation\n### Aliases: dev_sitrep\n\n### ** Examples\n\n## Not run: \n##D dev_sitrep()\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"install_deps","snippet":"### Name: install_deps\n### Title: Install package dependencies if needed.\n### Aliases: install_deps install_dev_deps\n\n### ** Examples\n\n## Not run: install_deps(\".\")\n\n\n"} {"package":"devtools","topic":"load_all","snippet":"### Name: load_all\n### Title: Load complete package\n### Aliases: load_all\n\n### ** Examples\n\n## Not run: \n##D # Load the package in the current directory\n##D load_all(\"./\")\n##D \n##D # Running again loads changed files\n##D load_all(\"./\")\n##D \n##D # With reset=TRUE, unload and reload the package for a clean start\n##D load_all(\"./\", TRUE)\n##D \n##D # With export_all=FALSE, only objects listed as exports in NAMESPACE\n##D # are exported\n##D 
load_all(\"./\", export_all = FALSE)\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"package_file","snippet":"### Name: package_file\n### Title: Find file in a package.\n### Aliases: package_file\n### Keywords: internal\n\n### ** Examples\n\n## Not run: \n##D package_file(\"figures\", \"figure_1\")\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"reload","snippet":"### Name: reload\n### Title: Unload and reload package.\n### Aliases: reload\n\n### ** Examples\n\n## Not run: \n##D # Reload package that is in current directory\n##D reload(\".\")\n##D \n##D # Reload package that is in ./ggplot2/\n##D reload(\"ggplot2/\")\n##D \n##D # Can use inst() to find the package path\n##D # This will reload the installed ggplot2 package\n##D reload(pkgload::inst(\"ggplot2\"))\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"revdep","snippet":"### Name: revdep\n### Title: Reverse dependency tools.\n### Aliases: revdep revdep_maintainers\n### Keywords: internal\n\n### ** Examples\n\n## Not run: \n##D revdep(\"ggplot2\")\n##D \n##D revdep(\"ggplot2\", ignore = c(\"xkcd\", \"zoo\"))\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"source_gist","snippet":"### Name: source_gist\n### Title: Run a script on gist\n### Aliases: source_gist\n\n### ** Examples\n\n## Not run: \n##D # You can run gists given their id\n##D source_gist(6872663)\n##D source_gist(\"6872663\")\n##D \n##D # Or their html url\n##D source_gist(\"https://gist.github.com/hadley/6872663\")\n##D source_gist(\"gist.github.com/hadley/6872663\")\n##D \n##D # It's highly recommend that you run source_gist with the optional\n##D # sha1 argument - this will throw an error if the file has changed since\n##D # you first ran it\n##D source_gist(6872663, sha1 = \"54f1db27e60\")\n##D # Wrong hash will result in error\n##D source_gist(6872663, sha1 = \"54f1db27e61\")\n##D \n##D #' # You can speficy a particular R file in the gist\n##D source_gist(6872663, filename = \"hi.r\")\n##D source_gist(6872663, filename = \"hi.r\", sha1 = \"54f1db27e60\")\n## End(Not run)\n\n\n"} {"package":"devtools","topic":"source_url","snippet":"### Name: source_url\n### Title: Run a script through some protocols such as http, https, ftp,\n### etc.\n### Aliases: source_url\n\n### ** Examples\n\n## Not run: \n##D \n##D source_url(\"https://gist.github.com/hadley/6872663/raw/hi.r\")\n##D \n##D # With a hash, to make sure the remote file hasn't changed\n##D source_url(\"https://gist.github.com/hadley/6872663/raw/hi.r\",\n##D sha1 = \"54f1db27e60bb7e0486d785604909b49e8fef9f9\")\n##D \n##D # With a truncated hash\n##D source_url(\"https://gist.github.com/hadley/6872663/raw/hi.r\",\n##D sha1 = \"54f1db27e60\")\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"getHbgrid","snippet":"### Name: getHbgrid\n### Title: HB models from ZAHB to thermal pulses\n### Aliases: getHbgrid\n### Keywords: manip\n\n### ** Examples\n\n\n\n ### slow!\n ## Not run: \n##D hbgrid <- getHbgrid(0.002, 0.25, 1.7, 0)\n##D \n##D ### get data from local directory /data\n##D hbgrid <- getHbgrid(0.002, 0.25, 1.7, 0, baseURL=\"/data/\")\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"getIso","snippet":"### Name: getIso\n### Title: Import stellar isochrones data\n### Aliases: getIso\n### Keywords: manip\n\n### ** Examples\n\n ## Not run: \n##D iso <- getIso(12.0, 0.002, 0.25, 1.7, 0)\n##D \n##D ### get data from local directory /data\n##D iso <- getIso(12.0, 0.002, 0.25, 1.7, 0, baseURL=\"/data/\")\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"getTrk","snippet":"### 
Name: getTrk\n### Title: Import stellar track data\n### Aliases: getTrk getHb\n### Keywords: manip\n\n### ** Examples\n\n\n ## Not run: \n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0)\n##D \n##D ### get data from local directory /data\n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0, baseURL=\"/data/\")\n##D \n##D ### multi-panel plot of the various quantities with time\n##D track <- getTrk(0.80, 0.001, 0.25, 1.90, 0)\n##D if(!is.na(track)[1]) {\n##D trkdata <- within(stack(track$data), time <- rep(track$data$time,\n##D length.out=length(values)) )\n##D require(lattice)\n##D xyplot( values ~ time | ind, data=trkdata, type=\"l\",\n##D scales=list(y=list(relation=\"free\")))\n##D } \n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"getTrkSet","snippet":"### Name: getTrkSet\n### Title: Import a set of data\n### Aliases: getTrkSet getIsoSet\n### Keywords: manip\n\n### ** Examples\n\n ## Not run: \n##D ### get two masses\n##D trkset <- getTrkSet(c(0.9, 1.0), 0.002, 0.25, 1.7, 0)\n##D \n##D ### get two masses at two metallicity, for a total of 4 objects\n##D trkset <- getTrkSet(c(0.9, 1.0), c(0.002, 0.01), 0.25, 1.7, 0)\n##D \n##D ### get data from local directory /data\n##D set <- getTrkSet(0.9, 0.002, c(0.25, 0.33), 1.7, 0, baseURL=\"/data/\")\n## End(Not run)\n\n\n\n"} {"package":"stellaR","topic":"getZahb","snippet":"### Name: getZahb\n### Title: Import stellar track data for ZAHB\n### Aliases: getZahb\n### Keywords: manip\n\n### ** Examples\n\n ## Not run: \n##D zahb <- getZahb(0.002, 0.25, 1.7, 0)\n##D \n##D ### get data from local directory /data\n##D zahb <- getZahb(0.002, 0.25, 1.7, 0, baseURL=\"/data/\")\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"interpTrk","snippet":"### Name: interpTrk\n### Title: Interpolate stellar tracks data\n### Aliases: interpTrk\n### Keywords: math\n\n### ** Examples\n\n\n ### slow!\n ## Not run: \n##D \n##D iptrk <- interpTrk(0.002, 0.25, 1.74, 0)\n##D \n##D ### get data from local directory /data\n##D iptrk <- interpTrk(0.002, 0.25, 1.74, 0, baseURL=\"/data/\")\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"keypoints","snippet":"### Name: keypoints\n### Title: Extract relevant evolutionary points from stellar tracks or\n### isochrones\n### Aliases: keypoints keypoints.trk keypoints.trkset keypoints.iso\n### keypoints.isoset\n### Keywords: manip\n\n### ** Examples\n\n\n ## Not run: \n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0)\n##D ### check return value from CDS\n##D if(!is.na(trk)[1]) keypoints(trk)\n##D \n##D is <- getIso(11, 0.002, 0.25, 1.7, 0)\n##D ### check return value from CDS\n##D if(!is.na(is)[1]) keypoints(is)\n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"makeIso","snippet":"### Name: makeIso\n### Title: Construct stellar isochrones from tracks\n### Aliases: makeIso\n### Keywords: math\n\n### ** Examples\n\n\n ### slow!\n ## Not run: \n##D isoset <- makeIso(11.2, 0.002, 0.25, 1.7, 0)\n## End(Not run)\n\n ### get data from local directory /data\n ## Not run: isoset <- makeIso(c(11.2, 12.4), 0.002, 0.25, 1.7, 0, baseURL=\"/data/\")\n\n\n"} {"package":"stellaR","topic":"plot.trk","snippet":"### Name: plot.trk\n### Title: Plot stellar track or isochrones objects\n### Aliases: plot.trk plot.hb plot.zahb plot.iso\n### Keywords: manip\n\n### ** Examples\n\n\n ## Not run: \n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0)\n##D \n##D ### check return value from CDS\n##D if(!is.na(trk)[1]) plot(trk) \n## End(Not run)\n\n\n\n"} {"package":"stellaR","topic":"plot.trkset","snippet":"### Name: plot.trkset\n### Title: Plot a set stellar 
objects\n### Aliases: plot.trkset plot.hbset plot.isoset\n### Keywords: hplot\n\n### ** Examples\n\n ## Not run: \n##D trkset <- getTrkSet(c(0.7, 0.9, 1.1), 0.002, 0.25, 1.7, 0)\n##D \n##D ### check return value from CDS\n##D if(!is.na(trkset)[1]) plot(trkset)\n## End(Not run)\n\n\n\n"} {"package":"stellaR","topic":"plotAstro","snippet":"### Name: plotAstro\n### Title: Plot function for stellar objects\n### Aliases: plotAstro\n### Keywords: hplot\n\n### ** Examples\n\n\n ## Not run: \n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0)\n##D \n##D ### check return value from CDS\n##D if(!is.na(trk)[1]) plot(trk) \n## End(Not run)\n\n\n\n"} {"package":"stellaR","topic":"print.trk","snippet":"### Name: print.trk\n### Title: Print stellar track objects\n### Aliases: print.trk print.hb print.zahb print.iso\n### Keywords: manip\n\n### ** Examples\n\n\n ## Not run: \n##D trk <- getTrk(0.9, 0.002, 0.25, 1.7, 0)\n##D trk\n##D \n## End(Not run)\n\n\n"} {"package":"stellaR","topic":"showComposition","snippet":"### Name: showComposition\n### Title: Show the chemical and physical combinations in the database\n### Aliases: showComposition\n### Keywords: manip\n\n### ** Examples\n\n\n showComposition()\n\n\n"} {"package":"stellaR","topic":"testComposition","snippet":"### Name: testComposition\n### Title: Check the existence of a record in the database\n### Aliases: testComposition\n### Keywords: manip\n\n### ** Examples\n\n\n testComposition(0.002, 0.25, 1.7, 0)\n\n\n"} {"package":"hdbinseg","topic":"dcbs.alg","snippet":"### Name: dcbs.alg\n### Title: Double CUSUM Binary Segmentation\n### Aliases: dcbs.alg\n\n### ** Examples\n\nx <- matrix(rnorm(10*100), nrow = 10)\ndcbs.alg(x, cp.type = 1, phi=.5, temporal = FALSE, do.parallel = 0)$ecp\n## No test: \nx <- matrix(rnorm(100*300), nrow = 100)\nx[1:10, 151:300] <- x[1:10, 151:300] + 1\ndcbs.alg(x, cp.type = 1, phi=-1, temporal = FALSE, do.parallel = 0)$ecp\n## End(No test)\n\n\n"} {"package":"hdbinseg","topic":"sbs.alg","snippet":"### Name: sbs.alg\n### Title: Sparsified Binary Segmentation\n### Aliases: sbs.alg\n\n### ** Examples\n\nx <- matrix(rnorm(20*300), nrow = 20)\nsbs.alg(x, cp.type = 2, scales = -1, diag = TRUE, do.parallel = 0)$ecp\n## No test: \nx <- matrix(rnorm(100*300), nrow = 100)\nx[1:10, 151:300] <- x[1:10, 151:300]*sqrt(2)\nsbs.alg(x, cp.type = 2, scales = -1, diag = TRUE, do.parallel = 0)$ecp\n## End(No test)\n\n\n"} {"package":"cornet","topic":"coef.cornet","snippet":"### Name: coef.cornet\n### Title: Extract estimated coefficients\n### Aliases: coef.cornet\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\nnet <- cornet(y=y,cutoff=0,X=X)\ncoef(net)\n\n\n\n"} {"package":"cornet","topic":"cornet","snippet":"### Name: cornet\n### Title: Combined regression\n### Aliases: cornet cornet-package\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\nnet <- cornet(y=y,cutoff=0,X=X)\nnet\n\n\n\n"} {"package":"cornet","topic":"cv.cornet","snippet":"### Name: cv.cornet\n### Title: Performance measurement\n### Aliases: cv.cornet\n\n### ** Examples\n\n## Don't show: \n#n <- 50; p <- 20\n#y <- rnorm(n)\n#X <- matrix(rnorm(n*p),nrow=n,ncol=p)\n#loss <- cv.cornet(y=y,cutoff=0,X=X,nfolds.ext=2)\n#loss\n## End(Don't show)\n## Not run: \n##D n <- 100; p <- 200\n##D y <- rnorm(n)\n##D X <- matrix(rnorm(n*p),nrow=n,ncol=p)\n##D start <- Sys.time()\n##D loss <- cv.cornet(y=y,cutoff=0,X=X)\n##D end <- Sys.time()\n##D end - start\n##D \n##D loss\n## End(Not run)\n\n\n\n"} 
{"package":"cornet","topic":".check","snippet":"### Name: .check\n### Title: Arguments\n### Aliases: .check\n\n### ** Examples\n\ncornet:::.check(0.5,type=\"scalar\",min=0,max=1)\n\n\n\n"} {"package":"cornet","topic":".equal","snippet":"### Name: .equal\n### Title: Equality\n### Aliases: .equal\n\n### ** Examples\n\ncornet:::.equal(1,1,1)\n\n\n\n"} {"package":"cornet","topic":".simulate","snippet":"### Name: .simulate\n### Title: Data simulation\n### Aliases: .simulate\n\n### ** Examples\n\ndata <- cornet:::.simulate(n=10,p=20)\nnames(data)\n\n\n\n"} {"package":"cornet","topic":".test","snippet":"### Name: .test\n### Title: Single-split test\n### Aliases: .test\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\ncornet:::.test(y=y,cutoff=0,X=X)\n\n\n\n"} {"package":"cornet","topic":"plot.cornet","snippet":"### Name: plot.cornet\n### Title: Plot loss matrix\n### Aliases: plot.cornet\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\nnet <- cornet(y=y,cutoff=0,X=X)\nplot(net)\n\n\n\n"} {"package":"cornet","topic":"predict.cornet","snippet":"### Name: predict.cornet\n### Title: Predict binary outcome\n### Aliases: predict.cornet\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\nnet <- cornet(y=y,cutoff=0,X=X)\npredict(net,newx=X)\n\n\n\n"} {"package":"cornet","topic":"print.cornet","snippet":"### Name: print.cornet\n### Title: Combined regression\n### Aliases: print.cornet\n\n### ** Examples\n\nn <- 100; p <- 200\ny <- rnorm(n)\nX <- matrix(rnorm(n*p),nrow=n,ncol=p)\nnet <- cornet(y=y,cutoff=0,X=X)\nprint(net)\n\n\n\n"} {"package":"bkmrhat","topic":"as.mcmc.bkmrfit","snippet":"### Name: as.mcmc.bkmrfit\n### Title: Convert bkmrfit to mcmc object for coda MCMC diagnostics\n### Aliases: as.mcmc.bkmrfit\n\n### ** Examples\n\n\n# following example from https://jenfb.github.io/bkmr/overview.html\n ## No test: \nset.seed(111)\nlibrary(coda)\nlibrary(bkmr)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\nfitkm <- kmbayes(y = y, Z = Z, X = X, iter = 500, verbose = FALSE,\n varsel = FALSE)\nmcmcobj <- as.mcmc(fitkm, iterstart=251)\nsummary(mcmcobj) # posterior summaries of model parameters\n# compare with default from bkmr package, which omits first 1/2 of chain\nsummary(fitkm)\n# note this only works on multiple chains (see kmbayes_parallel)\n# gelman.diag(mcmcobj)\n# lots of functions in the coda package to use\ntraceplot(mcmcobj)\n# will also fail with delta functions (when using variable selection)\ntry(geweke.plot(mcmcobj))\n## End(No test)\n\n\n"} {"package":"bkmrhat","topic":"as.mcmc.list.bkmrfit.list","snippet":"### Name: as.mcmc.list.bkmrfit.list\n### Title: Convert multi-chain bkmrfit to mcmc.list for coda MCMC\n### diagnostics\n### Aliases: as.mcmc.list.bkmrfit.list\n\n### ** Examples\n\n# following example from https://jenfb.github.io/bkmr/overview.html\n ## No test: \nset.seed(111)\nlibrary(coda)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\n\nfuture::plan(strategy = future::multisession, workers=2)\n# run 2 parallel Markov chains (more usually better)\nfitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 1000,\n verbose = FALSE, varsel = FALSE)\nmcmcobj = as.mcmc.list(fitkm.list)\nsummary(mcmcobj)\n# Gelman/Rubin diagnostics won't work on certain objects,\n# like delta parameters (when using variable selection),\n# so the rstan version of this will work 
better (does not give errors)\n try(gelman.diag(mcmcobj))\n# lots of functions in the coda package to use\nplot(mcmcobj)\n# both of these will also fail with delta functions (when using variable selection)\ntry(gelman.plot(mcmcobj))\ntry(geweke.plot(mcmcobj))\n\ncloseAllConnections()\n## End(No test)\n\n\n"} {"package":"bkmrhat","topic":"kmbayes_combine","snippet":"### Name: kmbayes_combine\n### Title: Combine multiple BKMR chains\n### Aliases: kmbayes_combine comb_bkmrfits\n\n### ** Examples\n\n## No test: \n# following example from https://jenfb.github.io/bkmr/overview.html\nset.seed(111)\nlibrary(bkmr)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\n\nfuture::plan(strategy = future::multisession, workers=2)\n# run 2 parallel Markov chains (low iterations used for illustration)\nfitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 500,\n verbose = FALSE, varsel = TRUE)\n# use bkmr defaults for burnin, but keep them\nbigkm = kmbayes_combine(fitkm.list, excludeburnin=FALSE)\nests = ExtractEsts(bigkm) # defaults to keeping second half of samples\nExtractPIPs(bigkm)\npred.resp.univar <- PredictorResponseUnivar(fit = bigkm)\nrisks.overall <- OverallRiskSummaries(fit = bigkm, y = y, Z = Z, X = X,\n qs = seq(0.25, 0.75, by = 0.05), q.fixed = 0.5, method = \"exact\")\n\n# additional objects that are not in a standard bkmrfit object:\nsummary(bigkm$iters) # note that this reflects how fits are re-ordered to reflect burnin\ntable(bigkm$chain)\n## End(No test)\n\ncloseAllConnections()\n\n\n\n"} {"package":"bkmrhat","topic":"kmbayes_combine_lowmem","snippet":"### Name: kmbayes_combine_lowmem\n### Title: Combine multiple BKMR chains in lower memory settings\n### Aliases: kmbayes_combine_lowmem comb_bkmrfits_lowmem\n\n### ** Examples\n\n## No test: \n# following example from https://jenfb.github.io/bkmr/overview.html\nset.seed(111)\nlibrary(bkmr)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\n\nfuture::plan(strategy = future::multisession, workers=2)\n# run 2 parallel Markov chains (low iterations used for illustration)\nfitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 500,\n verbose = FALSE, varsel = TRUE)\n# use bkmr defaults for burnin, but keep them\nbigkm = kmbayes_combine_lowmem(fitkm.list, excludeburnin=FALSE)\nests = ExtractEsts(bigkm) # defaults to keeping second half of samples\nExtractPIPs(bigkm)\npred.resp.univar <- PredictorResponseUnivar(fit = bigkm)\nrisks.overall <- OverallRiskSummaries(fit = bigkm, y = y, Z = Z, X = X,\n qs = seq(0.25, 0.75, by = 0.05), q.fixed = 0.5, method = \"exact\")\n\n# additional objects that are not in a standard bkmrfit object:\nsummary(bigkm$iters) # note that this reflects how fits are re-ordered to reflect burnin\ntable(bigkm$chain)\n## End(No test)\n\ncloseAllConnections()\n\n\n\n"} {"package":"bkmrhat","topic":"kmbayes_continue","snippet":"### Name: kmbayes_continue\n### Title: Continue sampling from existing bkmr fit\n### Aliases: kmbayes_continue\n\n### ** Examples\n\nset.seed(111)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\n## Not run: \n##D fitty1 = bkmr::kmbayes(y=y,Z=Z,X=X, est.h=TRUE, iter=100)\n##D # do some diagnostics here to see if 100 iterations (default) is enough\n##D # add 100 additional iterations (for illustration - still will not be enough)\n##D fitty2 = kmbayes_continue(fitty1, iter=100)\n##D cobj = as.mcmc(fitty2)\n##D varnames(cobj)\n##D \n## End(Not run)\n\n\n"} 
{"package":"bkmrhat","topic":"kmbayes_diagnose","snippet":"### Name: kmbayes_diagnose\n### Title: MCMC diagnostics using rstan\n### Aliases: kmbayes_diagnose kmbayes_diag\n\n### ** Examples\n\n## No test: \nset.seed(111)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\n\nfuture::plan(strategy = future::multisession)\nfitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 1000,\n verbose = FALSE, varsel = TRUE)\nkmbayes_diag(fitkm.list)\nkmbayes_diag(fitkm.list[[1]]) # just the first chain\n\ncloseAllConnections()\n## End(No test)\n\n\n"} {"package":"bkmrhat","topic":"kmbayes_parallel","snippet":"### Name: kmbayes_parallel\n### Title: Run multiple BKMR chains in parallel\n### Aliases: kmbayes_parallel\n\n### ** Examples\n\n## No test: \nset.seed(111)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\n\nfuture::plan(strategy = future::multisession, workers=2)\n# only 50 iterations fit to save installation time\nfitkm.list <- kmbayes_parallel(nchains=2, y = y, Z = Z, X = X, iter = 50,\n verbose = FALSE, varsel = TRUE)\ncloseAllConnections()\n## End(No test)\n\n\n"} {"package":"bkmrhat","topic":"kmbayes_parallel_continue","snippet":"### Name: kmbayes_parallel_continue\n### Title: Continue sampling from existing bkmr_parallel fit\n### Aliases: kmbayes_parallel_continue\n\n### ** Examples\n\nset.seed(111)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\n## Not run: \n##D \n##D future::plan(strategy = future::multisession, workers=2)\n##D fitty1p = kmbayes_parallel(nchains=2, y=y,Z=Z,X=X)\n##D \n##D fitty2p = kmbayes_parallel_continue(fitty1p, iter=3000)\n##D cobj = as.mcmc.list(fitty2p)\n##D plot(cobj)\n## End(Not run)\n\n\n"} {"package":"bkmrhat","topic":"predict.bkmrfit","snippet":"### Name: predict.bkmrfit\n### Title: Posterior mean/sd predictions\n### Aliases: predict.bkmrfit\n\n### ** Examples\n\n# following example from https://jenfb.github.io/bkmr/overview.html\n## No test: \nlibrary(bkmr)\nset.seed(111)\ndat <- bkmr::SimData(n = 50, M = 4)\ny <- dat$y\nZ <- dat$Z\nX <- dat$X\nset.seed(111)\nfitkm <- kmbayes(y = y, Z = Z, X = X, iter = 200, verbose = FALSE,\n varsel = TRUE)\npostmean = predict(fitkm)\npostmean2 = predict(fitkm, Znew=Z/2)\n# mean difference in posterior means\nmean(postmean-postmean2)\n## End(No test)\n\n\n"} {"package":"MatrixExtra","topic":"assignment","snippet":"### Name: assignment\n### Title: Assignment operator for CSR matrices\n### Aliases: assignment [<-,dgRMatrix,index,index,replValue-method\n### [<-,dgRMatrix,missing,index,replValue-method\n### [<-,dgRMatrix,index,missing,replValue-method\n### [<-,dgRMatrix,missing,missing,replValue-method\n### [<-,dgRMatrix,index,index,sparseVector-method\n### [<-,dgRMatrix,missing,index,sparseVector-method\n### [<-,dgRMatrix,index,missing,sparseVector-method\n### [<-,dgRMatrix,missing,missing,sparseVector-method\n### [<-,ANY,nsparseVector,nsparseVector,replValue-method\n### [<-,ANY,missing,nsparseVector,replValue-method\n### [<-,ANY,nsparseVector,missing,replValue-method\n### [<-,ANY,lsparseVector,lsparseVector,replValue-method\n### [<-,ANY,missing,lsparseVector,replValue-method\n### [<-,ANY,lsparseVector,missing,replValue-method\n### [<-,ANY,nsparseVector,nsparseVector,ANY-method\n### [<-,ANY,missing,nsparseVector,ANY-method\n### [<-,ANY,nsparseVector,missing,ANY-method\n### [<-,ANY,lsparseVector,lsparseVector,ANY-method\n### [<-,ANY,missing,lsparseVector,ANY-method\n### 
[<-,ANY,lsparseVector,missing,ANY-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nset.seed(1)\nX <- rsparsematrix(5, 3, .5, repr=\"R\")\nX[1:3] <- 0\nprint(X)\n\n\n"} {"package":"MatrixExtra","topic":"cbind2-method","snippet":"### Name: cbind2-method\n### Title: Concatenate sparse matrices by columns\n### Aliases: cbind2-method cbind2,TsparseMatrix,TsparseMatrix-method\n### cbind2,TsparseMatrix,sparseVector-method\n### cbind2,sparseVector,TsparseMatrix-method\n### cbind2,CsparseMatrix,sparseVector-method\n### cbind2,sparseVector,CsparseMatrix-method\n### cbind2,sparseVector,sparseVector-method\n### cbind2,RsparseMatrix,RsparseMatrix-method\n### cbind2,TsparseMatrix,RsparseMatrix-method\n### cbind2,RsparseMatrix,TsparseMatrix-method\n### cbind2,RsparseMatrix,numeric-method\n### cbind2,RsparseMatrix,integer-method\n### cbind2,RsparseMatrix,logical-method\n### cbind2,RsparseMatrix,sparseVector-method\n### cbind2,numeric,RsparseMatrix-method\n### cbind2,integer,RsparseMatrix-method\n### cbind2,logical,RsparseMatrix-method\n### cbind2,sparseVector,RsparseMatrix-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nset.seed(1)\nX <- rsparsematrix(3, 4, .3)\nX <- as(X, \"TsparseMatrix\")\ninherits(cbind2(X, X), \"TsparseMatrix\")\n\n\n"} {"package":"MatrixExtra","topic":"conversions","snippet":"### Name: conversions\n### Title: Conversions between matrix types\n### Aliases: conversions as.csr.matrix as.csc.matrix as.coo.matrix\n### as.sparse.vector\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\n\nm.coo <- as(matrix(1:3), \"TsparseMatrix\")\nas.csr.matrix(m.coo)\nas.csr.matrix(1:3) # <- assumes it's a row vector\nas.csc.matrix(1:3) # <- assumes it's a column vector\n\n### Using the new conversion methods\n### (these would fail if 'MatrixExtra' is not loaded)\nas(matrix(1:3), \"ngRMatrix\")\nas(as.csc.matrix(m.coo), \"dgRMatrix\")\n\n\n"} {"package":"MatrixExtra","topic":"emptySparse","snippet":"### Name: emptySparse\n### Title: Create Empty Sparse Matrix\n### Aliases: emptySparse\n\n### ** Examples\n\n### This is very fast despite the large dimensions,\n### as no data is held in the resulting object\nlibrary(MatrixExtra)\nX <- emptySparse(nrow=2^20, ncol=2^25, format=\"T\")\n\n\n"} {"package":"MatrixExtra","topic":"filterSparse","snippet":"### Name: filterSparse\n### Title: Filter values of a sparse matrix or vector\n### Aliases: filterSparse\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\n\n### Random sparse matrix\nset.seed(1)\nX <- rsparsematrix(nrow=20, ncol=10, density=0.3)\n\n### Take only values above 0.5\nX_filtered <- filterSparse(X, function(x) x >= 0.5)\n\n### Only elements with absolute values less than 0.3\nX_filtered <- filterSparse(X, function(x) abs(x) <= 0.3)\n\n### Only values above the mean (among non-zeros)\nX_filtered <- filterSparse(X, function(x) x > mean(x))\n\n\n"} {"package":"MatrixExtra","topic":"mapSparse","snippet":"### Name: mapSparse\n### Title: Map values of a sparse matrix/vector\n### Aliases: mapSparse\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\n\nset.seed(1)\nX <- rsparsematrix(10, 5, .5)\nprint(mapSparse(X, function(x) abs(x)+1))\n\n\n"} {"package":"MatrixExtra","topic":"mathematical-functions","snippet":"### Name: mathematical-functions\n### Title: Mathematical functions for CSR and COO matrices\n### Aliases: mathematical-functions sqrt,RsparseMatrix-method\n### sqrt,TsparseMatrix-method abs,RsparseMatrix-method\n### abs,TsparseMatrix-method log1p,RsparseMatrix-method\n### 
log1p,TsparseMatrix-method sin,RsparseMatrix-method\n### sin,TsparseMatrix-method tan,RsparseMatrix-method\n### tan,TsparseMatrix-method tanh,RsparseMatrix-method\n### tanh,TsparseMatrix-method tanpi,RsparseMatrix-method\n### tanpi,TsparseMatrix-method sinh,RsparseMatrix-method\n### sinh,TsparseMatrix-method atanh,RsparseMatrix-method\n### atanh,TsparseMatrix-method expm1,RsparseMatrix-method\n### expm1,TsparseMatrix-method sign,RsparseMatrix-method\n### sign,TsparseMatrix-method ceiling,RsparseMatrix-method\n### ceiling,TsparseMatrix-method floor,RsparseMatrix-method\n### floor,TsparseMatrix-method trunc,RsparseMatrix-method\n### trunc,TsparseMatrix-method round,RsparseMatrix-method\n### round,TsparseMatrix-method signif,RsparseMatrix-method\n### signif,TsparseMatrix-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\noptions(\"MatrixExtra.quick_show\" = FALSE)\nset.seed(1)\nX <- as.csr.matrix(rsparsematrix(4, 3, .4))\nabs(X)\nsqrt(X^2)\n### This will output CSC\nround(X, 1:2)\n\n\n"} {"package":"MatrixExtra","topic":"matmult","snippet":"### Name: matmult\n### Title: Multithreaded Sparse-Dense Matrix and Vector Multiplications\n### Aliases: matmult %*%,matrix,CsparseMatrix-method\n### %*%,float32,CsparseMatrix-method\n### tcrossprod,matrix,RsparseMatrix-method\n### tcrossprod,float32,RsparseMatrix-method\n### crossprod,matrix,CsparseMatrix-method\n### crossprod,float32,CsparseMatrix-method\n### tcrossprod,RsparseMatrix,matrix-method\n### %*%,RsparseMatrix,matrix-method %*%,RsparseMatrix,float32-method\n### tcrossprod,RsparseMatrix,float32-method\n### %*%,RsparseMatrix,numeric-method %*%,RsparseMatrix,logical-method\n### %*%,RsparseMatrix,integer-method\n### %*%,RsparseMatrix,sparseVector-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\n### To use all available threads (default)\noptions(\"MatrixExtra.nthreads\" = parallel::detectCores())\n### Example will run with only 1 thread (CRAN policy)\noptions(\"MatrixExtra.nthreads\" = 1)\n\n## Generate random matrices\nset.seed(1)\nA <- rsparsematrix(5,4,.5)\nB <- rsparsematrix(4,3,.5)\n\n## Now multiply in some supported combinations\nas.matrix(A) %*% as.csc.matrix(B)\nas.csr.matrix(A) %*% as.matrix(B)\ncrossprod(as.matrix(B), as.csc.matrix(B))\ntcrossprod(as.csr.matrix(A), as.matrix(A))\n\n### Restore the number of threads\noptions(\"MatrixExtra.nthreads\" = parallel::detectCores())\n\n\n"} {"package":"MatrixExtra","topic":"operators","snippet":"### Name: operators\n### Title: Mathematical operators on sparse matrices and sparse vectors\n### Aliases: operators *,RsparseMatrix,sparseMatrix-method\n### *,ngRMatrix,sparseMatrix-method *,lgRMatrix,sparseMatrix-method\n### *,sparseMatrix,RsparseMatrix-method *,sparseMatrix,ngRMatrix-method\n### *,sparseMatrix,lgRMatrix-method *,CsparseMatrix,TsparseMatrix-method\n### *,TsparseMatrix,CsparseMatrix-method\n### &,RsparseMatrix,sparseMatrix-method &,ngRMatrix,sparseMatrix-method\n### &,lgRMatrix,sparseMatrix-method &,sparseMatrix,RsparseMatrix-method\n### &,sparseMatrix,ngRMatrix-method &,sparseMatrix,lgRMatrix-method\n### &,CsparseMatrix,TsparseMatrix-method\n### &,TsparseMatrix,CsparseMatrix-method *,RsparseMatrix,matrix-method\n### *,ngRMatrix,matrix-method *,lgRMatrix,matrix-method\n### *,RsparseMatrix,float32-method *,ngRMatrix,float32-method\n### *,lgRMatrix,float32-method *,matrix,RsparseMatrix-method\n### *,matrix,ngRMatrix-method *,matrix,lgRMatrix-method\n### *,float32,RsparseMatrix-method *,float32,ngRMatrix-method\n### *,float32,lgRMatrix-method 
&,RsparseMatrix,matrix-method\n### &,ngRMatrix,matrix-method &,lgRMatrix,matrix-method\n### &,matrix,RsparseMatrix-method &,matrix,ngRMatrix-method\n### &,matrix,lgRMatrix-method *,TsparseMatrix,matrix-method\n### *,TsparseMatrix,float32-method *,ngTMatrix,matrix-method\n### *,lgTMatrix,matrix-method *,ngTMatrix,float32-method\n### *,lgTMatrix,float32-method *,matrix,TsparseMatrix-method\n### *,float32,TsparseMatrix-method *,matrix,ngTMatrix-method\n### *,matrix,lgTMatrix-method *,float32,ngTMatrix-method\n### *,float32,lgTMatrix-method &,TsparseMatrix,matrix-method\n### &,ngTMatrix,matrix-method &,lgTMatrix,matrix-method\n### &,matrix,TsparseMatrix-method &,matrix,ngTMatrix-method\n### &,matrix,lgTMatrix-method *,CsparseMatrix,matrix-method\n### *,CsparseMatrix,float32-method *,matrix,CsparseMatrix-method\n### *,float32,CsparseMatrix-method &,CsparseMatrix,matrix-method\n### &,CsparseMatrix,float32-method &,matrix,CsparseMatrix-method\n### &,float32,CsparseMatrix-method +,RsparseMatrix,sparseMatrix-method\n### +,ngRMatrix,sparseMatrix-method +,lgRMatrix,sparseMatrix-method\n### +,sparseMatrix,RsparseMatrix-method +,sparseMatrix,ngRMatrix-method\n### +,sparseMatrix,lgRMatrix-method +,CsparseMatrix,TsparseMatrix-method\n### +,TsparseMatrix,CsparseMatrix-method\n### -,RsparseMatrix,sparseMatrix-method -,ngRMatrix,sparseMatrix-method\n### -,lgRMatrix,sparseMatrix-method -,sparseMatrix,RsparseMatrix-method\n### -,sparseMatrix,ngRMatrix-method -,sparseMatrix,lgRMatrix-method\n### -,CsparseMatrix,TsparseMatrix-method\n### -,TsparseMatrix,CsparseMatrix-method\n### |,RsparseMatrix,sparseMatrix-method |,ngRMatrix,sparseMatrix-method\n### |,lgRMatrix,sparseMatrix-method |,sparseMatrix,RsparseMatrix-method\n### |,sparseMatrix,ngRMatrix-method |,sparseMatrix,lgRMatrix-method\n### |,CsparseMatrix,TsparseMatrix-method\n### |,TsparseMatrix,CsparseMatrix-method *,RsparseMatrix,integer-method\n### *,RsparseMatrix,numeric-method *,RsparseMatrix,logical-method\n### *,integer,RsparseMatrix-method *,numeric,RsparseMatrix-method\n### *,logical,RsparseMatrix-method &,RsparseMatrix,integer-method\n### &,RsparseMatrix,numeric-method &,RsparseMatrix,logical-method\n### &,integer,RsparseMatrix-method &,numeric,RsparseMatrix-method\n### &,logical,RsparseMatrix-method /,RsparseMatrix,integer-method\n### /,RsparseMatrix,numeric-method /,RsparseMatrix,logical-method\n### /,RsparseMatrix,matrix-method /,integer,RsparseMatrix-method\n### /,numeric,RsparseMatrix-method /,logical,RsparseMatrix-method\n### /,matrix,RsparseMatrix-method %%,RsparseMatrix,integer-method\n### %%,RsparseMatrix,numeric-method %%,RsparseMatrix,logical-method\n### %%,RsparseMatrix,matrix-method %%,integer,RsparseMatrix-method\n### %%,numeric,RsparseMatrix-method %%,logical,RsparseMatrix-method\n### %%,matrix,RsparseMatrix-method %/%,RsparseMatrix,integer-method\n### %/%,RsparseMatrix,numeric-method %/%,RsparseMatrix,logical-method\n### %/%,RsparseMatrix,matrix-method %/%,integer,RsparseMatrix-method\n### %/%,numeric,RsparseMatrix-method %/%,logical,RsparseMatrix-method\n### %/%,matrix,RsparseMatrix-method ^,RsparseMatrix,integer-method\n### ^,RsparseMatrix,numeric-method ^,RsparseMatrix,logical-method\n### ^,RsparseMatrix,matrix-method ^,integer,RsparseMatrix-method\n### ^,numeric,RsparseMatrix-method ^,logical,RsparseMatrix-method\n### ^,matrix,RsparseMatrix-method *,TsparseMatrix,integer-method\n### *,TsparseMatrix,numeric-method *,TsparseMatrix,logical-method\n### *,integer,TsparseMatrix-method *,numeric,TsparseMatrix-method\n### 
*,logical,TsparseMatrix-method &,TsparseMatrix,integer-method\n### &,TsparseMatrix,numeric-method &,TsparseMatrix,logical-method\n### &,integer,TsparseMatrix-method &,numeric,TsparseMatrix-method\n### &,logical,TsparseMatrix-method /,TsparseMatrix,integer-method\n### /,TsparseMatrix,numeric-method /,TsparseMatrix,logical-method\n### /,TsparseMatrix,matrix-method /,integer,TsparseMatrix-method\n### /,numeric,TsparseMatrix-method /,logical,TsparseMatrix-method\n### /,matrix,TsparseMatrix-method %%,TsparseMatrix,integer-method\n### %%,TsparseMatrix,numeric-method %%,TsparseMatrix,logical-method\n### %%,TsparseMatrix,matrix-method %%,integer,TsparseMatrix-method\n### %%,numeric,TsparseMatrix-method %%,logical,TsparseMatrix-method\n### %%,matrix,TsparseMatrix-method %/%,TsparseMatrix,integer-method\n### %/%,TsparseMatrix,numeric-method %/%,TsparseMatrix,logical-method\n### %/%,TsparseMatrix,matrix-method %/%,integer,TsparseMatrix-method\n### %/%,numeric,TsparseMatrix-method %/%,logical,TsparseMatrix-method\n### %/%,matrix,TsparseMatrix-method ^,TsparseMatrix,integer-method\n### ^,TsparseMatrix,numeric-method ^,TsparseMatrix,logical-method\n### ^,TsparseMatrix,matrix-method ^,integer,TsparseMatrix-method\n### ^,numeric,TsparseMatrix-method ^,logical,TsparseMatrix-method\n### ^,matrix,TsparseMatrix-method *,RsparseMatrix,sparseVector-method\n### *,sparseVector,RsparseMatrix-method *,matrix,sparseVector-method\n### *,sparseVector,matrix-method *,float32,sparseVector-method\n### *,sparseVector,float32-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nset.seed(1)\nX <- rsparsematrix(4, 3, .5, repr=\"R\")\noptions(\"MatrixExtra.quick_show\" = FALSE)\nX + X\nX * X\nX * as.coo.matrix(X)\nX * 2\nX * 1:4\nX ^ 2\nX ^ (1:4)\n\n### Beware\nset_new_matrix_behavior()\nprint(suppressWarnings(X / 0))\nrestore_old_matrix_behavior()\nprint(suppressWarnings(X / 0))\n\n\n"} {"package":"MatrixExtra","topic":"rbind2-method","snippet":"### Name: rbind2-method\n### Title: Concatenate sparse matrices/vectors by rows\n### Aliases: rbind2-method rbind2,RsparseMatrix,RsparseMatrix-method\n### rbind2,sparseVector,RsparseMatrix-method\n### rbind2,RsparseMatrix,sparseVector-method\n### rbind2,sparseVector,sparseVector-method\n### rbind2,CsparseMatrix,CsparseMatrix-method\n### rbind2,sparseVector,CsparseMatrix-method\n### rbind2,CsparseMatrix,sparseVector-method\n### rbind2,RsparseMatrix,CsparseMatrix-method\n### rbind2,CsparseMatrix,RsparseMatrix-method\n### rbind2,RsparseMatrix,TsparseMatrix-method\n### rbind2,TsparseMatrix,RsparseMatrix-method\n### rbind2,RsparseMatrix,numeric-method\n### rbind2,RsparseMatrix,integer-method\n### rbind2,RsparseMatrix,logical-method\n### rbind2,numeric,RsparseMatrix-method\n### rbind2,integer,RsparseMatrix-method\n### rbind2,logical,RsparseMatrix-method\n### rbind2,TsparseMatrix,TsparseMatrix-method\n### rbind2,TsparseMatrix,sparseVector-method\n### rbind2,sparseVector,TsparseMatrix-method\n### rbind2,TsparseMatrix,CsparseMatrix-method\n### rbind2,CsparseMatrix,TsparseMatrix-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nset.seed(1)\nX <- rsparsematrix(3, 4, .3)\nX <- as(X, \"RsparseMatrix\")\ninherits(rbind2(X, X), \"RsparseMatrix\")\ninherits(rbind(X, X, as.csc.matrix(X), X), \"RsparseMatrix\")\ninherits(rbind2(as.coo.matrix(X), as.coo.matrix(X)), \"TsparseMatrix\")\ninherits(rbind2(as.csc.matrix(X), as.csc.matrix(X)), \"CsparseMatrix\")\n\n\n"} {"package":"MatrixExtra","topic":"rbind_csr","snippet":"### Name: rbind_csr\n### Title: Concatenate inputs 
by rows into a CSR matrix\n### Aliases: rbind_csr\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\noptions(\"MatrixExtra.quick_show\" = FALSE)\nv <- as(1:10, \"sparseVector\")\nrbind_csr(v, v, v)\n\nX <- matrix(1:20, nrow=2)\nrbind_csr(X, v)\n\n\n"} {"package":"MatrixExtra","topic":"show","snippet":"### Name: show\n### Title: Quick Glance at Sparse Objects\n### Aliases: show show,sparseMatrix-method show,sparseVector-method\n### print,sparseVector-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\n\nset.seed(1)\nX <- Matrix::rsparsematrix(5, 5, .2)\nset_new_matrix_behavior()\nshow(X)\nprint(X)\nX\n\nrestore_old_matrix_behavior()\nshow(X)\nprint(X)\nX\n\n\n"} {"package":"MatrixExtra","topic":"slice","snippet":"### Name: slice\n### Title: Sparse Matrices Slicing\n### Aliases: slice [,RsparseMatrix,index,index,logical-method\n### [,RsparseMatrix,missing,index,logical-method\n### [,RsparseMatrix,index,missing,logical-method\n### [,RsparseMatrix,missing,missing,logical-method\n### [,RsparseMatrix,index,index,missing-method\n### [,RsparseMatrix,missing,index,missing-method\n### [,RsparseMatrix,index,missing,missing-method\n### [,RsparseMatrix,missing,missing,missing-method\n### [,ANY,nsparseVector,nsparseVector,logical-method\n### [,ANY,missing,nsparseVector,logical-method\n### [,ANY,nsparseVector,missing,logical-method\n### [,ANY,index,nsparseVector,logical-method\n### [,ANY,nsparseVector,index,logical-method\n### [,ANY,nsparseVector,nsparseVector,missing-method\n### [,ANY,missing,nsparseVector,missing-method\n### [,ANY,nsparseVector,missing,missing-method\n### [,ANY,index,nsparseVector,missing-method\n### [,ANY,nsparseVector,index,missing-method\n### [,ANY,lsparseVector,lsparseVector,logical-method\n### [,ANY,missing,lsparseVector,logical-method\n### [,ANY,lsparseVector,missing,logical-method\n### [,ANY,index,lsparseVector,logical-method\n### [,ANY,lsparseVector,index,logical-method\n### [,ANY,lsparseVector,lsparseVector,missing-method\n### [,ANY,missing,lsparseVector,missing-method\n### [,ANY,lsparseVector,missing,missing-method\n### [,ANY,index,lsparseVector,missing-method\n### [,ANY,lsparseVector,index,missing-method\n### [,CsparseMatrix,index,index,logical-method\n### [,CsparseMatrix,missing,index,logical-method\n### [,CsparseMatrix,index,missing,logical-method\n### [,CsparseMatrix,missing,missing,logical-method\n### [,CsparseMatrix,index,index,missing-method\n### [,CsparseMatrix,missing,index,missing-method\n### [,CsparseMatrix,index,missing,missing-method\n### [,CsparseMatrix,missing,missing,missing-method\n### [,TsparseMatrix,index,index,logical-method\n### [,TsparseMatrix,missing,index,logical-method\n### [,TsparseMatrix,index,missing,logical-method\n### [,TsparseMatrix,missing,missing,logical-method\n### [,TsparseMatrix,index,index,missing-method\n### [,TsparseMatrix,missing,index,missing-method\n### [,TsparseMatrix,index,missing,missing-method\n### [,TsparseMatrix,missing,missing,missing-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nm <- rsparsematrix(20, 20, 0.1, repr=\"R\")\ninherits(m[1:2, ], \"RsparseMatrix\")\ninherits(m[1:2, 3:4], \"RsparseMatrix\")\ninherits(as.coo.matrix(m)[1:2, 3:4], \"TsparseMatrix\")\ninherits(as.csc.matrix(m)[1:2, 3:4], \"CsparseMatrix\")\n\n### New: slice with a sparse vector\nm[as(c(TRUE,FALSE), \"sparseVector\"), ]\n\n### Important!!!\n### This differs from Matrix\nset_new_matrix_behavior()\ninherits(m[1,,drop=TRUE], \"sparseVector\")\n\n### To bring back the old 
behavior:\nrestore_old_matrix_behavior()\ninherits(m[1,,drop=TRUE], \"numeric\")\n\n\n"} {"package":"MatrixExtra","topic":"t_shallow","snippet":"### Name: t_shallow\n### Title: Transpose a sparse matrix by changing its format\n### Aliases: t_shallow t_deep t,RsparseMatrix-method t,CsparseMatrix-method\n### t,TsparseMatrix-method t,dgCMatrix-method t,ngCMatrix-method\n### t,lgCMatrix-method t,dtCMatrix-method t,ntCMatrix-method\n### t,ltCMatrix-method t,dsCMatrix-method t,nsCMatrix-method\n### t,lsCMatrix-method t,sparseVector-method\n\n### ** Examples\n\nlibrary(Matrix)\nlibrary(MatrixExtra)\nset.seed(1)\nX <- rsparsematrix(3, 4, .5, repr=\"C\")\ninherits(X, \"CsparseMatrix\")\nXtrans <- t_shallow(X)\ninherits(Xtrans, \"RsparseMatrix\")\nnrow(X) == ncol(Xtrans)\nncol(X) == nrow(Xtrans)\n\nXorig <- t_shallow(Xtrans)\ninherits(Xorig, \"CsparseMatrix\")\ninherits(t_deep(Xtrans), \"RsparseMatrix\")\n\n### Important!!!\n### This package makes 't_shallow' the default\nset_new_matrix_behavior()\ninherits(X, \"CsparseMatrix\")\ninherits(t(X), \"RsparseMatrix\")\n\n### Can be changed back to 't_deep' like this:\nrestore_old_matrix_behavior()\ninherits(t(X), \"CsparseMatrix\")\n\n\n"} {"package":"GWLelast","topic":"GWLelast.sel.bw","snippet":"### Name: GWLelast.sel.bw\n### Title: GWLelast.sel.bw\n### Aliases: GWLelast.sel.bw\n\n### ** Examples\n\n######################\n# Need to add\n\n\n"} {"package":"PoPdesign","topic":"get.boundary.pop","snippet":"### Name: get.boundary.pop\n### Title: Generate the dose escalation and de-escalation boundaries for\n### single-agent trials.\n### Aliases: get.boundary.pop\n\n### ** Examples\n\n\n## get the dose escalation and de-escalation boundaries for PoP design with\n## the target DLT rate of 0.5, maximum sample size of 30, and cohort size of 3\nbound <- get.boundary.pop(target=0.5, n.cohort = 10, cohortsize = 3,\n cutoff=2.5,K=4,cutoff_e=5/24)\nsummary(bound) # get the descriptive summary of the boundary\nplot(bound) # plot the flowchart of the design along with decision boundaries\n\n\n\n"} {"package":"PoPdesign","topic":"get.oc.pop","snippet":"### Name: get.oc.pop\n### Title: Operating characteristics for single-agent trials\n### Aliases: get.oc.pop\n\n### ** Examples\n\n\n## get the operating characteristics for single-agent trials\noc <- get.oc.pop(target=0.3,n.cohort=10,cohortsize=3,titration=TRUE,\n cutoff=2.5,cutoff_e=5/24,\n skeleton=c(0.3,0.4,0.5,0.6),n.trial=1000,\n risk.cutoff=0.8,earlyterm=TRUE,start=1, seed=123)\n\nsummary(oc) # summarize design operating characteristics\nplot(oc)\n\n\n\n"} {"package":"PoPdesign","topic":"select.mtd.pop","snippet":"### Name: select.mtd.pop\n### Title: Maximum tolerated dose (MTD) selection for single-agent trials\n### Aliases: select.mtd.pop\n\n### ** Examples\n\n\n### select the MTD for PoP trial\nn <- c(4, 4, 16, 8, 0)\ny <- c(0, 0, 5, 5, 0)\nselmtd <- select.mtd.pop(target=0.3,n.pts=n, n.tox=y)\nsummary(selmtd)\nplot(selmtd)\n\n\n\n"} {"package":"PoPdesign","topic":"summary.pop","snippet":"### Name: summary.pop\n### Title: Generate descriptive summary for objects returned by other\n### functions in PoPdesign\n### Aliases: summary.pop\n\n### ** Examples\n\n## summarize the results returned by get.boundary.pop()\nbound <- get.boundary.pop(n.cohort = 10, cohortsize = 3, target=0.3,\n cutoff=exp(1), K=3,cutoff_e=exp(-1))\nsummary(bound)\n\n## summarize the results returned by get.oc.pop()\noc <- get.oc.pop(target=0.3,n.cohort=10,cohortsize=3,titration=TRUE,\n 
cutoff=TRUE,cutoff_e=exp(-1),skeleton=c(0.3,0.4,0.5,0.6),n.trial=1000,\n risk.cutoff=0.8,earlyterm=TRUE,start=1)\nsummary(oc)\n\n### summarize the results returned by select.mtd.pop()\nn <- c(3, 3, 15, 9, 0)\ny <- c(0, 0, 4, 4, 0)\nselmtd <- select.mtd.pop(target=0.3,n.pts=n, n.tox=y)\nsummary(selmtd)\n\n\n\n"} {"package":"carcass","topic":"CIetterson","snippet":"### Name: CIetterson\n### Title: Confidence interval for the functions ettersonEq14,\n### ettersonEq14v1 and ettersonEq14v2\n### Aliases: CIetterson\n### Keywords: misc\n\n### ** Examples\n\nJ <- c(2,3,2,4,3,5,3,2,3,4)\ns <- plogis(seq(0.2, 2, length=sum(J)))\nf <- plogis(seq(1.5, 0.9, length=length(J)))\n\ns.lwr<- plogis(seq(0.2, 2, length=sum(J))-0.5)\nf.lwr <- plogis(seq(1.5, 0.9, length=length(J))-0.3)\n\ns.upr <- plogis(seq(0.2, 2, length=sum(J))+0.5)\nf.upr <- plogis(seq(1.5, 0.9, length=length(J))+0.3)\n\nCIetterson(s=s, s.lwr=s.lwr, s.upr=s.upr, f=f, f.lwr=f.lwr, f.upr=f.upr, J=J, nsim=100)\n # nsim is too low, please, increase!\n\n\n"} {"package":"carcass","topic":"batdist","snippet":"### Name: batdist\n### Title: Distribution of bat carcasses below wind turbines\n### Aliases: batdist\n### Keywords: datasets\n\n### ** Examples\n\ndata(batdist)\nbatdist\n\n\n"} {"package":"carcass","topic":"estimateN","snippet":"### Name: estimateN\n### Title: Estimation of number of killed animals based on carcass searches\n### and estimates for detection probability\n### Aliases: estimateN\n### Keywords: methods misc\n\n### ** Examples\n\n\nestimateN(count=3, f=0.72, f.lower=0.62, f.upper=0.81, s=0.84, s.lower=0.64, \n s.upper=0.94, d=2, pform=\"korner\", n=100, maxn=500, nsim=1000, \n plot=TRUE)\n\nestimateN(count=3, f=0.72, f.lower=0.62, f.upper=0.81, s=0.84, s.lower=0.64, \n s.upper=0.94, d=2, pform=\"huso\", maxn=500, nsim=1000, plot=TRUE)\n\nres.p <- pkorner(f=0.72, f.lower=0.62, f.upper=0.81, s=0.84, s.lower=0.64, s.upper=0.94, \n d=2, n=100, CI=TRUE)\nestimateN(count=3, p=res.p[\"p\"], p.lower=res.p[\"2.5%\"], p.upper=res.p[\"97.5%\"])\n\n\n\n"} {"package":"carcass","topic":"ettersonEq14","snippet":"### Name: ettersonEq14\n### Title: Equation 14 of Etterson (2013) Ecological Applications 23,\n### 1915-1925\n### Aliases: ettersonEq14\n### Keywords: methods misc\n\n### ** Examples\n\n\n# in case of regular search intervals, the calculations below give the same results\nettersonEq14(s=0.8, f=0.8, J=c(3,3,3,3,3))\npkorner(s=0.8, f=0.8, d=3, n=5)\n\n\n # in case of irregular search intervals the function ettersonEq14 is more appropriate\nettersonEq14(s=0.8, f=0.8, J=c(3,5,1,4,2))\npkorner(s=0.8, f=0.8, d=mean(c(3,5,1,4,2)), n=5)\n\n\n\n"} {"package":"carcass","topic":"ettersonEq14v1","snippet":"### Name: ettersonEq14v1\n### Title: Equation 14 of Etterson (2013) Ecological Applications 23,\n### 1915-1925, adapted so that persistence probability and searcher\n### efficiency can vary with calender date\n### Aliases: ettersonEq14v1\n### Keywords: methods misc\n\n### ** Examples\n\n\nJ <- c(2,3,2,4,3,5,3,2,3,4)\ns <- plogis(seq(0.2, 2, length=sum(J)))\nf <- plogis(seq(1.5, 0.9, length=length(J)))\nettersonEq14v1(s,f,J)\n\n\n\n"} {"package":"carcass","topic":"ettersonEq14v2","snippet":"### Name: ettersonEq14v2\n### Title: Equation 14 of Etterson (2013) Ecological Applications 23,\n### 1915-1925, adapted so that persistence probability and searcher\n### efficiency can vary with age of the carcass\n### Aliases: ettersonEq14v2\n### Keywords: methods misc\n\n### ** Examples\n\nJ <- c(2,3,2,4,3,5,3,2,3,4)\ns <- plogis(seq(0.2, 2, 
length=sum(J)))\nf <- plogis(seq(1.5, 0.9, length=length(J)))\nettersonEq14v2(s,f,J)\n\n\n"} {"package":"carcass","topic":"integrate.persistence","snippet":"### Name: integrate.persistence\n### Title: Integrate persistence probability over the discrete time\n### intervals (e.g. days) to account for constant arrival of carcasses\n### Aliases: integrate.persistence\n### Keywords: misc\n\n### ** Examples\n\nintegrate.persistence(0.5)\nintegrate.persistence(c(0.8,0.7,0.6,0.55))\n\n\n"} {"package":"carcass","topic":"perickson","snippet":"### Name: perickson\n### Title: Carcass detection probability according to Erickson et al. 2004\n### Aliases: perickson\n### Keywords: methods misc\n\n### ** Examples\n\nperickson(t.bar=30, f=0.8, d=1)\n\n\n"} {"package":"carcass","topic":"persistence","snippet":"### Name: persistence\n### Title: Times until removal for brown mice carcasses\n### Aliases: persistence\n### Keywords: datasets\n\n### ** Examples\n\ndata(persistence)\nhead(persistence)\n\n\n"} {"package":"carcass","topic":"persistence.prob","snippet":"### Name: persistence.prob\n### Title: Estimates carcass persistence probability based on carcass\n### removal experiment data\n### Aliases: persistence.prob\n### Keywords: methods misc\n\n### ** Examples\n\ndata(persistence)\npersistence.prob(persistence$turbineID, persistence$perstime, persistence$status)\npersistence.prob(persistence$turbineID, persistence$perstime, persistence$status, \n pers.const=TRUE)\n\n\n"} {"package":"carcass","topic":"phuso","snippet":"### Name: phuso\n### Title: Carcass detection probability according to Huso 2010\n### Aliases: phuso\n### Keywords: methods misc\n\n### ** Examples\n\nphuso(s=0.8, f=0.7, d=7)\n\n\n"} {"package":"carcass","topic":"pkorner","snippet":"### Name: pkorner\n### Title: Carcass detection probability according to Korner-Nievergelt et\n### al. 
2011\n### Aliases: pkorner\n### Keywords: methods misc\n\n### ** Examples\n\n\n### Data\n f <- 0.72\n s <- 0.8\n data(persistence)\n attach(persistence)\n sv <- persistence.prob(turbineID, perstime, status)$persistence.prob[,1]\n sv.lower <- persistence.prob(turbineID, perstime, status)$lower[,1]\n sv.upper <- persistence.prob(turbineID, perstime, status)$upper[,1]\n n <- 4\n d <- 3\n \n### Constant search efficiency and constant persistence probability\n pkorner(s=s, f=f, d=d, n=n)\n pkorner(s=s, s.lower=0.6, s.upper=0.9, f=f, f.lower=0.6, f.upper=0.8, \n d=d, n=n, CI=TRUE)\n\n### Decreasing search efficiency and constant persistence probability\n pkorner(s=s, f=f, d=d, n=n, k=0.25, search.efficiency.constant=FALSE)\n\n### Constant search efficiency and decreasing persistence probability\n pkorner(s=sv, f=f, d=d, n=n)\n\n### Decreasing search efficiency and decreasing persistence probability\n pkorner(s=sv, f=f, d=d, n=n, search.efficiency.constant=FALSE)\n pkorner(s=sv, s.lower=sv.lower, s.upper=sv.upper, f=f, f.lower=0.6, \n f.upper=0.8, d=d, n=n, search.efficiency.constant=FALSE, CI=TRUE)\n\n\n\n"} {"package":"carcass","topic":"posteriorN","snippet":"### Name: posteriorN\n### Title: Posterior distribution of the number of fatalities based on the\n### number of carcasses found and the probability of finding a carcass.\n### Aliases: posteriorN posterior.N\n### Keywords: methods misc\n\n### ** Examples\n\nposteriorN(p=0.5, nf=3, dist=TRUE, maxN=15)\n\n\n"} {"package":"carcass","topic":"search.efficiency","snippet":"### Name: search.efficiency\n### Title: Estimates detection probability per person and visibility\n### classes using a binomial model\n### Aliases: search.efficiency\n### Keywords: misc\n\n### ** Examples\n\ndata(searches)\nsearches\n\n# Call to the function with data provided as data.frame:\n## Not run: search.efficiency(searches)\n\n# Alternative:\nper <- searches$person\nvisi <- searches$visibility\ndet <- searches$detected\nnotdet <- searches$notdetected\n## Not run: search.efficiency(person=per, visibility=visi, detected=det, notdetected=notdet)\n\n\n\n"} {"package":"carcass","topic":"searches","snippet":"### Name: searches\n### Title: Data of a searcher efficiency trial\n### Aliases: searches\n### Keywords: datasets\n\n### ** Examples\n\ndata(searches)\nstr(searches)\nsearch.efficiency(searches)\n\n\n"} {"package":"carcass","topic":"shapeparameter","snippet":"### Name: shapeparameter\n### Title: Shapeparameters of a beta-distribution from the mean, the lower\n### and upper limit of the 95% confidence or credible interval\n### Aliases: shapeparameter\n### Keywords: methods misc\n\n### ** Examples\n\n\na <- shapeparameter(0.8, 0.72, 0.88)$a\nb <- shapeparameter(0.8, 0.72, 0.88)$b\nx <- seq(0, 1, by=0.01)\ny <- dbeta(x, a, b)\nplot(x, y, type=\"l\")\n\n\n\n"} {"package":"logbin","topic":"B","snippet":"### Name: B.Iso\n### Title: Defining Smooths in logbin.smooth Formulae\n### Aliases: B Iso\n### Keywords: smooth\n\n### ** Examples\n\n## See example(logbin.smooth) for an example of specifying smooths in model\n## formulae.\n\n\n"} {"package":"logbin","topic":"anova.logbin","snippet":"### Name: anova.logbin\n### Title: Analysis of Deviance for logbin Fits\n### Aliases: anova.logbin anova.logbinlist\n### Keywords: models regression\n\n### ** Examples\n\n## For an example, see example(logbin)\n\n\n"} {"package":"logbin","topic":"confint.logbin","snippet":"### Name: confint.logbin\n### Title: Confidence Intervals for logbin Model Parameters\n### Aliases: confint.logbin\n### 
Keywords: models\n\n### ** Examples\n\n## For an example, see example(logbin)\n\n\n"} {"package":"logbin","topic":"contr.isotonic.rev","snippet":"### Name: contr.isotonic.rev\n### Title: Contrast Matrix for Reversed Isotonic Covariate\n### Aliases: contr.isotonic.rev\n### Keywords: design\n\n### ** Examples\n\ncontr.isotonic.rev(4,1:4)\ncontr.isotonic.rev(4,c(1,3,2,4))\n\n# Show how contr.isotonic.rev applies within model.matrix\nx <- factor(round(runif(20,0,2)))\nmf <- model.frame(~x)\ncontrasts(x) <- contr.isotonic.rev(levels(x), levels(x))\nmodel.matrix(mf)\n\n\n"} {"package":"logbin","topic":"interpret.logbin.smooth","snippet":"### Name: interpret.logbin.smooth\n### Title: Interpret a logbin.smooth Formula\n### Aliases: interpret.logbin.smooth\n### Keywords: smooth models\n\n### ** Examples\n\n# Specify a smooth model with knot.range\nres <- interpret.logbin.smooth(y ~ B(x, knot.range = 0:2) + x2)\n# The knot.range is removed from the full.formula...\nprint(res$full.formula)\n# ...but is stored in the $smooth.spec component of the result:\nprint(res$smooth.spec$x$knot.range)\n\n\n"} {"package":"logbin","topic":"logbin-package","snippet":"### Name: logbin-package\n### Title: Relative Risk Regression Using the Log-Binomial Model\n### Aliases: logbin-package\n### Keywords: package regression\n\n### ** Examples\n\n## For examples, see example(logbin) and example(logbin.smooth)\n\n\n"} {"package":"logbin","topic":"logbin","snippet":"### Name: logbin\n### Title: Log-Binomial Regression\n### Aliases: logbin\n### Keywords: models regression\n\n### ** Examples\n\nrequire(glm2)\ndata(heart)\n\n#======================================================\n# Model with periodic non-convergence when glm is used\n#======================================================\n\nstart.p <- sum(heart$Deaths) / sum(heart$Patients)\n\nfit.glm <- glm(cbind(Deaths, Patients-Deaths) ~ factor(AgeGroup) + factor(Severity) +\n factor(Delay) + factor(Region), family = binomial(log), \n start = c(log(start.p), rep(c(0.2, 0.4), 4)), data = heart,\n trace = TRUE, maxit = 100)\n\nfit.logbin <- logbin(formula(fit.glm), data = heart, \n start = c(log(start.p), rep(c(0.2, 0.4), 4)),\n trace = 1)\nsummary(fit.logbin)\n\n# Speed up convergence by using single EM algorithm\nfit.logbin.em <- update(fit.logbin, method = \"em\")\n\n# Speed up convergence by using acceleration methods\nfit.logbin.acc <- update(fit.logbin, accelerate = \"squarem\")\nfit.logbin.em.acc <- update(fit.logbin.em, accelerate = \"squarem\")\n\n## No test: \n#=============================\n# Model with interaction term\n#=============================\n\nheart$AgeSev <- 10 * heart$AgeGroup + heart$Severity\n\nfit.logbin.int <- logbin(cbind(Deaths, Patients-Deaths) ~ factor(AgeSev) +\n factor(Delay) + factor(Region), data = heart, trace = 1, maxit = 100000)\n \nsummary(fit.logbin.int)\nvcov(fit.logbin.int)\nconfint(fit.logbin.int)\nsummary(predict(fit.logbin.int, type = \"response\"))\nanova(fit.logbin, fit.logbin.int, test = \"Chisq\")\n## End(No test)\n\n\n"} {"package":"logbin","topic":"logbin.control","snippet":"### Name: logbin.control\n### Title: Auxiliary for Controlling logbin Fitting\n### Aliases: logbin.control\n### Keywords: optimize models\n\n### ** Examples\n\n## Variation on example(glm.control) :\n\nevts <- c(18,17,15,20,10,20,25,13,12)\nobs <- rep(30,9)\noutcome <- gl(3,1,9)\ntreatment <- gl(3,3)\noo <- options(digits = 12)\nlogbin.D93X <- logbin(cbind(evts,obs-evts) ~ outcome + treatment, trace = 2, epsilon = 
1e-2)\noptions(oo)\ncoef(logbin.D93X)\n\n\n"} {"package":"logbin","topic":"logbin.smooth","snippet":"### Name: logbin.smooth\n### Title: Smooth Log-Binomial Regression\n### Aliases: logbin.smooth\n### Keywords: regression smooth\n\n### ** Examples\n\n## Simple example\nx <- c(0.3, 0.2, 0.0, 0.1, 0.2, 0.1, 0.7, 0.2, 1.0, 0.9)\ny <- c(5, 4, 6, 4, 7, 3, 6, 5, 9, 8)\nsystem.time(m1 <- logbin.smooth(cbind(y, 10-y) ~ B(x, knot.range = 0:2), mono = 1, trace = 1))\n## Compare with accelerated version\nsystem.time(m1.acc <- update(m1, accelerate = \"squarem\"))\n## Isotonic relationship\nm2 <- logbin.smooth(cbind(y, 10-y) ~ Iso(x))\n## No test: \nplot(m1)\nplot(m2)\n## End(No test)\nsummary(predict(m1, type = \"response\"))\nsummary(predict(m2, type = \"response\"))\n\n\n"} {"package":"logbin","topic":"plot.logbin.smooth","snippet":"### Name: plot.logbin.smooth\n### Title: Default logbin.smooth Plotting\n### Aliases: plot.logbin.smooth\n### Keywords: models regression smooth\n\n### ** Examples\n\n## For an example, see example(logbin.smooth)\n\n\n"} {"package":"logbin","topic":"predict.logbin","snippet":"### Name: predict.logbin\n### Title: Predict Method for logbin Fits\n### Aliases: predict.logbin\n### Keywords: models regression\n\n### ** Examples\n\n## For an example, see example(logbin)\n\n\n"} {"package":"logbin","topic":"predict.logbin.smooth","snippet":"### Name: predict.logbin.smooth\n### Title: Predict Method for logbin.smooth Fits\n### Aliases: predict.logbin.smooth\n### Keywords: models regression smooth\n\n### ** Examples\n\n## For an example, see example(logbin.smooth)\n\n\n"} {"package":"logbin","topic":"summary.logbin","snippet":"### Name: summary.logbin\n### Title: Summarising logbin Model Fits\n### Aliases: summary.logbin print.summary.logbin\n### Keywords: models regression\n\n### ** Examples\n\n## For examples see example(logbin)\n\n\n"} {"package":"logbin","topic":"vcov.logbin","snippet":"### Name: vcov.logbin\n### Title: Calculate Variance-Covariance Matrix for a Fitted logbin Model\n### Object\n### Aliases: vcov.logbin\n### Keywords: models regression\n\n### ** Examples\n\n## For an example see example(logbin)\n\n\n"} {"package":"fat2Lpoly","topic":"fat2Lpoly","snippet":"### Name: fat2Lpoly\n### Title: Two-locus Family-based Association Test with Polytomous Outcome\n### Aliases: fat2Lpoly\n\n### ** Examples\n\npath.data=paste(.libPaths()[which(unlist(lapply(.libPaths(),\nfunction(x) length(grep(\"fat2Lpoly\",dir(x)))))>0)],\"/fat2Lpoly/extdata/\",sep=\"\")\nif(length(path.data)>1) path.data=path.data[length(path.data)]\n\nsnps.anal=c(\"snp3.loc2\",\"snp4.loc2\")\nmicrosat.names.loc2=c(\"2_3_mrk:\",\"2_4_mrk:\")\n\n############ design.endo2disease with conditioning on locus 1 ################\n## Not run: \n##D joint.tests=list(c(2,5))\n##D snp.names.mat=cbind(rep(\"snp4.loc1\",length(snps.anal)),snps.anal)\n##D microsat.names.mat=cbind(rep(\"1_4_mrk:\",length(snps.anal)),microsat.names.loc2)\n##D test=fat2Lpoly(pedfilenames=paste(path.data,c(\"loc1.ped\",\"loc2.ped\"),sep=\"\"),\n##D datfilenames=paste(path.data,c(\"loc1.dat\",\"loc2.dat\"),sep=\"\"),\n##D \t\t\t freq.data=paste(path.data,c(\"loc1.freq\",\"loc2.freq\"),sep=\"\"),\n##D ibdfilenames=paste(path.data,c(\"loc1.ibd\",\"loc2.ibd\"),sep=\"\"),\n##D \t\t snp.names.mat=snp.names.mat,ibd.loci=microsat.names.mat,\n##D \t\t joint.tests=joint.tests,contingency.file=TRUE,\n##D \t\t design.constraint=design.endo2disease,lc=1)\n##D \n##D test$p.values.scores\n## End(Not run)\t\t 
\n###############################################################################\n\n################### design.endo2disease without conditioning ##################\njoint.tests=list(c(2,5))\nsnp.names.mat=cbind(rep(\"snp4.loc1\",length(snps.anal)),snps.anal)\nmicrosat.names.mat=cbind(rep(\"1_4_mrk:\",length(snps.anal)),microsat.names.loc2)\ntest=fat2Lpoly(pedfilenames=paste(path.data,c(\"loc1.ped\",\"loc2.ped\"),sep=\"\"),\n datfilenames=paste(path.data,c(\"loc1.dat\",\"loc2.dat\"),sep=\"\"),\n\t\t\t freq.data=paste(path.data,c(\"loc1.freq\",\"loc2.freq\"),sep=\"\"),\n ibdfilenames=paste(path.data,c(\"loc1.ibd\",\"loc2.ibd\"),sep=\"\"),\n\t\t snp.names.mat=snp.names.mat,ibd.loci=microsat.names.mat,\n\t\t joint.tests=joint.tests,contingency.file=FALSE,\n\t\t design.constraint=design.endo2disease)\n\ntest$p.values.scores \n###############################################################################\n\n################# design.full with conditioning on locus 1 ##################\n## Not run: \n##D joint.tests=list(c(2,3),c(5,6),c(8,9),c(2,3,5,6,8,9))\n##D snp.names.mat=cbind(rep(\"snp4.loc1\",length(snps.anal)),snps.anal)\n##D microsat.names.mat=cbind(rep(\"1_4_mrk:\",length(snps.anal)),microsat.names.loc2)\n##D test=fat2Lpoly(pedfilenames=paste(path.data,c(\"loc1.ped\",\"loc2.ped\"),sep=\"\"),\n##D datfilenames=paste(path.data,c(\"loc1.dat\",\"loc2.dat\"),sep=\"\"),\n##D \t\t\t freq.data=paste(path.data,c(\"loc1.freq\",\"loc2.freq\"),sep=\"\"),\n##D ibdfilenames=paste(path.data,c(\"loc1.ibd\",\"loc2.ibd\"),sep=\"\"),\n##D \t\t snp.names.mat=snp.names.mat,ibd.loci=microsat.names.mat,\n##D \t\t joint.tests=joint.tests,\n##D design.constraint=design.full,lc=1)\n##D \n##D test$p.values.scores\n## End(Not run)\n##############################################################################\n\n############################# design.1locus #################################\nsnp.names.mat=as.matrix(snps.anal)\nmicrosat.names.mat=as.matrix(microsat.names.loc2)\ntest=fat2Lpoly(pedfilenames=paste(path.data,\"loc2.ped\",sep=\"\"),\n datfilenames=paste(path.data,\"loc2.dat\",sep=\"\"),\n freq.data=paste(path.data,\"loc2.freq\",sep=\"\"),\n\t\t\t ibdfilenames=paste(path.data,\"loc2.ibd\",sep=\"\"),\n\t\t snp.names.mat=snp.names.mat,ibd.loci=microsat.names.mat,\n\t\t\t design.constraint=design.1locus)\n\ntest$p.values.scores\t\t\t \n##############################################################################\n\n############# design.dichotomous with conditioning on locus 1 ##############\n## Not run: \n##D joint.tests=list(c(2,3))\n##D snp.names.mat=cbind(rep(\"snp4.loc1\",length(snps.anal)),snps.anal)\n##D microsat.names.mat=cbind(rep(\"1_4_mrk:\",length(snps.anal)),microsat.names.loc2)\n##D test=fat2Lpoly(pedfilenames=paste(path.data,c(\"loc1.ped\",\"loc2.ped\"),sep=\"\"),\n##D datfilenames=paste(path.data,c(\"loc1.dat\",\"loc2.dat\"),sep=\"\"),\n##D \t\t\t freq.data=paste(path.data,c(\"loc1.freq\",\"loc2.freq\"),sep=\"\"),\n##D ibdfilenames=paste(path.data,c(\"loc1.ibd\",\"loc2.ibd\"),sep=\"\"),\n##D \t\t snp.names.mat=snp.names.mat,ibd.loci=microsat.names.mat,\n##D \t\t joint.tests=joint.tests,\n##D design.constraint=design.dichotomous,lc=1)\n##D \n##D test$p.values.scores\n## End(Not run)\t\t\t \n##############################################################################\n\n\n"} {"package":"fat2Lpoly","topic":"fat2Lpoly.allSNPs","snippet":"### Name: fat2Lpoly.allSNPs\n### Title: Example results output by the function 'fat2Lpoly.withinR'\n### Aliases: fat2Lpoly.allSNPs\n### Keywords: 
datasets\n\n### ** Examples\n\ndata(fat2Lpoly.allSNPs)\n\n\n"} {"package":"fat2Lpoly","topic":"fat2Lpoly.withinR","snippet":"### Name: fat2Lpoly.withinR\n### Title: Two-locus Family-based Association Test with Polytomous Outcome\n### (all arguments within R)\n### Aliases: fat2Lpoly.withinR\n\n### ** Examples\n\ndata(ped.x.all)\n\n## Not run: \n##D snp.names.mat=cbind(rep(\"snp4.loc1\",2),c(\"snp3.loc2\",\"snp4.loc2\"))\t\n##D microsat.names.mat=cbind(rep(\"1_4_mrk:\",2),c(\"2_3_mrk:\",\"2_4_mrk:\"))\t\n##D fat2Lpoly.allSNPs=fat2Lpoly.withinR(ped.x.all,snp.names.mat,ibd.loci=\n##D microsat.names.mat,contingency.file=TRUE,\n##D \t\t\t\t\t\t\tdesign.constraint=design.endo2disease,\n##D \t\t\t\t\t\t\tlc=1)\n##D \n##D joint.tests=list(c(2,5)) \n##D get.scores.pvalues(fat2Lpoly.allSNPs,joint.tests)\n## End(Not run)\n\n\n"} {"package":"fat2Lpoly","topic":"get.scores.pvalues","snippet":"### Name: get.scores.pvalues\n### Title: function to compute scores and p-values\n### Aliases: get.scores.pvalues\n\n### ** Examples\n\ndata(fat2Lpoly.allSNPs)\n\t\t\t \njoint.tests=list(c(2,5),c(3,4))\n\nget.scores.pvalues(fat2Lpoly.allSNPs, joint.tests)\t\n\t\t\t \n# snp.cond snp.test global_p params.joint_2-5_p params.joint_3-4_p param_1_score \n# 1 snp4.loc1 snp2.loc2 5.80e-03 7.12e-01 0.000954 0.449 \n# 2 snp4.loc1 snp4.loc2 2.14e-07 1.24e-05 0.000954 0.449 \n# 3 snp4.loc1 snp5.loc2 1.14e-03 1.44e-01 0.000954 0.449 \n# 4 snp4.loc1 snp6.loc2 5.59e-04 3.84e-02 0.000954 0.449 \n# 5 snp4.loc1 snp8.loc2 1.15e-03 1.55e-01 0.000954 0.449 \n# param_2_score param_3_score param_4_score param_5_score param_1_p param_2_p\n# 0.333 -1.427 3.638 0.733 0.653 0.739\n# 0.890 -1.427 3.638 4.612 0.653 0.373\t\t\n# 0.776 -1.427 3.638 1.785 0.653 0.438\n# -0.082 -1.427 3.638 2.553 0.653 0.934\n# 0.869 -1.427 3.638 1.695 0.653 0.385\t\t\n# param_3_p param_4_p param_5_p\n# 1 0.154 0.000275 0.464000\n# 2 0.154 0.000275 0.000004\n# 3 0.154 0.000275 0.074200\n# 4 0.154 0.000275 0.010700\n# 5 0.154 0.000275 0.090100\t\n\n\n"} {"package":"fat2Lpoly","topic":"ped.x.all","snippet":"### Name: ped.x.all\n### Title: Example dataset returned by the function 'read.merlin.files'\n### Aliases: ped.x.all\n### Keywords: datasets\n\n### ** Examples\n\ndata(ped.x.all)\n\n\n"} {"package":"fat2Lpoly","topic":"read.merlin.files","snippet":"### Name: read.merlin.files\n### Title: function to read input files in Merlin format\n### Aliases: read.merlin.files\n\n### ** Examples\n\npath.data=paste(.libPaths()[which(unlist(lapply(.libPaths(),\nfunction(x) length(grep(\"fat2Lpoly\",dir(x)))))>0)],\n\"/fat2Lpoly/extdata/\",sep=\"\")\nif(length(path.data)>1) path.data=path.data[length(path.data)]\n\ninput.data=read.merlin.files(pedfilenames=\n paste(path.data,c(\"loc1.ped\",\"loc2.ped\"),sep=\"\"),\n datfilenames=\n\t\t\t\tpaste(path.data,c(\"loc1.dat\",\"loc2.dat\"),sep=\"\"),\n\t\t\t freq.data=\n\t\t\t paste(path.data,c(\"loc1.freq\",\"loc2.freq\"),sep=\"\"),\n ibdfilenames=\n\t\t\t\tpaste(path.data,c(\"loc1.ibd\",\"loc2.ibd\"),sep=\"\"))\n\ninput.data2=read.merlin.files(pedfilenames=\n\t\t\t\t\tpaste(path.data,\"loc2.ped\",sep=\"\"),\n datfilenames=\n paste(path.data,\"loc2.dat\",sep=\"\"),\n freq.data=\n paste(path.data,\"loc2.freq\",sep=\"\"),\n\t\t\t\t\tibdfilenames=\n\t\t\t\t\tpaste(path.data,\"loc2.ibd\",sep=\"\"))\n\n\n"} {"package":"rsinaica","topic":"params_sinaica","snippet":"### Name: params_sinaica\n### Title: Valid air quality parameters\n### Aliases: params_sinaica\n\n### ** Examples\n\nhead(params_sinaica)\n\n\n"} 
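The fat2Lpoly examples above locate the package's extdata directory by scanning .libPaths() by hand. As a minimal sketch (not the package's documented idiom), base R's system.file() usually does the same job in one call; it returns "" when the package is not installed, so the result is worth checking:

# Hedged alternative to the manual .libPaths() scan in the examples above
path.data <- system.file("extdata", package = "fat2Lpoly")
stopifnot(nzchar(path.data))
pedfiles <- file.path(path.data, c("loc1.ped", "loc2.ped"))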
{"package":"rsinaica","topic":"sinaica_param_data","snippet":"### Name: sinaica_param_data\n### Title: Get air quality data from all stations by parameter\n### Aliases: sinaica_param_data\n\n### ** Examples\n\n## Not run: \n##D ## May take several seconds\n##D df <- sinaica_param_data(\"O3\", \"2015-10-14\", \"2015-10-14\")\n##D head(df)\n## End(Not run)\n\n\n"} {"package":"rsinaica","topic":"sinaica_station_data","snippet":"### Name: sinaica_station_data\n### Title: Get air quality data from a single measuring station\n### Aliases: sinaica_station_data\n\n### ** Examples\n\nstations_sinaica[which(stations_sinaica$station_name == \"Xalostoc\"), 1:5]\ndf <- sinaica_station_data(271, \"O3\", \"2015-09-11\", \"2015-09-11\", \"Crude\")\nhead(df)\n\n\n\n"} {"package":"rsinaica","topic":"sinaica_station_dates","snippet":"### Name: sinaica_station_dates\n### Title: Dates supported by a station\n### Aliases: sinaica_station_dates\n\n### ** Examples\n\n## id 271 is Xalostoc. See `stations_sinaica`\ndf <- sinaica_station_dates(271, \"Manual\")\nhead(df)\n\n\n"} {"package":"rsinaica","topic":"sinaica_station_params","snippet":"### Name: sinaica_station_params\n### Title: Parameters supported by a station\n### Aliases: sinaica_station_params\n\n### ** Examples\n\n## id 271 is Xalostoc. See `stations_sinaica`\ndf <- sinaica_station_params(271, \"Crude\")\nhead(df)\n\n\n"} {"package":"rsinaica","topic":"stations_sinaica","snippet":"### Name: stations_sinaica\n### Title: Air quality measuring stations in Mexico\n### Aliases: stations_sinaica\n\n### ** Examples\n\nhead(stations_sinaica)\n\n\n"} {"package":"dendrometry","topic":"Logging","snippet":"### Name: Logging\n### Title: Tree metrics for logging\n### Aliases: Logging\n### Keywords: datasets\n\n### ** Examples\n\n#demo(volume)\n\n\n"} {"package":"dendrometry","topic":"Tree","snippet":"### Name: Tree\n### Title: Dendrometric measures on tree\n### Aliases: Tree\n### Keywords: datasets\n\n### ** Examples\n\n#demo(dendro)\n\n\n"} {"package":"dendrometry","topic":"angle2slope","snippet":"### Name: angle2slope\n### Title: Angle to slope\n### Aliases: angle2slope\n\n### ** Examples\n\nangle2slope(10)\nangle2slope(angle = 45)\nangle2slope(angle = 50, angleUnit = \"deg\")\nangle2slope(1.047198, \"rad\")\nangle2slope(0.2617994, angleUnit = \"rad\")\n\n\n"} {"package":"dendrometry","topic":"basal_i","snippet":"### Name: basal_i\n### Title: Individual basal area\n### Aliases: basal_i basal2dbh\n\n### ** Examples\n\nbasal_i(dbh = 10)\nbasal_i(circum = 31.41)\nbasal2dbh(78.53982)\n\n\n"} {"package":"dendrometry","topic":"circum","snippet":"### Name: circum\n### Title: Circumference or perimeter\n### Aliases: circum\n\n### ** Examples\n\nx = seq(1, 5, .4)\ncircum(x)\n\n\n"} {"package":"dendrometry","topic":"dbh","snippet":"### Name: dbh\n### Title: Diameter or DBH\n### Aliases: dbh\n\n### ** Examples\n\nx = seq(1, 5, .4)\ndbh(x)\n\n\n"} {"package":"dendrometry","topic":"decrease","snippet":"### Name: decrease\n### Title: The decrease coefficient\n### Aliases: decrease\n\n### ** Examples\n\ndecrease(30, 120)\ndecrease(middle = 40, breast = 90)\n\n\n"} {"package":"dendrometry","topic":"decreaseMetric","snippet":"### Name: decreaseMetric\n### Title: Metric scrolling or decay\n### Aliases: decreaseMetric\n\n### ** Examples\n\ndecreaseMetric(dmh = 40, dbh = 90, mh = 7)\ndecreaseMetric(45, 85, 9)\n\n\n"} {"package":"dendrometry","topic":"deg","snippet":"### Name: deg\n### Title: Radians to degrees\n### Aliases: deg\n\n### ** Examples\n\ndeg(pi/2)\n\n\n"} 
{"package":"dendrometry","topic":"diameterMean","snippet":"### Name: diameterMean\n### Title: Mean diameter\n### Aliases: diameterMean\n\n### ** Examples\n\nset.seed(1)\ndiameter = rnorm(10, 100, 20)\ndiameterMean(dbh = diameter)\n\n\n"} {"package":"dendrometry","topic":"distanceH","snippet":"### Name: distanceH\n### Title: Horizontal distance\n### Aliases: distanceH\n\n### ** Examples\n\ndistanceH(20, 30)\ndistanceH(20, angle = 30, type = \"slope\")\ndistanceH(20, angle = 25, type = \"angle\")\n\n\n"} {"package":"dendrometry","topic":"fiboRate","snippet":"### Name: fiboRate\n### Title: Fibonacci series ratio\n### Aliases: fiboRate\n\n### ** Examples\n\n##Golden number (Le Nombre d'Or)\nfiboRate(n = 18, PrintSer = FALSE, Uo = 0, U1 = 1)\n##(1+sqrt(5))/2\nfiboRate(n = 10, PrintSer = TRUE, Uo = 0, U1 = 1)\n\n\n"} {"package":"dendrometry","topic":"fibonacci","snippet":"### Name: fibonacci\n### Title: Fibonacci series\n### Aliases: fibonacci\n\n### ** Examples\n\nfibonacci(n = 10, PrintFib = TRUE)\nfibonacci(n = 10, Uo = 1, U1 = 3, PrintFib = FALSE)\n\n\n"} {"package":"dendrometry","topic":"height","snippet":"### Name: height\n### Title: Height of tree or vertical object.\n### Aliases: height\n\n### ** Examples\n\nheight(10, 80, 17)\nheight(17, top = -18, base = -113)\nheight(distance = 18, top = 42, base = -12, type = \"angle\", angleUnit = \"deg\")\nheight(distance = 18:21, top = 42:45, base = -12:-15, type = \"angle\", angleUnit = \"deg\")\n## Bellow shows warning messages\nheight(distance = 18:21, top = -42:-45, base = -12:-15, type = \"angle\", angleUnit = \"deg\")\n\n\n"} {"package":"dendrometry","topic":"loreyHeight","snippet":"### Name: loreyHeight\n### Title: Lorey's mean height\n### Aliases: loreyHeight\n\n### ** Examples\n\nset.seed(1)\ndonnee <- data.frame(hauteur = rnorm(10, 12, 3), area = basal_i(rnorm(10, 100, 20)))\nloreyHeight(basal = donnee$area, height = donnee$hauteur)\n\n\n"} {"package":"dendrometry","topic":"makedata","snippet":"### Name: makedata\n### Title: Make stand data\n### Aliases: makedata\n\n### ** Examples\n\n# require(BiodiversityR)\n# data(ifri, package = \"BiodiversityR\")\n#a1=makedata(ifri, factor1 = \"forest\", factor2 = \"plotID\", factor3 = \"species\")\n#a2=makedata(ifri, factor1 = \"species\")\n#makedata(ifri, factor2 = \"\")\n#identical(makedata(ifri), ifri)\n\n\n"} {"package":"dendrometry","topic":"principal","snippet":"### Name: principal\n### Title: Principal measure\n### Aliases: principal\n\n### ** Examples\n\nprincipal(303)\nprincipal(23 * pi/8, \"rad\")\n\n\n"} {"package":"dendrometry","topic":"rad","snippet":"### Name: rad\n### Title: Degrees to radians\n### Aliases: rad\n\n### ** Examples\n\nrad(180)\n\n\n"} {"package":"dendrometry","topic":"reducecoef","snippet":"### Name: reducecoef\n### Title: The reduction coefficient\n### Aliases: reducecoef\n\n### ** Examples\n\nreducecoef(30, 120)\nreducecoef(middle = 40, breast = 90)\n\n\n"} {"package":"dendrometry","topic":"sampleSize","snippet":"### Name: sampleSize\n### Title: Sample size\n### Aliases: sampleSize\n\n### ** Examples\n\nsampleSize(confLev = .95, popPro = 0.4, errorMargin = .05)\nsampleSize(confLev = .95, popPro = 0.5, errorMargin = .05, size = 150)\nsampleSize(confLev = .95, popPro = 0.5, errorMargin = .05, size = 150,\nmethod = \"cauchran\")\nsampleSize()\n\n\n\n"} {"package":"dendrometry","topic":"shape","snippet":"### Name: shape\n### Title: The shape coefficient\n### Aliases: shape\n\n### ** Examples\n\nshape(volume = 10000, 11, dbh = 40)\nshape(volume = 10000, 11, 
40)\nshape(volume = 10000, 11, basal = 2256.637)\n## Below gives warning\nshape(volume = 10000, height = 11, dbh = 40, basal = 2256.637)\n\n\n"} {"package":"dendrometry","topic":"skewness","snippet":"### Name: skewness\n### Title: Skewness coefficient\n### Aliases: skewness\n\n### ** Examples\n\ndata(\"Logging\")\nskewness(Logging$hauteur)\nhist(Logging$hauteur,3)\n\n\n"} {"package":"dendrometry","topic":"slope2angle","snippet":"### Name: slope2angle\n### Title: Slope to angle\n### Aliases: slope2angle\n\n### ** Examples\n\nslope2angle(100)\nslope2angle(17.6327)\nslope2angle(angle2slope(30))\n\n\n"} {"package":"dendrometry","topic":"volume","snippet":"### Name: volume\n### Title: Tree stem and log Volume\n### Aliases: volume\n\n### ** Examples\n\n## huber method\nvolume(height = 10, dm = 35)\nvolume(height = 10, circum = 100)\n\n## smalian method\nvolume(height = 10, do = 45, ds = 15, method = \"smalian\")\nvolume(height = 10, circumo = 200, circums = 110, method = \"smalian\")\n\n## cone method\nvolume(height = 10, do = 45, ds = 15, method = \"cone\")\nvolume(height = 10, circumo = 200, circums = 110, method = \"cone\")\n\n## newton method\nvolume(height = 10, dm = 35, do = 45, ds = 15, method = \"newton\")\nvolume(height = 10, circum = 100, circumo = 200, circums = 110, method = \"newton\")\n\n\n"} {"package":"colormap","topic":"colormap","snippet":"### Name: colormap\n### Title: A package to generate colors from a list of 44 pre-defined\n### palettes\n### Aliases: colormap colormap-package\n\n### ** Examples\n\ncolormap() # Defaults to 72 colors from the 'viridis' palette.\ncolormap(colormap=colormaps$temperature, nshades=20) # Diff Palette\ncolormap(colormap=c('#000000','#FF0000'), nshades=20) # Colormap as vector of colors\n# list of list. Maximum flexibility\ncolormap(colormap=list(list(index=0,rgb=c(0,0,0)),list(index=1,rgb=c(255,255,255))), nshades=10)\ncolormap(format='rgb',nshades=10) # As rgb\ncolormap(format='rgb',nshades=10,alpha=0.5) # Constant alpha\ncolormap(format='rgbaString',nshades=10) # As rgba string\n\n\n"} {"package":"colormap","topic":"colormap_pal","snippet":"### Name: colormap_pal\n### Title: Create a Palette generating function\n### Aliases: colormap_pal\n\n### ** Examples\n\nscales::show_col(colormap_pal()(10))\nscales::show_col(colormap_pal(colormap=colormaps$viridis)(100), labels=FALSE)\n\n\n\n"} {"package":"tmvmixnorm","topic":"dtuvn","snippet":"### Name: dtuvn\n### Title: Density function of truncated univariate normal distribution\n### Aliases: dtuvn\n\n### ** Examples\n\ndtuvn(x= -3:3, mean=0, sd=1, lower= -2, upper=2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"exp_acc_opt","snippet":"### Name: exp_acc_opt\n### Title: Acceptance rate of translated-exponential rejection sampling\n### Aliases: exp_acc_opt\n\n### ** Examples\n\nset.seed(1203)\nexp_acc_opt(1,2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"exp_rej","snippet":"### Name: exp_rej\n### Title: Translated-exponential rejection sampling\n### Aliases: exp_rej\n\n### ** Examples\n\nset.seed(1)\nexp_rej(a=1, b=Inf)\n\n\n\n"} {"package":"tmvmixnorm","topic":"halfnorm_acc","snippet":"### Name: halfnorm_acc\n### Title: Acceptance rate of half-normal rejection sampling\n### Aliases: halfnorm_acc\n\n### ** Examples\n\nset.seed(1203)\nhalfnorm_acc(1,2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"halfnorm_rej","snippet":"### Name: halfnorm_rej\n### Title: Half-normal rejection sampling\n### Aliases: halfnorm_rej\n\n### ** Examples\n\nset.seed(1)\nhalfnorm_rej(a=1, b=Inf)\n\n\n\n"} 
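The *_acc functions above report acceptance rates, so the two rejection samplers can be compared on the same truncation region before drawing from it. A brief sketch, assuming the interval [1, 2] used in the examples:

# Higher acceptance means fewer wasted proposals on [1, 2]
set.seed(1203)
c(half_normal = halfnorm_acc(1, 2), translated_exp = exp_acc_opt(1, 2))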
{"package":"tmvmixnorm","topic":"imp","snippet":"### Name: imp\n### Title: Rejection sampling of standardized truncated univariate normal\n### distribution\n### Aliases: imp\n\n### ** Examples\n\nimp(1,Inf) # Case 1: [a,infty)\nimp(-1,1) # Case 2: 0 in [a,b], a<0=0\n\n\n\n"} {"package":"tmvmixnorm","topic":"imp_acc","snippet":"### Name: imp_acc\n### Title: Acceptance rate of truncated univariate normal distribution\n### rejection sampling\n### Aliases: imp_acc\n\n### ** Examples\n\nimp_acc(1,Inf) # Case 1: [a,infty)\nimp_acc(-1,1) # Case 2: 0 in [a,b], a<0=0\n\n\n\n"} {"package":"tmvmixnorm","topic":"norm_acc","snippet":"### Name: norm_acc\n### Title: Acceptance rate of normal rejection sampling\n### Aliases: norm_acc\n\n### ** Examples\n\nset.seed(1203)\nnorm_acc(1,2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"norm_rej","snippet":"### Name: norm_rej\n### Title: Normal rejection sampling\n### Aliases: norm_rej\n\n### ** Examples\n\nset.seed(1)\nnorm_rej(a=1, b=Inf)\n\n\n\n"} {"package":"tmvmixnorm","topic":"ptuvn","snippet":"### Name: ptuvn\n### Title: Distribution function of truncated univariate normal\n### distribution\n### Aliases: ptuvn\n\n### ** Examples\n\nptuvn(x= -3:3, mean=0, sd=1 ,lower= -2, upper=2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"rtmvn","snippet":"### Name: rtmvn\n### Title: Random number generation for truncated multivariate normal\n### distribution subject to linear inequality constraints\n### Aliases: rtmvn\n\n### ** Examples\n\n# Example for full rank with strong dependence\nd <- 3\nrho <- 0.9\nSigma <- matrix(0, nrow=d, ncol=d)\nSigma <- rho^abs(row(Sigma) - col(Sigma))\n\nD1 <- diag(1,d) # Full rank\n\nset.seed(1203)\nans.1 <- rtmvn(n=1000, Mean=1:d, Sigma, D=D1, lower=rep(-1,d), upper=rep(1,d),\nint=rep(0,d), burn=50)\n\napply(ans.1, 2, summary)\n\n# Example for non-full rank\nd <- 3\nrho <- 0.5\nSigma <- matrix(0, nrow=d, ncol=d)\nSigma <- rho^abs(row(Sigma) - col(Sigma))\n\nD2 <- matrix(c(1,1,1,0,1,0,1,0,1),ncol=d)\nqr(D2)$rank # 2\n\nset.seed(1228)\nans.2 <- rtmvn(n=100, Mean=1:d, Sigma, D=D2, lower=rep(-1,d), upper=rep(1,d), burn=10)\n\napply(ans.2, 2, summary)\n\n\n\n"} {"package":"tmvmixnorm","topic":"rtmvt","snippet":"### Name: rtmvt\n### Title: Random number generation for truncated multivariate Student's t\n### distribution subject to linear inequality constraints\n### Aliases: rtmvt\n\n### ** Examples\n\n# Example for full rank\nd <- 3\nrho <- 0.5\nnu <- 10\nSigma <- matrix(0, nrow=d, ncol=d)\nSigma <- rho^abs(row(Sigma) - col(Sigma))\n\nD1 <- diag(1,d) # Full rank\n\nset.seed(1203)\nans.t <- rtmvt(n=1000, Mean=1:d, Sigma, nu=nu, D=D1, lower=rep(-1,d), upper=rep(1,d),\nburn=50, thin=0)\n\napply(ans.t, 2, summary)\n\n\n\n"} {"package":"tmvmixnorm","topic":"rtuvn","snippet":"### Name: rtuvn\n### Title: Random number generation for truncated univariate normal\n### distribution\n### Aliases: rtuvn\n\n### ** Examples\n\nset.seed(1203)\nans <- rtuvn(n=1000, mean=1, sd=2, lower=-2, upper=3)\nsummary(ans)\n\n# Check if the sample matches with CDF by KS test\nks.test(ans,\"ptuvn\",1,2,-2,3)\n\n\n\n"} {"package":"tmvmixnorm","topic":"unif_acc","snippet":"### Name: unif_acc\n### Title: Acceptance rate of uniform rejection sampling\n### Aliases: unif_acc\n\n### ** Examples\n\nset.seed(1203)\nunif_acc(1,2)\n\n\n\n"} {"package":"tmvmixnorm","topic":"unif_rej","snippet":"### Name: unif_rej\n### Title: Uniform rejection sampling\n### Aliases: unif_rej\n\n### ** Examples\n\nset.seed(1)\nunif_rej(a=1, b=2)\n\n\n\n"} 
{"package":"tspredit","topic":"bal_oversampling","snippet":"### Name: bal_oversampling\n### Title: Oversampling\n### Aliases: bal_oversampling\n\n### ** Examples\n\ndata(iris)\nmod_iris <- iris[c(1:50,51:71,101:111),]\n\nbal <- bal_oversampling('Species')\nbal <- daltoolbox::fit(bal, mod_iris)\nadjust_iris <- daltoolbox::transform(bal, mod_iris)\ntable(adjust_iris$Species)\n\n\n"} {"package":"tspredit","topic":"bal_subsampling","snippet":"### Name: bal_subsampling\n### Title: Subsampling\n### Aliases: bal_subsampling\n\n### ** Examples\n\ndata(iris)\nmod_iris <- iris[c(1:50,51:71,101:111),]\n\nbal <- bal_subsampling('Species')\nbal <- daltoolbox::fit(bal, mod_iris)\nadjust_iris <- daltoolbox::transform(bal, mod_iris)\ntable(adjust_iris$Species)\n\n\n"} {"package":"tspredit","topic":"cla_fs","snippet":"### Name: cla_fs\n### Title: Feature Selection\n### Aliases: cla_fs\n\n### ** Examples\n\n#See ?cla_fs_fss for an example of feature selection\n\n\n"} {"package":"tspredit","topic":"cla_fs_fss","snippet":"### Name: cla_fs_fss\n### Title: Forward Stepwise Selection\n### Aliases: cla_fs_fss\n\n### ** Examples\n\ndata(iris)\nmyfeature <- daltoolbox::fit(cla_fs_fss(\"Species\"), iris)\ndata <- daltoolbox::transform(myfeature, iris)\nhead(data)\n\n\n"} {"package":"tspredit","topic":"cla_fs_ig","snippet":"### Name: cla_fs_ig\n### Title: Information Gain\n### Aliases: cla_fs_ig\n\n### ** Examples\n\ndata(iris)\nmyfeature <- daltoolbox::fit(cla_fs_ig(\"Species\"), iris)\ndata <- daltoolbox::transform(myfeature, iris)\nhead(data)\n\n\n"} {"package":"tspredit","topic":"cla_fs_lasso","snippet":"### Name: cla_fs_lasso\n### Title: Feature Selection using Lasso\n### Aliases: cla_fs_lasso\n\n### ** Examples\n\ndata(iris)\nmyfeature <- daltoolbox::fit(cla_fs_lasso(\"Species\"), iris)\ndata <- daltoolbox::transform(myfeature, iris)\nhead(data)\n\n\n"} {"package":"tspredit","topic":"cla_fs_relief","snippet":"### Name: cla_fs_relief\n### Title: Relief\n### Aliases: cla_fs_relief\n\n### ** Examples\n\ndata(iris)\nmyfeature <- daltoolbox::fit(cla_fs_relief(\"Species\"), iris)\ndata <- daltoolbox::transform(myfeature, iris)\nhead(data)\n\n\n"} {"package":"tspredit","topic":"fertilizers","snippet":"### Name: fertilizers\n### Title: Fertilizers (Regression)\n### Aliases: fertilizers\n### Keywords: datasets\n\n### ** Examples\n\ndata(fertilizers)\nhead(fertilizers$brazil_n)\n\n\n"} {"package":"tspredit","topic":"ts_aug_awareness","snippet":"### Name: ts_aug_awareness\n### Title: Augmentation by awareness\n### Aliases: ts_aug_awareness\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using awareness\naugment <- ts_aug_awareness()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_awaresmooth","snippet":"### Name: ts_aug_awaresmooth\n### Title: Augmentation by awareness smooth\n### Aliases: ts_aug_awaresmooth\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using awareness\naugment <- ts_aug_awaresmooth()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_flip","snippet":"### Name: ts_aug_flip\n### Title: Augmentation by flip\n### Aliases: ts_aug_flip\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using 
flip\naugment <- ts_aug_flip()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_jitter","snippet":"### Name: ts_aug_jitter\n### Title: Augmentation by jitter\n### Aliases: ts_aug_jitter\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using jitter\naugment <- ts_aug_jitter()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_none","snippet":"### Name: ts_aug_none\n### Title: no augmentation\n### Aliases: ts_aug_none\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#no data augmentation\naugment <- ts_aug_none()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_shrink","snippet":"### Name: ts_aug_shrink\n### Title: Augmentation by shrink\n### Aliases: ts_aug_shrink\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using shrink\naugment <- ts_aug_shrink()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_stretch","snippet":"### Name: ts_aug_stretch\n### Title: Augmentation by stretch\n### Aliases: ts_aug_stretch\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using stretch\naugment <- ts_aug_stretch()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_aug_wormhole","snippet":"### Name: ts_aug_wormhole\n### Title: Augmentation by wormhole\n### Aliases: ts_aug_wormhole\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#data augmentation using wormhole\naugment <- ts_aug_wormhole()\naugment <- fit(augment, xw)\nxa <- transform(augment, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_fil_ema","snippet":"### Name: ts_fil_ema\n### Title: Time Series Exponential Moving Average\n### Aliases: ts_fil_ema\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n# convert to sliding windows\nts <- ts_data(sin_data$y, 10)\nts_head(ts, 3)\nsummary(ts[,10])\n\n# filter\nfilter <- ts_fil_ema(ema = 3)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_hp","snippet":"### Name: ts_fil_hp\n### Title: Hodrick-Prescott Filter\n### Aliases: ts_fil_hp\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_hp(lambda = 100*(26)^2) #frequency assumed to be 26\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_kalman","snippet":"### Name: ts_fil_kalman\n### Title: Kalman Filter\n### Aliases: ts_fil_kalman\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_kalman()\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} 
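All ts_aug_* transformers above share the same fit()/transform() protocol, so their effect can be summarized by how many sliding windows they add. A short sketch reusing the sin_data setup from the examples; treating the result as a matrix with nrow() is an assumption based on the matrix-style indexing used elsewhere in these docs:

library(daltoolbox)
data(sin_data)
xw <- ts_data(sin_data$y, 10)
augment <- fit(ts_aug_jitter(), xw)
xa <- transform(augment, xw)
# Augmentation should enlarge the window set (assumes nrow() applies)
c(original = nrow(xw), augmented = nrow(xa))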
{"package":"tspredit","topic":"ts_fil_lowess","snippet":"### Name: ts_fil_lowess\n### Title: Lowess Smoothing\n### Aliases: ts_fil_lowess\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_lowess(f = 0.2)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_ma","snippet":"### Name: ts_fil_ma\n### Title: Time Series Moving Average\n### Aliases: ts_fil_ma\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n# convert to sliding windows\nts <- ts_data(sin_data$y, 10)\nts_head(ts, 3)\nsummary(ts[,10])\n\n# filter\nfilter <- ts_fil_ma(3)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_none","snippet":"### Name: ts_fil_none\n### Title: no filter\n### Aliases: ts_fil_none\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#no data filter\nfiltering <- ts_fil_none()\nfiltering <- fit(filtering, xw)\nxa <- transform(filtering, xw)\nts_head(xa)\n\n\n"} {"package":"tspredit","topic":"ts_fil_qes","snippet":"### Name: ts_fil_qes\n### Title: Quadratic Exponential Smoothing\n### Aliases: ts_fil_qes\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n# convert to sliding windows\nts <- ts_data(sin_data$y, 10)\nts_head(ts, 3)\nsummary(ts[,10])\n\n# filter\nfilter <- ts_fil_qes()\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_recursive","snippet":"### Name: ts_fil_recursive\n### Title: Recursive Filter\n### Aliases: ts_fil_recursive\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_recursive(filter = 0.05)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_seas_adj","snippet":"### Name: ts_fil_seas_adj\n### Title: Seasonal Adjustment\n### Aliases: ts_fil_seas_adj\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_seas_adj(frequency = 26)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_ses","snippet":"### Name: ts_fil_ses\n### Title: Simple Exponential Smoothing\n### Aliases: ts_fil_ses\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n# convert to sliding windows\nts <- ts_data(sin_data$y, 10)\nts_head(ts, 3)\nsummary(ts[,10])\n\n# filter\nfilter <- ts_fil_ses()\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_smooth","snippet":"### Name: ts_fil_smooth\n### Title: Time Series Smooth\n### Aliases: ts_fil_smooth\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\nfilter <- ts_fil_smooth()\nfilter <- fit(filter, 
sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_spline","snippet":"### Name: ts_fil_spline\n### Title: Smoothing Splines\n### Aliases: ts_fil_spline\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n\n# filter\nfilter <- ts_fil_spline(spar = 0.5)\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_fil_winsor","snippet":"### Name: ts_fil_winsor\n### Title: Winsorization of Time Series\n### Aliases: ts_fil_winsor\n\n### ** Examples\n\n# time series with noise\nlibrary(daltoolbox)\ndata(sin_data)\nsin_data$y[9] <- 2*sin_data$y[9]\n# convert to sliding windows\nts <- ts_data(sin_data$y, 10)\nts_head(ts, 3)\nsummary(ts[,10])\n\n# filter\nfilter <- ts_fil_winsor()\nfilter <- fit(filter, sin_data$y)\ny <- transform(filter, sin_data$y)\n\n# plot\nplot_ts_pred(y=sin_data$y, yadj=y)\n\n\n"} {"package":"tspredit","topic":"ts_maintune","snippet":"### Name: ts_maintune\n### Title: Time Series Tune\n### Aliases: ts_maintune\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\nts <- ts_data(sin_data$y, 10)\n\nsamp <- ts_sample(ts, test_size = 5)\nio_train <- ts_projection(samp$train)\nio_test <- ts_projection(samp$test)\n\ntune <- ts_maintune(input_size=c(3:5), base_model = ts_elm(), preprocess = list(ts_norm_gminmax()))\nranges <- list(nhid = 1:5, actfun=c('purelin'))\n\n# Generic model tunning\nmodel <- fit(tune, x=io_train$input, y=io_train$output, ranges)\n\nprediction <- predict(model, x=io_test$input[1,], steps_ahead=5)\nprediction <- as.vector(prediction)\noutput <- as.vector(io_test$output)\n\nev_test <- evaluate(model, output, prediction)\nev_test\n\n\n"} {"package":"tspredit","topic":"ts_norm_none","snippet":"### Name: ts_norm_none\n### Title: no normalization\n### Aliases: ts_norm_none\n\n### ** Examples\n\nlibrary(daltoolbox)\ndata(sin_data)\n\n#convert to sliding windows\nxw <- ts_data(sin_data$y, 10)\n\n#no data normalization\nnormalize <- ts_norm_none()\nnormalize <- fit(normalize, xw)\nxa <- transform(normalize, xw)\nts_head(xa)\n\n\n"} {"package":"xnet","topic":"create_grid","snippet":"### Name: create_grid\n### Title: Create a grid of values for tuning tskrr\n### Aliases: create_grid\n\n### ** Examples\n\ncreate_grid(lim = c(1e-4, 1), ngrid = 5)\n\n\n\n"} {"package":"xnet","topic":"dim,tskrr-method","snippet":"### Name: dim,tskrr-method\n### Title: Get the dimensions of a tskrr object\n### Aliases: dim,tskrr-method dim.tskrr\n\n### ** Examples\n\ndata(drugtarget)\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\ndim(mod)\nnrow(mod)\nncol(mod)\n\n\n\n"} {"package":"xnet","topic":"fitted.tskrr","snippet":"### Name: fitted.tskrr\n### Title: extract the predictions\n### Aliases: fitted.tskrr fitted.linearFilter fitted,tskrr-method\n### fitted,linearFilter-method\n\n### ** Examples\n\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\npred <- fitted(mod)\n\n\n\n"} {"package":"xnet","topic":"permutations","snippet":"### Name: permutations\n### Title: Getters for permtest objects\n### Aliases: permutations Extract-permtest [,permtest-method\n\n### ** Examples\n\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\nptest <- permtest(mod, fun = loss_auc)\n\nloss(ptest)\nptest[c(2,3)]\npermutations(ptest)\n\n\n\n"} 
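Since a tskrr model keeps its label matrix, the fitted() predictions shown above can be checked for shape agreement with the dim() method. A small sketch on the same drugtarget data:

data(drugtarget)
mod <- tskrr(drugTargetInteraction, targetSim, drugSim)
# fitted() returns one prediction per label, so the shapes must match
all(dim(fitted(mod)) == dim(mod))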
{"package":"xnet","topic":"response,tskrr-method","snippet":"### Name: response,tskrr-method\n### Title: Getters for tskrr objects\n### Aliases: response,tskrr-method response lambda,tskrrHomogeneous-method\n### lambda,tskrrHeterogeneous-method lambda is_tskrr is_homogeneous\n### is_heterogeneous symmetry get_eigen get_kernelmatrix has_hat\n### get_kernel\n\n### ** Examples\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\nis_homogeneous(mod)\n\nEigR <- get_eigen(mod)\nEigC <- get_eigen(mod, which = 'column')\nlambda(mod)\n\n\n\n"} {"package":"xnet","topic":"has_imputed_values","snippet":"### Name: has_imputed_values\n### Title: Getters for tskrrImpute objects\n### Aliases: has_imputed_values which_imputed is_imputed\n\n### ** Examples\n\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\n\nnaid <- sample(length(drugTargetInteraction), 30)\ndrugTargetInteraction[naid] <- NA\n\nimpmod <- impute_tskrr(drugTargetInteraction, targetSim, drugSim)\n\nhas_imputed_values(mod)\nhas_imputed_values(impmod)\n\n# For illustration: extract imputed values\nid <- is_imputed(impmod)\nfitted(impmod)[id]\n\n\n\n"} {"package":"xnet","topic":"is_tuned","snippet":"### Name: is_tuned\n### Title: Getters for tskrrTune objects\n### Aliases: is_tuned get_grid get_loss_values has_onedim\n\n### ** Examples\n\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\ntuned <- tune(mod, ngrid = 10)\n\nis_tuned(mod)\nis_tuned(tuned)\n\n# Basic visualization of the grid.\n\ngridvals <- get_grid(tuned)\nz <- get_loss_values(tuned)\n\n## Not run: \n##D image(gridvals$k,gridvals$g,log(z), log = 'xy',\n##D xlab = \"lambda k\", ylab = \"lambda g\")\n## End(Not run)\n\n\n\n"} {"package":"xnet","topic":"alpha","snippet":"### Name: alpha\n### Title: Getters for linearFilter objects\n### Aliases: alpha na_removed getters_linearFilter mean.linearFilter\n### mean,linearFilter-method colMeans,linearFilter-method\n### rowMeans,linearFilter-method alpha,linearFilter-method\n### na_removed,linearFilter-method\n\n### ** Examples\n\ndata(drugtarget)\nlf <- linear_filter(drugTargetInteraction, alpha = 0.25)\nalpha(lf)\nmean(lf)\ncolMeans(lf)\nna_removed(lf)\n\n\n\n"} {"package":"xnet","topic":"impute_tskrr","snippet":"### Name: impute_tskrr\n### Title: Impute missing values in a label matrix\n### Aliases: impute_tskrr\n\n### ** Examples\n\n\ndata(drugtarget)\n\nnaid <- sample(length(drugTargetInteraction), 30)\ndrugTargetInteraction[naid] <- NA\n\nimpute_tskrr(drugTargetInteraction, targetSim, drugSim)\n\n\n\n"} {"package":"xnet","topic":"impute_tskrr.fit","snippet":"### Name: impute_tskrr.fit\n### Title: Impute values based on a two-step kernel ridge regression\n### Aliases: impute_tskrr.fit\n\n### ** Examples\n\n\ndata(drugtarget)\n\nK <- eigen(targetSim)\nG <- eigen(drugSim)\n\nHk <- eigen2hat(K$vectors, K$values, lambda = 0.01)\nHg <- eigen2hat(G$vectors, G$values, lambda = 0.05)\n\ndrugTargetInteraction[c(3,17,123)] <- NA\n\nres <- impute_tskrr.fit(drugTargetInteraction, Hk, Hg,\n niter = 1000, tol = 10e-10,\n start = 0, verbose = FALSE)\n\n\n\n"} {"package":"xnet","topic":"is_symmetric","snippet":"### Name: is_symmetric\n### Title: Test symmetry of a matrix\n### Aliases: is_symmetric\n\n### ** Examples\n\nx <- matrix(1:16,ncol = 4)\nis_symmetric(x)\n\nx <- x %*% t(x)\nis_symmetric(x)\n\n\n\n"} {"package":"xnet","topic":"linear_filter","snippet":"### Name: linear_filter\n### Title: Fit a linear filter over a label matrix\n### Aliases: linear_filter\n\n### 
** Examples\n\ndata(drugtarget)\nlinear_filter(drugTargetInteraction, alpha = 0.25)\nlinear_filter(drugTargetInteraction, alpha = c(0.1,0.1,0.4,0.4))\n\n\n\n"} {"package":"xnet","topic":"loo","snippet":"### Name: loo\n### Title: Leave-one-out cross-validation for tskrr\n### Aliases: loo loo,tskrrHeterogeneous-method loo,tskrrHomogeneous-method\n### loo,linearFilter-method\n\n### ** Examples\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim,\n lambda = c(0.01,0.01))\n\ndelta <- loo(mod, exclusion = 'both') - response(mod)\ndelta0 <- loo(mod, replaceby0 = TRUE) - response(mod)\n\n\n\n"} {"package":"xnet","topic":"loss","snippet":"### Name: loss\n### Title: Calculate or extract the loss of a tskrr model\n### Aliases: loss loss,tskrr-method loss,tskrrTune-method\n### loss,permtest-method\n\n### ** Examples\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\n\nloss(mod, fun = loss_auc)\n\ntuned <- tune(mod, fun = loss_auc)\n\nloss(tuned)\nloss(tuned, fun = loss_mse)\n\n\n\n"} {"package":"xnet","topic":"loss_functions","snippet":"### Name: loss_functions\n### Title: loss functions\n### Aliases: loss_functions loss_mse loss_auc\n\n### ** Examples\n\n\nx <- c(1,0,0,1,0,0,1,0,1)\ny <- c(0.8,-0.1,0.2,0.2,0.4,0.01,1.12,0.9,0.9)\nloss_mse(x,y)\nloss_auc(x,y)\n\n\n\n"} {"package":"xnet","topic":"match_labels","snippet":"### Name: match_labels\n### Title: Reorder the label matrix\n### Aliases: match_labels\n\n### ** Examples\n\nmat <- matrix(1:6, ncol = 2,\n dimnames = list(c(\"b\", \"a\", \"d\"),\n c(\"ca\", \"cb\"))\n )\n\nmatch_labels(mat, c(\"a\",\"b\", \"d\"), c(\"ca\",\"cb\"))\n\n#Using matrices\ndata(drugtarget)\nout <- match_labels(drugTargetInteraction, targetSim, drugSim)\n\n\n\n"} {"package":"xnet","topic":"permtest","snippet":"### Name: permtest\n### Title: Calculate the relative importance of the edges\n### Aliases: permtest print.permtest permtest,tskrrHeterogeneous-method\n### permtest,tskrrHomogeneous-method permtest,tskrrTune-method\n\n### ** Examples\n\n\n# Heterogeneous network\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\npermtest(mod, fun = loss_auc)\n\n\n\n"} {"package":"xnet","topic":"plot.tskrr","snippet":"### Name: plot.tskrr\n### Title: plot a heatmap of the predictions from a tskrr model\n### Aliases: plot.tskrr\n\n### ** Examples\n\ndata(drugtarget)\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\n\nplot(mod)\nplot(mod, dendro = \"row\", legend = FALSE)\nplot(mod, col = rainbow(20), dendro = \"none\", which = \"residuals\")\nplot(mod, labCol = NA, labRow = NA, margins = c(0.2,0.2))\n\n\n\n"} {"package":"xnet","topic":"plot_grid","snippet":"### Name: plot_grid\n### Title: Plot the grid of a tuned tskrr model\n### Aliases: plot_grid\n\n### ** Examples\n\n\ndata(drugtarget)\n\n## One dimensional tuning\ntuned1d <- tune(drugTargetInteraction, targetSim, drugSim,\n lim = c(1e-4,2), ngrid = 40,\n fun = loss_auc, onedim = TRUE)\n\nplot_grid(tuned1d)\nplot_grid(tuned1d, lambdapars = list(col = \"green\",\n lty = 1, lwd = 2),\n log = FALSE, las = 2, main = \"1D tuning\")\n\n## Two dimensional tuning\ntuned2d <- tune(drugTargetInteraction, targetSim, drugSim,\n lim = c(1e-4,10), ngrid = 20,\n fun = loss_auc)\n\nplot_grid(tuned2d)\n\n\n\n"} {"package":"xnet","topic":"predict.tskrr","snippet":"### Name: predict.tskrr\n### Title: predict method for tskrr fits\n### Aliases: predict.tskrr predict,tskrr-method\n\n### ** Examples\n\n\n## Predictions for homogeneous 
networks\n\ndata(proteinInteraction)\n\nidnew <- sample(nrow(Kmat_y2h_sc), 20)\n\ntrainY <- proteinInteraction[-idnew,-idnew]\ntrainK <- Kmat_y2h_sc[-idnew,-idnew]\n\ntestK <- Kmat_y2h_sc[idnew, - idnew]\n\nmod <- tskrr(trainY, trainK, lambda = 0.1)\n# Predict interaction between test vertices\npredict(mod, testK, testK)\n\n# Predict interaction between test and train vertices\npredict(mod, testK)\npredict(mod, g = testK)\n\n## Predictions for heterogeneous networks\ndata(\"drugtarget\")\n\nidnewK <- sample(nrow(targetSim), 10)\nidnewG <- sample(ncol(drugSim), 10)\n\ntrainY <- drugTargetInteraction[-idnewK, -idnewG]\ntrainK <- targetSim[-idnewK, -idnewK]\ntrainG <- drugSim[-idnewG, -idnewG]\n\ntestK <- targetSim[idnewK, -idnewK]\ntestG <- drugSim[idnewG, -idnewG]\n\nmod <- tskrr(trainY, trainK, trainG, lambda = 0.01)\n\n# Predictions for new targets on drugs in model\npredict(mod, testK)\n# Predictions for new drugs on targets in model\npredict(mod, g = testG)\n# Predictions for new drugs and targets\npredict(mod, testK, testG)\n\n\n\n"} {"package":"xnet","topic":"residuals","snippet":"### Name: residuals\n### Title: calculate residuals from a tskrr model\n### Aliases: residuals residuals.tskrr residuals,tskrr-method\n\n### ** Examples\n\n\ndata(drugtarget)\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim,\n lambda = c(0.01,0.01))\ndelta <- response(mod) - loo(mod, exclusion = \"both\")\nresid <- residuals(mod, method = \"loo\", exclusion = \"both\")\nall.equal(delta, resid)\n\n\n\n"} {"package":"xnet","topic":"test_symmetry","snippet":"### Name: test_symmetry\n### Title: test the symmetry of a matrix\n### Aliases: test_symmetry\n\n### ** Examples\n\nmat1 <- matrix(c(1,0,0,1),ncol = 2)\ntest_symmetry(mat1)\nmat2 <- matrix(c(1,0,0,-1), ncol = 2)\ntest_symmetry(mat2)\nmat3 <- matrix(1:4, ncol = 2)\ntest_symmetry(mat3)\n\n\n\n"} {"package":"xnet","topic":"tskrr","snippet":"### Name: tskrr\n### Title: Fitting a two step kernel ridge regression\n### Aliases: tskrr\n\n### ** Examples\n\n\n# Heterogeneous network\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\n\nY <- response(mod)\npred <- fitted(mod)\n\n# Homogeneous network\n\ndata(proteinInteraction)\n\nmodh <- tskrr(proteinInteraction, Kmat_y2h_sc)\n\nYh <- response(modh)\npred <- fitted(modh)\n\n\n\n"} {"package":"xnet","topic":"tskrr.fit","snippet":"### Name: tskrr.fit\n### Title: Carry out a two-step kernel ridge regression\n### Aliases: tskrr.fit\n\n### ** Examples\n\n\ndata(drugtarget)\n\nK <- eigen(targetSim)\nG <- eigen(drugSim)\n\nres <- tskrr.fit(drugTargetInteraction,K,G,\n lambda.k = 0.01, lambda.g = 0.05)\n\n\n\n"} {"package":"xnet","topic":"tune","snippet":"### Name: tune\n### Title: tune the lambda parameters for a tskrr\n### Aliases: tune tune,tskrrHomogeneous-method\n### tune,tskrrHeterogeneous-method tune,matrix-method\n\n### ** Examples\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\ntuned <- tune(mod, lim = c(0.1,1), ngrid = list(5,10),\n fun = loss_auc)\n\n## Not run: \n##D \n##D # This is just some visualization of the matrix\n##D # It can be run safely.\n##D gridvals <- get_grid(tuned)\n##D z <- get_loss_values(tuned) # loss values\n##D \n##D image(gridvals$k,gridvals$g,z, log = 'xy',\n##D xlab = \"lambda k\", ylab = \"lambda g\",\n##D col = rev(heat.colors(20)))\n##D \n## End(Not run)\n\n\n"} {"package":"xnet","topic":"update","snippet":"### Name: update\n### Title: Update a tskrr object with a new lambda\n### Aliases: update 
update,tskrrHomogeneous-method\n### update,tskrrHeterogeneous-method\n\n### ** Examples\n\ndata(drugtarget)\n\nmod <- tskrr(drugTargetInteraction, targetSim, drugSim)\n\n# Update with the same lambda\nmod2 <- update(mod, lambda = 1e-3)\n\n# Use different lambda for rows and columns\nmod3 <- update(mod, lambda = c(0.01,0.001))\n\n# A model with the hat matrices stored\nlambda <- c(0.001,0.01)\nmodkeep <- tskrr(drugTargetInteraction, targetSim, drugSim, keep = TRUE)\nHk_1 <- hat(modkeep, which = \"row\")\nmodkeep2 <- update(modkeep, lambda = lambda)\nHk_2 <- hat(modkeep2, which = \"row\")\n\n# Calculate new hat matrix by hand:\ndecomp <- get_eigen(modkeep, which = \"row\")\nHk_byhand <- eigen2hat(decomp$vectors,\n decomp$values,\n lambda = lambda[1])\nidentical(Hk_2, Hk_byhand)\n\n\n\n"} {"package":"lookup","topic":"lookup","snippet":"### Name: lookup\n### Title: Lookup items in key-value pairs of vectors\n### Aliases: lookup\n\n### ** Examples\n\n# Example 1. A and B have different factor levels\nA <- factor(c(\"A\",\"E\",\"F\"))\nB <- factor(c(\"E\",\"F\",\"G\"))\nv <- c(4,2,0)\nlookup(A,B,v)\n\n# Example 2. Merge treatment means back into the raw data\ndat <- data.frame(Trt = rep(LETTERS[1:5],2),\n x=round(rnorm(10),2))\n# Treatment B is missing all values, treatment D missing one value\ndat$x[dat$Trt==\"B\"] <- NA\ndat$x[4] <- NA\n# Calculate treatment means\nTrtMean <- tapply(dat$x, dat$Trt, mean, na.rm=TRUE)\nTrtMean\n# Merge the means into the original data\ndat$TrtMean <- lookup(dat$Trt, names(TrtMean), TrtMean)\n\n\n\n"} {"package":"lookup","topic":"vlookup","snippet":"### Name: vlookup\n### Title: Lookup items in key-value dataframe similar to Excel's vlookup\n### function\n### Aliases: vlookup\n\n### ** Examples\n\n# Example 1. A and B have different factor levels\nA <- factor(c(\"A\",\"E\",\"F\"))\ndat <- data.frame(trt = factor(c(\"E\",\"F\",\"G\")),\n val = c(4,2,0))\nvlookup(A,dat, \"trt\", \"val\")\n\n\n\n"} {"package":"twostageTE","topic":"RVforLR_realizations","snippet":"### Name: RVforLR_realizations\n### Title: Realizations of Random variable for LR-based confidence\n### intervals\n### Aliases: RVforLR_realizations\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"RVforLR_realizations\")\n\n\n"} {"package":"twostageTE","topic":"chernoff_realizations","snippet":"### Name: chernoff_realizations\n### Title: Quantiles of the Chernoff Random Variable\n### Aliases: chernoff_realizations\n### Keywords: datasets\n\n### ** Examples\n\ndata(chernoff_realizations)\n\n\n"} {"package":"twostageTE","topic":"estimateDeriv","snippet":"### Name: estimateDeriv\n### Title: Derivative Estimation\n### Aliases: estimateDeriv\n\n### ** Examples\n\nexplanatory = runif(50)\nresponse = explanatory^2 + rnorm(50, sd=0.1)\nestimateDeriv(explanatory, response, d_0=0.5,\n sigmaSq=estimateSigmaSq(explanatory, response)$sigmaSq) \n\n## The function is currently defined as\nfunction (explanatory, response, d_0, sigmaSq) \n{\n deriv_estimateHelper <- function(explanatory, response, d_0, \n sigmaSq) {\n n = length(response)\n p = 5\n X = matrix(0, n, p)\n for (i in 1:p) {\n X[, i] = (explanatory - d_0)^i\n }\n beta_hat = lm(response ~ 0 + X)$coef\n h = 0\n for (i in (p - 1):(p + 1)) {\n j = i - p + 2\n h = h + beta_hat[i - 1] * factorial(j) * d_0^(j - \n 1)\n }\n return(2.275 * (sigmaSq/h^2)^(1/7) * n^(-1/7))\n }\n n = length(response)\n p = 2\n X = matrix(0, n, p)\n X[, 1] = (explanatory - d_0)\n X[, 2] = (explanatory - d_0)^2\n bw_opt = deriv_estimateHelper(explanatory, response, d_0, \n sigmaSq)\n W = 
0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2, \n max, 0)\n while (sum(W > 1) <= 1 & bw_opt <= max(explanatory) - min(explanatory)) {\n bw_opt = bw_opt * 2\n W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2, \n max, 0)\n }\n beta_hat = lm(response ~ 0 + X, weight = W)$coef\n while (beta_hat[1] <= 0 & bw_opt <= max(explanatory) - min(explanatory)) {\n bw_opt = bw_opt * 2\n W = 0.75/bw_opt * sapply(1 - ((explanatory - d_0)/bw_opt)^2, \n max, 0)\n beta_hat = lm(response ~ 0 + X, weight = W)$coef\n }\n if (beta_hat[1] <= 0) {\n warning(\"deriv_estimate:WARNING: NEGATIVE DERIVATIVE HAS BEEN ESTIMATED\", \n .call = FALSE)\n return(1/log(n))\n }\n return(beta_hat[1])\n }\n\n\n"} {"package":"twostageTE","topic":"estimateSigmaSq","snippet":"### Name: estimateSigmaSq\n### Title: Estimate Variance\n### Aliases: estimateSigmaSq\n\n### ** Examples\n\nexplanatory = runif(50)\nresponse = explanatory^2 + rnorm(50, sd=0.1)\nestimateSigmaSq(explanatory, response)\n\n## The function is currently defined as\nfunction (explanatory, response) \n{\n ind = order(explanatory, decreasing = FALSE)\n if (sum(diff(ind) < 0) != 0) {\n explanatory = explanatory[ind]\n response = response[ind]\n }\n n = length(response)\n a = b = eps = rep(0, n - 2)\n for (i in 2:(n - 1)) {\n x = explanatory[(i - 1):(i + 1)]\n a[i - 1] = (x[3] - x[2])/(x[3] - x[1])\n b[i - 1] = (x[2] - x[1])/(x[3] - x[1])\n eps[i - 1] = a[i - 1] * response[i - 1] + b[i - 1] * \n response[i + 1] - response[i]\n }\n cSq = 1/(a^2 + b^2 + 1)\n list(sigmaSq = 1/(n - 2) * sum(cSq * eps^2), a = a, b = b, \n eps = eps)\n }\n\n\n"} {"package":"twostageTE","topic":"likelihoodConfidenceInterval","snippet":"### Name: likelihoodConfidenceInterval\n### Title: Likelihood ratio based confidence intervals\n### Aliases: likelihoodConfidenceInterval\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_LR=likelihoodConfidenceInterval(X, Y, 0.25, 0.95)\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0, level = NA) \n{\n if (is.na(level)) \n level = 0.95\n RVforLR_realizations <- NULL; rm(RVforLR_realizations); # Dummy to trick R CMD check \n data(\"RVforLR_realizations\", envir =environment())\n D = quantile(RVforLR_realizations, level)\n n = length(response)\n ind = order(explanatory, decreasing=FALSE)\n if (sum(diff(ind) < 0) != 0) {\n\texplanatory = explanatory[ind]\n\tresponse = response[ind]\n }\n fit = threshold_estimate_ir(explanatory, response, Y_0)\n sigmaSq = estimateSigmaSq(explanatory, response)$sigmaSq\n likelihoodRatio <- function(explanatory, response, X_0, Y_0, \n sigmaSq) {\n logLikelihood <- function(Y, Y_hat) {\n -1/(2 * sigmaSq) * sum((Y - Y_hat)^2)\n }\n unconstrainedLikelihood <- function(explanatory, response) {\n fit = pava(explanatory, response)\n tmp = logLikelihood(fit$response_obs, fit$y)\n return(list(x = fit$x, y_hat = fit$y, y = fit$response_obs, \n logLikelihood = tmp))\n }\n constrainedLikelihood <- function(explanatory, response, \n X_0, Y_0) {\n fit = pava(explanatory, response, X_0, Y_0)\n tmp = logLikelihood(fit$response_obs, fit$y)\n return(list(x = fit$x, y_hat = fit$y, y = fit$response_obs, \n logLikelihood = tmp))\n }\n unconst = unconstrainedLikelihood(explanatory, response)\n const = constrainedLikelihood(explanatory, response, \n X_0, Y_0)\n return(unconst$logLikelihood - const$logLikelihood)\n }\n i = fit$index + 1\n lrt_tmp = 0\n while (lrt_tmp < D && i < n) {\n lrt_tmp = likelihoodRatio(explanatory, response, explanatory[i], \n Y_0, sigmaSq)\n i = 
i + 1\n }\n right = explanatory[min(i, n)]\n i = fit$index - 1\n lrt_tmp = 0\n while (lrt_tmp < D && i > 1) {\n lrt_tmp = likelihoodRatio(explanatory, response, explanatory[i], \n Y_0, sigmaSq)\n i = i - 1\n }\n left = explanatory[max(i, 1)]\n return(list(estimate = fit$threshold_estimate_explanatory, \n lower = left, upper = right, sigmaSq = sigmaSq, deriv_d0 = NA))\n }\n\n\n"} {"package":"twostageTE","topic":"linearBootstrapConfidenceInterval_stageTwo","snippet":"### Name: linearBootstrapConfidenceInterval_stageTwo\n### Title: Confidence interval based on bootstrapping a local linear model\n### Aliases: linearBootstrapConfidenceInterval_stageTwo\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nX2 = c(rep(oneStage_IR$L1,37),rep(oneStage_IR$U1,38))\nY2=X2^2+rnorm(n=length(X2), sd=0.1)\ntwoStage_IR_locLinear=linearBootstrapConfidenceInterval_stageTwo(X2, Y2, 0.25, 0.95)\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0, level = NA) \n{\n numBootstrap = 1000\n if (is.na(level)) {\n level = 0.95\n }\n alpha = 1 - level\n n = length(response)\n fit = threshold_estimate_locLinear(explanatory, response, \n Y_0)\n Rn = rep(0, numBootstrap)\n for (i in 1:numBootstrap) {\n ind = sample(x = n, replace = TRUE)\n fit_bst = threshold_estimate_locLinear(explanatory[ind], \n response[ind], Y_0)\n Rn[i] = sqrt(n) * (fit_bst$threshold_estimate_explanatory - \n fit$threshold_estimate_explanatory)\n }\n qU = quantile(Rn, alpha/2)\n qL = quantile(Rn, level + alpha/2)\n uBand = fit$threshold_estimate_explanatory - n^(-1/2) * qU\n lBand = fit$threshold_estimate_explanatory - n^(-1/2) * qL\n return(list(estimate = fit$threshold_estimate_explanatory, \n lower = max(lBand, min(explanatory)), upper = min(uBand, \n max(explanatory)), sigmaSq = NA, deriv_d0 = NA))\n }\n\n\n"} {"package":"twostageTE","topic":"pava","snippet":"### Name: pava\n### Title: Isotonic regression\n### Aliases: pava\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\npava(X, Y, 0.25, 0.5)\n\n## The function is currently defined as\nfunction (explanatory, response, X_0 = NA, Y_0 = NA, w = NA) \n{\n require(isotone)\n if (is.na(w)) \n w = rep(1, length(explanatory))\n ind = order(explanatory, decreasing = FALSE)\n if (sum(diff(ind) < 0) != 0) {\n explanatory = explanatory[ind]\n response = response[ind]\n }\n if (is.na(X_0) && is.na(Y_0)) {\n fit = gpava(explanatory, response)\n response_fit = fit$x\n }\n else if (is.na(X_0) || is.na(Y_0)) {\n warning(\"Only X_0 or only Y_0 was supplied. 
Please check arguments.\")\n }\n else {\n n = length(explanatory)\n if (sum(response < Y_0) == n && sum(explanatory < X_0) == \n n) {\n warning(\"Warning: X_0 and Y_0 are outside observed region\")\n fit = gpava(explanatory, response)\n response_fit = fit$x\n }\n else if (sum(response < Y_0) == n && sum(explanatory < \n X_0) == 0) {\n warning(\"Warning: X_0 and Y_0 are outside observed region\")\n return(list(x = explanatory, y = rep(Y_0, n), y_compressed = rep(Y_0, \n n)))\n }\n else if (sum(response < Y_0) == n) {\n warning(\"Warning: Y_0 is outside observed region\")\n n2 = n - sum(explanatory < X_0)\n y1 = response[explanatory < X_0]\n x1 = explanatory[explanatory < X_0]\n fit = gpava(x1, y1)\n response_fit = c(sapply(fit$x, min, Y_0), rep(Y_0, \n n2))\n }\n else if (sum(response >= Y_0) == n && sum(explanatory < \n X_0) == n) {\n warning(\"Warning: X_0 and Y_0 are outside observed region\")\n return(list(x = explanatory, y = rep(Y_0, n), y_compressed = rep(Y_0, \n n)))\n }\n else if (sum(response >= Y_0) == n && sum(explanatory < \n X_0) == 0) {\n warning(\"Warning: X_0 and Y_0 are outside observed region\")\n fit = gpava(explanatory, response)\n response_fit = fit$x\n }\n else if (sum(response >= Y_0) == n) {\n warning(\"Warning: Y_0 is outside observed region\")\n n2 = n - sum(explanatory > X_0)\n y1 = response[explanatory > X_0]\n x1 = explanatory[explanatory > X_0]\n fit = gpava(x1, y1)\n response_fit = c(rep(Y_0, n2), sapply(fit$x, max, \n Y_0))\n }\n else if (sum(explanatory < X_0) == n) {\n warning(\"Warning: X_0 is outside observed region\")\n fit = gpava(explanatory, response)\n response_fit = sapply(fit$x, min, Y_0)\n }\n else if (sum(explanatory < X_0) == 0) {\n warning(\"Warning: X_0 is outside observed region\")\n fit = gpava(explanatory, response)\n response_fit = sapply(fit$x, max, Y_0)\n }\n else {\n y1 = response[explanatory < X_0]\n x1 = explanatory[explanatory < X_0]\n y2 = response[explanatory >= X_0]\n x2 = explanatory[explanatory >= X_0]\n fit1 = gpava(x1, y1)\n fit2 = gpava(x2, y2)\n response_fit = c(sapply(fit1$x, min, Y_0), sapply(fit2$x, \n max, Y_0))\n }\n }\n return(list(x = explanatory, y = response_fit, response_obs = response))\n }\n\n\n"} {"package":"twostageTE","topic":"plot.twostageTE","snippet":"### Name: plot.twostageTE\n### Title: Plot function for twostageTE\n### Aliases: plot.twostageTE plot\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nplot(oneStage_IR)\n\n## The function is currently defined as\nfunction (x, ...) \n{\n if (!inherits(x, \"twostageTE\")) {\n stop(\"Error: Object is not of class twostageTE\")\n }\n plot_gpava <- function(x, main = \"PAVA Plot\", xlab = \"Predictor\", \n ylab = \"Response\", col = \"lightblue\", ...) 
{\n o <- order(x$z)\n xval <- x$z[o]\n yval <- x$x[o]\n xcum <- c(xval[1] - mean(diff(xval)), xval)\n jumps <- ((1:length(yval))[!duplicated(yval)] - 1)[-1]\n jumps <- c(1, jumps, length(xval))\n lines(xval, yval, col = col, lwd = 1, type = \"S\")\n points(xval[jumps], yval[jumps], col = col, pch = 13)\n }\n pava1 = gpava(z = x$X1, y = x$Y1)\n if (!is.na(x$L2)) {\n pava2 = gpava(z = x$X2, y = x$Y2)\n }\n if (!is.na(x$L2)) {\n plot(x = x$X1, y = x$Y1, pch = \"1\", cex = 1.5, xlab = \"\", \n ylab = \"\", ylim = range(c(x$Y1, x$Y2)), col = \"grey80\")\n abline(h = x$threshold, lty = 3, lwd = 1, col = 2)\n points(x = x$X2, y = x$Y2, pch = \"2\", cex = 1.5, col = \"grey65\")\n plot_gpava(pava2, col = \"blue\")\n }\n else {\n plot(x = x$X1, y = x$Y1, pch = \"1\", cex = 1.5, xlab = \"\", \n ylab = \"\", col = \"grey80\")\n abline(h = x$threshold, lty = 3, lwd = 1, col = 2)\n plot_gpava(pava1, col = 1)\n }\n abline(v = x$L1, lty = 2, lwd = 2)\n abline(v = x$U1, lty = 2, lwd = 2)\n if (!is.na(x$L2)) {\n abline(v = x$L2, col = \"blue\", lwd = 2)\n abline(v = x$U2, col = \"blue\", lwd = 2)\n }\n points(x = x$estimate, y = x$threshold, col = \"blue\", pch = 4, \n cex = 1.5)\n if (!is.na(x$L2)) {\n segments(x$estimate, min(c(x$Y1, x$Y2)) - 1, x$estimate, \n x$threshold, lwd = 2, col = \"blue\")\n }\n else {\n segments(x$estimate, min(x$Y1) - 1, x$estimate, x$threshold, \n lwd = 2, col = \"blue\")\n }\n mtext(\"Explanatory\", side = 1, line = 2.5, cex = 1.65)\n mtext(\"Response\", side = 2, line = 2, cex = 1.65)\n if (!is.na(x$L2)) {\n legend(\"topleft\", c(\"Estimate\", \"1st Stage CI\", \"2nd Stage CI\", \n \"2nd Stage Iso-Regression\"), pch = c(4, NA, NA, 13), \n col = c(\"blue\", 1, \"blue\", \"blue\"), lty = c(NA, 2, \n 1, 1), lwd = c(NA, 2, 2, 1), bg = \"white\")\n }\n else {\n legend(\"topleft\", c(\"Estimate\", \"1st Stage CI\", \"1st Stage Iso-Regression\"), \n pch = c(4, NA, 13), col = c(\"blue\", 1, 1), lty = c(NA, \n 2, 1), lwd = c(NA, 2, 1), bg = \"white\")\n }\n }\n\n\n"} {"package":"twostageTE","topic":"print.twostageTE","snippet":"### Name: print.twostageTE\n### Title: print for twostageTE\n### Aliases: print.twostageTE print\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nprint(oneStage_IR)\n\n## The function is currently defined as\nfunction (x, ...) 
\n{\n if (!inherits(x, \"twostageTE\")) {\n stop(\"Error: Object is not of class twostageTE\")\n }\n if (!is.null(cl <- x$call)) {\n names(cl)[2] <- \"\"\n cat(\"Call:\\n\")\n dput(cl)\n }\n cat(sprintf(\"\\n%.1f%% Confidence Interval\", x$level * 100))\n if (is.na(x$L2)) {\n cat(sprintf(\"\\nn Lower d0_hat Upper\\n%d %.3f %.3f %.3f\\n\", \n length(x$Y1), x$L1, x$estimate, x$U1))\n }\n else {\n cat(sprintf(\"\\nn1 n2 Lower d0_hat Upper\\n%d %d %.3f %.3f %.3f\\n\", \n length(x$Y1), length(x$Y2), x$L2, x$estimate, x$U2))\n }\n invisible(x)\n }\n\n\n"} {"package":"twostageTE","topic":"stageOneAnalysis","snippet":"### Name: stageOneAnalysis\n### Title: Stage one analysis\n### Aliases: stageOneAnalysis\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\n\n## The function is currently defined as\nfunction (explanatory, response, threshold, type = \"IR-wald\", \n level = 0.99) \n{\n cl1 <- match.call(expand.dots = TRUE)\n if (type == \"IR-wald\") {\n CI = waldConfidenceInterval_ir_stageOne(explanatory, \n response, threshold, level = level)\n return(structure(list(L1 = CI$lower, U1 = CI$upper, estimate = CI$estimate, \n C_1 = CI$C_1, threshold = threshold, level = level, \n X1 = explanatory, Y1 = response, X2 = NA, Y2 = NA, \n L2 = NA, U2 = NA, call = cl1, sigmaSq = CI$sigmaSq, \n deriv_d0 = CI$deriv_d0), class = \"twostageTE\"))\n }\n else if (type == \"IR-likelihood\") {\n CI = likelihoodConfidenceInterval(explanatory, response, \n threshold, level = level)\n return(structure(list(L1 = CI$lower, U1 = CI$upper, estimate = CI$estimate, \n threshold = threshold, level = level, X1 = explanatory, \n Y1 = response, X2 = NA, Y2 = NA, L2 = NA, U2 = NA, \n call = cl1, sigmaSq = CI$sigmaSq, deriv_d0 = CI$deriv_d0), \n class = \"twostageTE\"))\n }\n else if (type == \"SIR\") {\n CI = waldConfidenceInterval_sir_stageOne(explanatory, \n response, threshold, level = level)\n return(structure(list(L1 = CI$lower, U1 = CI$upper, estimate = CI$estimate, \n threshold = threshold, level = level, X1 = explanatory, \n Y1 = response, X2 = NA, Y2 = NA, L2 = NA, U2 = NA, \n call = cl1, sigmaSq = CI$sigmaSq, deriv_d0 = CI$deriv_d0), \n class = \"twostageTE\"))\n }\n else stop(\"stageOneAnalysis: type should be either 'IR-wald',\n 'IR-likelihood' or 'SIR'\")\n }\n\n\n"} {"package":"twostageTE","topic":"stageTwoAnalysis","snippet":"### Name: stageTwoAnalysis\n### Title: Stage two analysis\n### Aliases: stageTwoAnalysis\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nX2=runif(75,oneStage_IR$L1 ,oneStage_IR$U1)\nY2=X2^2+rnorm(n=length(X2), sd=0.1)\ntwoStage_IR = stageTwoAnalysis(oneStage_IR, X2, Y2, type=\"IR-wald\", 0.95)\n\n## The function is currently defined as\nfunction (stageOne, explanatory, response, type = \"IR-wald\", \n level = 0.95, combineData = FALSE) \n{\n cl1 <- match.call(expand.dots = TRUE)\n Y_0 = stageOne$threshold\n C_1 = stageOne$C_1\n gamma1=1/3\n if (combineData) {\n\texplanatory = c(explanatory , \n\t stageOne$X1[stageOne$X1 > stageOne$L1 & stageOne$X1 < stageOne$U1])\n\tresponse = c(response , \n\t stageOne$Y1[stageOne$X1 > stageOne$L1 & stageOne$X1 < stageOne$U1])\t\t\n }\n if (type == \"IR-wald\") {\n CI = waldConfidenceInterval_ir_stageTwo(explanatory, \n response, Y_0, level = level, gamma1 = gamma1, C_1 = C_1, \n n1 = length(stageOne$X1))\n return(structure(list(L2 = CI$lower, U2 = CI$upper, estimate = CI$estimate, 
\n threshold = Y_0, level = level, X1 = stageOne$X1, \n Y1 = stageOne$Y1, X2 = explanatory, Y2 = response, \n L1 = stageOne$L1, U1 = stageOne$U1, call = cl1, sigmaSq = CI$sigmaSq, \n deriv_d0 = CI$deriv_d0), class = \"twostageTE\"))\n }\n else if (type == \"IR-likelihood\") {\n CI = likelihoodConfidenceInterval(explanatory, response, \n Y_0, level = level)\n return(structure(list(L2 = CI$lower, U2 = CI$upper, estimate = CI$estimate, \n threshold = Y_0, level = level, X1 = stageOne$X1, \n Y1 = stageOne$Y1, X2 = explanatory, Y2 = response, \n L1 = stageOne$L1, U1 = stageOne$U1, call = cl1, sigmaSq = CI$sigmaSq, \n deriv_d0 = CI$deriv_d0), class = \"twostageTE\"))\n }\n else if (type == \"SIR\") {\n CI = waldConfidenceInterval_sir_stageTwo(explanatory = explanatory, \n response = response, Y_0 = Y_0, gamma1 = gamma1, \n C_1 = C_1, level = level)\n return(structure(list(L2 = CI$lower, U2 = CI$upper, estimate = CI$estimate, \n threshold = Y_0, level = level, X1 = stageOne$X1, \n Y1 = stageOne$Y1, X2 = explanatory, Y2 = response, \n L1 = stageOne$L1, U1 = stageOne$U1, call = cl1, sigmaSq = CI$sigmaSq, \n deriv_d0 = CI$deriv_d0), class = \"twostageTE\"))\n }\n else if (type == \"locLinear\") {\n CI = linearBootstrapConfidenceInterval_stageTwo(explanatory = explanatory, \n response = response, Y_0 = Y_0, level = level)\n return(structure(list(L2 = CI$lower, U2 = CI$upper, estimate = CI$estimate, \n threshold = Y_0, level = level, X1 = stageOne$X1, \n Y1 = stageOne$Y1, X2 = explanatory, Y2 = response, \n L1 = stageOne$L1, U1 = stageOne$U1, call = cl1, sigmaSq = CI$sigmaSq, \n deriv_d0 = CI$deriv_d0), class = \"twostageTE\"))\n }\n else stop(\"stageTwoAnalysis: type should be either \n 'IR-wald','IR-likelihood', 'SIR', or 'locLinear'\")\n }\n\n\n"} {"package":"twostageTE","topic":"summary.twostageTE","snippet":"### Name: summary.twostageTE\n### Title: summary method for object twostageTE\n### Aliases: summary.twostageTE summary\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nsummary(oneStage_IR)\n\n\n\n"} {"package":"twostageTE","topic":"threshold_estimate_ir","snippet":"### Name: threshold_estimate_ir\n### Title: Threshold estimate based on IR\n### Aliases: threshold_estimate_ir\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\nstageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0) \n{\n n = length(response)\n if (sum(response < Y_0) == n) {\n warning(\"Y_0 is outside observed region\")\n list(threshold_estimate_explanatory = max(explanatory), \n threshold_estimate_response = max(response), threshold = Y_0, \n Y_hat = max(response), index = n)\n }\n else if (sum(response >= Y_0) == n) {\n warning(\"Y_0 is outside observed region\")\n list(threshold_estimate_explanatory = min(explanatory), \n threshold_estimate_response = min(response), threshold = Y_0, \n Y_hat = min(response), index = 1)\n }\n else {\n fit = pava(explanatory, response)\n if (sum(fit$y >= Y_0) == 0) {\n warning(\"estimate is on the boundary\")\n ind = n\n estim_x = fit$x[ind]\n }\n else if (sum(fit$y <= Y_0) == 0) {\n warning(\"estimate is on the boundary\")\n ind = min(which(fit$y >= Y_0))\n estim_x = fit$x[ind]\n }\n else {\n ind = min(which(fit$y >= Y_0))\n estim_x = fit$x[ind]\n }\n list(threshold_estimate_explanatory = estim_x, \n threshold_estimate_response = fit$y[ind], \n threshold = Y_0, Y_hat = fit$y, index = ind)\n }\n 
}\n\n\n"} {"package":"twostageTE","topic":"threshold_estimate_locLinear","snippet":"### Name: threshold_estimate_locLinear\n### Title: Threshold estimate based on local linear approximation\n### Aliases: threshold_estimate_locLinear\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nX2 = c(rep(oneStage_IR$L1,37),rep(oneStage_IR$U1,38))\nY2=X2^2+rnorm(n=length(X2), sd=0.1)\nstageTwoAnalysis(oneStage_IR, explanatory = X2, response = Y2,\ntype = \"locLinear\", level = 0.95)\n\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0) \n{\n n = length(response)\n if (sum(response < Y_0) == n) {\n list(threshold_estimate_explanatory = max(explanatory), \n threshold_estimate_response = max(response), threshold = Y_0, \n Y_hat = max(response), index = n)\n }\n else if (sum(response >= Y_0) == n) {\n list(threshold_estimate_explanatory = min(explanatory), \n threshold_estimate_response = min(response), threshold = Y_0, \n Y_hat = min(response), index = 1)\n }\n else {\n beta = lm(response ~ explanatory)$coef\n estim_x = (Y_0 - beta[1])/beta[2]\n list(threshold_estimate_explanatory = estim_x, threshold = Y_0)\n }\n }\n\n\n"} {"package":"twostageTE","topic":"twostageTE-package","snippet":"### Name: twostageTE\n### Title: Threshold value estimation using two-stage plans\n### Aliases: twostageTE-package twostageTE\n### Keywords: package\n\n### ** Examples\n\n## Simulating the (wiggly) isotonic Sine function ##\nsampleData=function(n, lower, upper) {\nx=runif(n, lower, upper)\ny=(1/40)*sin(6*pi*x) + 1/4 + x/2 + (1/4)*x^2\n+ rnorm(n=length(x), sd=0.1)\nreturn(list(X=x, Y=y))\n}\nBudget=100\nd0=0.5\nthreshold = (1/40)*sin(6*pi*d0) + 1/4 + d0/2 + (1/4)*d0^2\n\nn1=floor(Budget*0.25)\nn2=Budget - n1\nsamp = sampleData(n1, lower=0, upper=1)\nX = samp$X\nY = samp$Y\n## Two Stage IR+IR ##\nstageOne_IR=stageOneAnalysis(X, Y, threshold, type=\"IR-wald\", 0.99)\nsamp2 = sampleData(n2, lower=stageOne_IR$L1, upper=stageOne_IR$U1)\nX2 = samp2$X\nY2 = samp2$Y\ntwoStageIR = stageTwoAnalysis(stageOne_IR, X2, Y2, type=\"IR-wald\", 0.95)\n## Two Stage LR+LR ##\nstageOne_LR=stageOneAnalysis(X, Y, threshold, type=\"IR-likelihood\", 0.99)\nsamp2 = sampleData(n2, lower=stageOne_LR$L1, upper=stageOne_LR$U1)\nX2 = samp2$X\nY2 = samp2$Y\ntwoStageLR = stageTwoAnalysis(stageOne_LR, X2, Y2, \n type=\"IR-likelihood\", 0.95)\n## Two Stage IR+Local Linear ##\nX2 = c(rep(stageOne_IR$L1,37),rep(stageOne_IR$U1,38))\nY2=X2^2+rnorm(n=length(X2), sd=0.1)\ntwoStageLinear=stageTwoAnalysis(stageOne_IR, explanatory = X2, response = Y2,\n type = \"locLinear\", level = 0.95)\n\n\n"} {"package":"twostageTE","topic":"waldConfidenceInterval_ir_stageOne","snippet":"### Name: waldConfidenceInterval_ir_stageOne\n### Title: Stage one IR-Wald confidence interval\n### Aliases: waldConfidenceInterval_ir_stageOne\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0, level = NA) \n{\n if (is.na(level)) {\n level = 0.95\n }\n alpha = 1 - level\n ## Import previously computed Chernoff quantiles, provided by Groeneboom and Wellner\n chernoff_realizations <- NULL; rm(chernoff_realizations); \n data(\"chernoff_realizations\", envir =environment())\n ind = min(which(chernoff_realizations$DF - (1-alpha/2) >= 0))\n q = chernoff_realizations$xcoor[ind]\n n = length(response)\n\t\n fit = 
threshold_estimate_ir(explanatory, response, Y_0)\n sigmaSq = estimateSigmaSq(explanatory, response)$sigmaSq\n deriv_d0 = estimateDeriv(explanatory, response, \n fit$threshold_estimate_explanatory, sigmaSq) \n g_d0 = 1/n\n\n n = length(explanatory)\n C_di = (4 * sigmaSq/(deriv_d0^2))^(1/3)\n band = n^(-1/3) * C_di * g_d0^(-1/3) * q\n return(list(estimate = fit$threshold_estimate_explanatory, \n lower = max(min(explanatory), fit$threshold_estimate_explanatory - \n band), upper = min(max(explanatory), fit$threshold_estimate_explanatory + \n band), C_1 = as.numeric(C_di * g_d0^(-1/3) * q), \n sigmaSq = sigmaSq, deriv_d0 = deriv_d0))\n }\n\n\n"} {"package":"twostageTE","topic":"waldConfidenceInterval_ir_stageTwo","snippet":"### Name: waldConfidenceInterval_ir_stageTwo\n### Title: Two-stage IR-Wald confidence interval\n### Aliases: waldConfidenceInterval_ir_stageTwo\n\n### ** Examples\n\nX=runif(25, 0,1)\nY=X^2+rnorm(n=length(X), sd=0.1)\noneStage_IR=stageOneAnalysis(X, Y, 0.25, type=\"IR-wald\", 0.99)\nX2=runif(75,oneStage_IR$L1 ,oneStage_IR$U1)\nY2=X2^2+rnorm(n=length(X2), sd=0.1)\ntwoStage_IR_IR = stageTwoAnalysis(oneStage_IR, X2, Y2, type=\"IR-wald\", 0.95)\n\n## The function is currently defined as\nfunction (explanatory, response, Y_0, gamma1, C_1, n1, level = NA) \n{\n if (is.na(level)) {\n level = 0.95\n }\n alpha = 1 - level\n chernoff_realizations <- NULL; rm(chernoff_realizations);\n data(\"chernoff_realizations\", envir =environment())\n\n ind = min(which(chernoff_realizations$DF - (1-alpha/2) >= 0))\n q = chernoff_realizations$xcoor[ind]\n n = length(response)\n fit = threshold_estimate_ir(explanatory, response, Y_0)\n phi_0 = C_1 * n1 * (n^(-1))\n sigmaSq = estimateSigmaSq(explanatory, response)$sigmaSq\n deriv_d0 = estimateDeriv(explanatory, response, fit$threshold_estimate_explanatory, \n sigmaSq)\n C_di = (4 * sigmaSq/(deriv_d0^2))^(1/3)\n n = length(explanatory)\n p = gamma1/(1 + gamma1)\n C_di2 = C_di * (C_1/((1 - p) * p^(gamma1) * phi_0))\n band = n^(-1 * (1 + gamma1)/3) * C_di2 * q\n return(list(estimate = fit$threshold_estimate_explanatory, \n lower = max(min(explanatory), fit$threshold_estimate_explanatory - \n band), upper = min(max(explanatory), fit$threshold_estimate_explanatory + \n band), sigmaSq = sigmaSq, deriv_d0 = deriv_d0))\n }\n\n\n"} {"package":"exploreR","topic":"masslm","snippet":"### Name: masslm\n### Title: Mass Linear Regression\n### Aliases: masslm\n\n### ** Examples\n\nexam.df <- iris\nmasslm(exam.df, \"Sepal.Width\", ignore = \"Species\")\nmasslm(exam.df, \"Sepal.Width\", ignore = c(\"Species\", \"Petal.Width\"))\n\n\n"} {"package":"exploreR","topic":"massregplot","snippet":"### Name: massregplot\n### Title: Mass Regression Plot\n### Aliases: massregplot\n\n### ** Examples\n\nexam.df <- iris\nmassregplot(exam.df, \"Sepal.Length\", ignore = \"Species\")\nmassregplot(exam.df, \"Sepal.Length\", ignore = c(\"Species\", \"Petal.Width\"), include.se = FALSE)\n\n\n\n"} {"package":"exploreR","topic":"reset","snippet":"### Name: reset\n### Title: Reset R\n### Aliases: reset\n\n### ** Examples\n\nreset()\n\n\n"} {"package":"exploreR","topic":"standardize","snippet":"### Name: standardize\n### Title: Standardize Variables\n### Aliases: standardize\n\n### ** Examples\n\nexam.df <- iris\nstandardize(exam.df, \"Petal.Width\")\nstandardize(exam.df, c(\"Petal.Width\", \"Petal.Length\"), type = \"classic\")\n\n\n"} {"package":"NegativeControlOutcomeAdjustment","topic":"data","snippet":"### Name: data\n### Title: Data for examples\n### Aliases: data\n### Keywords: 
data\n\n### ** Examples\n\n\n data(data, package=\"NegativeControlOutcomeAdjustment\")\n\n # Display some of the data\n data[1:5, ]\n\n\n"} {"package":"NegativeControlOutcomeAdjustment","topic":"negativeControlOutcomeAdjustment","snippet":"### Name: negativeControlOutcomeAdjustment\n### Title: NegativeControlOutcomeAdjustment\n### Aliases: negativeControlOutcomeAdjustment\n\n### ** Examples\n\n data(data, package=\"NegativeControlOutcomeAdjustment\")\n\n Y1 <- data[, \"Y1\"]\n Y2 <- data[, \"Y2\"]\n Trt <- data[, \"T\"]\n\n # With no covariates, only the Joint-NC method is used\n negativeControlOutcomeAdjustment(Y1, Y2, Trt)\n\n # Age and Region define 39 strata, some of which have fewer than 20 observations. \n # Other strata that lead to non-finite estimates in the SS-Joint method are also dropped.\n # Warning messages will be issued in these situations. \n tab <- table(interaction(data$Age, data$Region, drop=TRUE, sep=\"_\"))\n sum(tab < 20)\n negativeControlOutcomeAdjustment(Y1, Y2, Trt, W=data[, c(\"Age\", \"Region\")])\n\n # Create two age groups; Age > 18 and Age <= 18 to reduce the number of strata to 6\n Age <- as.numeric(data$Age > 18)\n W <- interaction(Age, data$Region, sep=\"_\", drop=TRUE)\n negativeControlOutcomeAdjustment(Y1, Y2, Trt, W=W)\n\n\n\n"} {"package":"dbMC","topic":"dbmc","snippet":"### Name: dbmc\n### Title: de-biased estimator\n### Aliases: dbmc\n\n### ** Examples\n\n\n# simulated data\nrequire(softImpute)\nn = 100\np = 100\nJ = 2 # the true low-rank \nnp = n*p\nsig2 = 1\nmissfrac = 0.5\n# xtrue is the underlying matrix that we do not know and want to recover it\nxtrue = matrix(rnorm(n*J),n,J)%*%matrix(rnorm(J*p),J,p) \n# generating missing entries locations\nimiss = sample(np,np*missfrac,replace=FALSE)\n# xna is the observed matrix with missing entries\nxna = xtrue + matrix(rnorm(np, sd = sig2),nr = n,nc = p)\nxna[imiss] = NA\nlamda = 2.5*sig2*sqrt(n*p)\n\n# note that we only have xna as our initial data\n# first, fit a softImpute method\nfit1 = softImpute(xna, type = 'als')\n# complete the matrix by a softImpute method\nximp = complete(xna,fit1)\nmean((ximp - xtrue)^2);rankMatrix(ximp,.1)[1]\n# now, de-biased the softImpute method\nx.db = dbmc(x = xna,\n ximp = ximp,\n entries_miss = imiss,\n est_rank = 2)\nmean((x.db - xtrue)^2);rankMatrix(x.db,.1)[1]\n\n\n\n\n\n\n\n"} {"package":"shapes","topic":"apes","snippet":"### Name: apes\n### Title: Great ape data\n### Aliases: apes\n### Keywords: datasets\n\n### ** Examples\n\ndata(apes)\npar(mfrow=c(1,2))\nplotshapes(apes$x[,,apes$group==\"gorf\"],symbol=\"f\")\nplotshapes(apes$x[,,apes$group==\"gorm\"],symbol=\"m\")\n\n\n"} {"package":"shapes","topic":"backfit","snippet":"### Name: backfit\n### Title: Backfit from scores to configuration\n### Aliases: backfit\n### Keywords: multivariate\n\n### ** Examples\n\nans <- pnss3d( macf.dat, sphere.type=\"BIC\", n.pc=8)\ny <- backfit( ans$PNS$scores[1,] , ans ,type=\"pnss\")\nriemdist( macf.dat[,,1] , y ) #should be close to zero\n\nans2 <- procGPA( macf.dat, tangentcoords=\"partial\")\ny <- backfit( ans2$scores[1,] , ans2 ,type=\"pca\")\nriemdist( macf.dat[,,1] , y ) #should be close to zero\n\n\n"} {"package":"shapes","topic":"bookstein2d","snippet":"### Name: bookstein2d\n### Title: Bookstein's baseline registration for 2D data\n### Aliases: bookstein2d\n### Keywords: multivariate\n\n### ** Examples\n\n data(gorf.dat)\n data(gorm.dat)\n\n bookf<-bookstein2d(gorf.dat)\n bookm<-bookstein2d(gorm.dat)\n\n 
plotshapes(bookf$mshape,bookm$mshape,joinline=c(1,6,7,8,2,3,4,5,1))\n\n\n"} {"package":"shapes","topic":"brains","snippet":"### Name: brains\n### Title: Brain landmark data\n### Aliases: brains\n### Keywords: datasets\n\n### ** Examples\n\ndata(brains)\n# plot first three brains\nshapes3d(brains$x[,,1:3])\n\n\n"} {"package":"shapes","topic":"centroid.size","snippet":"### Name: centroid.size\n### Title: Centroid size\n### Aliases: centroid.size\n### Keywords: multivariate\n\n### ** Examples\n\ndata(mice)\ncentroid.size(mice$x[,,1])\n\n\n"} {"package":"shapes","topic":"cortical","snippet":"### Name: cortical\n### Title: Cortical surface data\n### Aliases: cortical\n### Keywords: datasets\n\n### ** Examples\n\ndata(cortical)\nplotshapes(cortical$x)\n\n\n"} {"package":"shapes","topic":"digit3.dat","snippet":"### Name: digit3.dat\n### Title: Digit 3 data\n### Aliases: digit3.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(digit3.dat)\nk<-dim(digit3.dat)[1]\nn<-dim(digit3.dat)[3]\nplotshapes(digit3.dat,joinline=c(1:13))\n\n\n"} {"package":"shapes","topic":"distcov","snippet":"### Name: distcov\n### Title: Compute a distance between two covariance matrices\n### Aliases: distcov\n### Keywords: multivariate\n\n### ** Examples\n\n\n\nA <- diag(5)\nB <- A + .1*matrix(rnorm(25),5,5) \nS1<-A\nS2<- B\n\ndistcov( S1, S2, method=\"Procrustes\")\n\n\n\n"} {"package":"shapes","topic":"dna.dat","snippet":"### Name: dna.dat\n### Title: DNA data\n### Aliases: dna.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(dna.dat)\nplotshapestime3d(dna.dat)\n\n\n"} {"package":"shapes","topic":"estcov","snippet":"### Name: estcov\n### Title: Weighted Frechet mean of covariance matrices\n### Aliases: estcov\n### Keywords: multivariate\n\n### ** Examples\n\n\nS <- array(0,c(5,5,10) )\nfor (i in 1:10){\ntem <- diag(5)+.1*matrix(rnorm(25),5,5)\nS[,,i]<- tem\n}\n\nestcov( S , method=\"Procrustes\")\n\n\n\n"} {"package":"shapes","topic":"frechet","snippet":"### Name: frechet\n### Title: Mean shape estimators\n### Aliases: frechet\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas (cf. Dryden and Mardia, 2016)\n\ndata(gorf.dat)\nfrechet(gorf.dat[,,1:4],mean=\"intrinsic\")\n\n\n\n"} {"package":"shapes","topic":"gels","snippet":"### Name: gels\n### Title: Electrophoresis gel data\n### Aliases: gels\n### Keywords: datasets\n\n### ** Examples\n\ndata(gels)\nplotshapes(gels)\n\n\n"} {"package":"shapes","topic":"gorf.dat","snippet":"### Name: gorf.dat\n### Title: Female gorilla data\n### Aliases: gorf.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(gorf.dat)\nplotshapes(gorf.dat)\n\n\n"} {"package":"shapes","topic":"gorm.dat","snippet":"### Name: gorm.dat\n### Title: Male gorilla data\n### Aliases: gorm.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(gorm.dat)\nplotshapes(gorm.dat)\n\n\n"} {"package":"shapes","topic":"groupstack","snippet":"### Name: groupstack\n### Title: Combine two or more groups of configurations\n### Aliases: groupstack\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas (cf. 
Dryden and Mardia, 2016)\n\ndata(gorf.dat)\ndata(gorm.dat)\n\ngroupstack(gorf.dat,gorm.dat)\n\n\n\n"} {"package":"shapes","topic":"humanmove","snippet":"### Name: humanmove\n### Title: Human movement data\n### Aliases: humanmove\n### Keywords: datasets\n\n### ** Examples\n\ndata(humanmove)\n#plotshapes(humanmove[,,,1])\n#for (i in 2:5){\n#for (j in 1:4){\n#for (k in 1:10){\n#points(humanmove[j,,k,i],col=i)\n#}\n#}\n#}\n\n\n"} {"package":"shapes","topic":"macaques","snippet":"### Name: macaques\n### Title: Male and Female macaque data\n### Aliases: macaques\n### Keywords: datasets\n\n### ** Examples\n\ndata(macaques)\nshapes3d(macaques$x[,,1])\n\n\n"} {"package":"shapes","topic":"macf.dat","snippet":"### Name: macf.dat\n### Title: Female macaque data\n### Aliases: macf.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(macf.dat)\nplotshapes(macf.dat)\n\n\n"} {"package":"shapes","topic":"macm.dat","snippet":"### Name: macm.dat\n### Title: Male macaque data\n### Aliases: macm.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(macm.dat)\nplotshapes(macm.dat)\n\n\n"} {"package":"shapes","topic":"mice","snippet":"### Name: mice\n### Title: T2 mouse vertebrae data\n### Aliases: mice\n### Keywords: datasets\n\n### ** Examples\n\ndata(mice)\nplotshapes(mice$x,symbol=as.character(mice$group),joinline=c(1,6,2:5,1))\n\n\n"} {"package":"shapes","topic":"panf.dat","snippet":"### Name: panf.dat\n### Title: Female chimpanzee data\n### Aliases: panf.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(panf.dat)\nplotshapes(panf.dat)\n\n\n"} {"package":"shapes","topic":"panm.dat","snippet":"### Name: panm.dat\n### Title: Male chimpanzee data\n### Aliases: panm.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(panm.dat)\nplotshapes(panm.dat)\n\n\n"} {"package":"shapes","topic":"plot3darcs","snippet":"### Name: plot3darcs\n### Title: Modes of variation plots for PCA and PNSS\n### Aliases: plot3darcs\n### Keywords: multivariate\n\n### ** Examples\n\nans <- pnss3d(digit3.dat, sphere.type=\"BIC\", n.pc=5)\n#aa <- plot3darcs(ans,c=2,pcno=1)\n#bb <- plot3darcs(ans,c=2,pcno=1,type=\"pca\")\n\n\n"} {"package":"shapes","topic":"plotshapes","snippet":"### Name: plotshapes\n### Title: Plot configurations\n### Aliases: plotshapes\n### Keywords: hplot multivariate\n\n### ** Examples\n\ndata(gorf.dat)\ndata(gorm.dat)\nplotshapes(gorf.dat,gorm.dat,joinline=c(1,6,7,8,2,3,4,5,1))\n\ndata(macm.dat)\ndata(macf.dat)\nplotshapes(macm.dat,macf.dat)\n\n\n"} {"package":"shapes","topic":"pns","snippet":"### Name: pns\n### Title: Principal Nested Spheres\n### Aliases: pns\n### Keywords: multivariate\n\n### ** Examples\n\n\n# out <- pc2sphere(x = gorf.dat, n.pc = 2)\n# spheredata <- t(out$spheredata)\n# pns.out <- pns(x = spheredata)\n\n\n\n"} {"package":"shapes","topic":"pns4pc","snippet":"### Name: pns4pc\n### Title: Principal Nested Shape Spaces from PCA\n### Aliases: pns4pc\n### Keywords: multivariate\n\n### ** Examples\n\n\npns4pc(digit3.dat,n.pc=2)\n\n\n\n"} {"package":"shapes","topic":"pnss3d","snippet":"### Name: pnss3d\n### Title: Principal Nested Shape Space Analysis\n### Aliases: pnss3d\n### Keywords: multivariate\n\n### ** Examples\n\nans <- pnss3d(digit3.dat, sphere.type=\"BIC\", n.pc=5)\n#aa <- plot3darcs(ans,c=2,pcno=1)\n#bb <- plot3darcs(ans,c=2,pcno=1,type=\"pca\")\n\n\n"} {"package":"shapes","topic":"pongof.dat","snippet":"### Name: pongof.dat\n### Title: Female orang utan data\n### Aliases: pongof.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(pongof.dat)\nplotshapes(pongof.dat)\n\n\n"}
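The female and male orang utan records above and below are typically compared the same way as the gorilla datasets earlier in this section. A minimal sketch (assuming the shapes package is attached; not part of the package documentation) that combines the documented procGPA() and riemdist() calls:

library(shapes)
data(pongof.dat)
data(pongom.dat)
# Generalised Procrustes analysis of each group, as in the procGPA examples
pongof <- procGPA(pongof.dat)
pongom <- procGPA(pongom.dat)
# Riemannian shape distance between the two mean shapes, as in the riemdist example
rho <- riemdist(pongof$mshape, pongom$mshape)
cat("Riemannian distance between female and male mean shapes is ", rho, " \n")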
{"package":"shapes","topic":"pongom.dat","snippet":"### Name: pongom.dat\n### Title: Male orang utan data\n### Aliases: pongom.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(pongom.dat)\nplotshapes(pongom.dat)\n\n\n"} {"package":"shapes","topic":"procGPA","snippet":"### Name: procGPA\n### Title: Generalised Procrustes analysis\n### Aliases: procGPA\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas (cf. Dryden and Mardia, 2016)\n\ndata(gorf.dat)\ndata(gorm.dat)\n\nplotshapes(gorf.dat,gorm.dat)\nn1<-dim(gorf.dat)[3]\nn2<-dim(gorm.dat)[3]\nk<-dim(gorf.dat)[1]\nm<-dim(gorf.dat)[2]\ngor.dat<-array(0,c(k,2,n1+n2))\ngor.dat[,,1:n1]<-gorf.dat\ngor.dat[,,(n1+1):(n1+n2)]<-gorm.dat\n\ngor<-procGPA(gor.dat)\nshapepca(gor,type=\"r\",mag=3)\nshapepca(gor,type=\"v\",mag=3)\n\ngor.gp<-c(rep(\"f\",times=30),rep(\"m\",times=29))\nx<-cbind(gor$size,gor$rho,gor$scores[,1:3])\npairs(x,panel=function(x,y) text(x,y,gor.gp),\n label=c(\"s\",\"rho\",\"score 1\",\"score 2\",\"score 3\"))\n\n\n##########################################################\n#3D example\n\ndata(macm.dat)\nout<-procGPA(macm.dat,scale=FALSE)\n\npar(mfrow=c(2,2))\nplot(out$rawscores[,1],out$rawscores[,2],xlab=\"PC1\",ylab=\"PC2\")\ntitle(\"PC scores\")\nplot(out$rawscores[,2],out$rawscores[,3],xlab=\"PC2\",ylab=\"PC3\")\nplot(out$rawscores[,1],out$rawscores[,3],xlab=\"PC1\",ylab=\"PC3\")\nplot(out$size,out$rho,xlab=\"size\",ylab=\"rho\")\ntitle(\"Size versus shape distance\")\n\n\n\n"} {"package":"shapes","topic":"procOPA","snippet":"### Name: procOPA\n### Title: Ordinary Procrustes analysis\n### Aliases: procOPA\n### Keywords: multivariate\n\n### ** Examples\n\ndata(digit3.dat)\n\nA<-digit3.dat[,,1]\nB<-digit3.dat[,,2]\nans<-procOPA(A,B) \nplotshapes(A,B,joinline=1:13)\nplotshapes(ans$Ahat,ans$Bhat,joinline=1:13)\n\n#Sooty Mangabey data\ndata(sooty.dat)\nA<-sooty.dat[,,1] #juvenile\nB<-sooty.dat[,,2] #adult\npar(mfrow=c(1,3))\npar(pty=\"s\")\nplot(A,xlim=c(-2000,3000),ylim=c(-2000,3000),xlab=\" \",ylab=\" \")\nlines(A[c(1:12,1),])\npoints(B)\nlines(B[c(1:12,1),],lty=2)\ntitle(\"Juvenile (-------) Adult (- - - -)\")\n#match B onto A\nout<-procOPA(A,B)\n#rotation angle\nprint(atan2(out$R[1,2],out$R[1,1])*180/pi)\n#scale\nprint(out$s)\nplot(A,xlim=c(-2000,3000),ylim=c(-2000,3000),xlab=\" \",ylab=\" \")\nlines(A[c(1:12,1),])\npoints(out$Bhat)\nlines(out$Bhat[c(1:12,1),],lty=2)\ntitle(\"Match adult onto juvenile\")\n#match A onto B\nout<-procOPA(B,A)\n#rotation angle\nprint(atan2(out$R[1,2],out$R[1,1])*180/pi)\n#scale\nprint(out$s)\nplot(B,xlim=c(-2000,3000),ylim=c(-2000,3000),xlab=\" \",ylab=\" \")\nlines(B[c(1:12,1),],lty=2)\npoints(out$Bhat)\nlines(out$Bhat[c(1:12,1),])\ntitle(\"Match juvenile onto adult\")\n\n\n"} {"package":"shapes","topic":"procWGPA","snippet":"### Name: procWGPA\n### Title: Weighted Procrustes analysis\n### Aliases: procWGPA\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female Gorillas (cf. 
Dryden and Mardia, 2016)\n\ndata(gorf.dat)\n\ngor<-procWGPA(gorf.dat,maxiterations=3)\n\n\n\n"} {"package":"shapes","topic":"procdist","snippet":"### Name: procdist\n### Title: Procrustes distance\n### Aliases: procdist\n### Keywords: multivariate\n\n### ** Examples\n\ndata(gorf.dat)\ndata(gorm.dat)\ngorf<-procGPA(gorf.dat)\ngorm<-procGPA(gorm.dat)\ndistfull<-procdist(gorf$mshape,gorm$mshape)\ncat(\"Full Procrustes distance between mean shapes is \",distfull,\" \\n\")\n\n\n"} {"package":"shapes","topic":"qcet2.dat","snippet":"### Name: qcet2.dat\n### Title: Control T2 mouse vertebrae data\n### Aliases: qcet2.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(qcet2.dat)\nplotshapes(qcet2.dat)\n\n\n"} {"package":"shapes","topic":"qlet2.dat","snippet":"### Name: qlet2.dat\n### Title: Large T2 mouse vertebrae data\n### Aliases: qlet2.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(qlet2.dat)\nplotshapes(qlet2.dat)\n\n\n"} {"package":"shapes","topic":"qset2.dat","snippet":"### Name: qset2.dat\n### Title: Small T2 mouse vertebrae data\n### Aliases: qset2.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(qset2.dat)\nplotshapes(qset2.dat)\n\n\n"} {"package":"shapes","topic":"rats","snippet":"### Name: rats\n### Title: Rat skulls data\n### Aliases: rats\n### Keywords: datasets\n\n### ** Examples\n\ndata(rats)\nplotshapes(rats$x,col=1:8)\n\n\n"} {"package":"shapes","topic":"resampletest","snippet":"### Name: resampletest\n### Title: Tests for mean shape difference using complex arithmetic,\n### including bootstrap and permutation tests.\n### Aliases: resampletest\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas\n\ndata(gorf.dat)\ndata(gorm.dat)\n\n#just select 3 landmarks and the first 10 observations in each group\nselect<-c(1,2,3)\nA<-gorf.dat[select,,1:10]\nB<-gorm.dat[select,,1:10]\nresampletest(A,B,resamples=100)\n\n\n\n"} {"package":"shapes","topic":"riemdist","snippet":"### Name: riemdist\n### Title: Riemannian shape distance\n### Aliases: riemdist\n### Keywords: multivariate\n\n### ** Examples\n\ndata(gorf.dat)\ndata(gorm.dat)\ngorf<-procGPA(gorf.dat)\ngorm<-procGPA(gorm.dat)\nrho<-riemdist(gorf$mshape,gorm$mshape)\ncat(\"Riemannian distance between mean shapes is \",rho,\" \\n\")\n\n\n"} {"package":"shapes","topic":"rigidbody","snippet":"### Name: rigidbody\n### Title: Rigid body transformations\n### Aliases: rigidbody\n### Keywords: multivariate\n\n### ** Examples\n\ndata(gorf.dat)\nplotshapes ( rigidbody(gorf.dat , 0, 0, 0, 0, 0, -90 ) )\n\n\n"} {"package":"shapes","topic":"sand","snippet":"### Name: sand\n### Title: Sand particle outline data\n### Aliases: sand\n### Keywords: datasets\n\n### ** Examples\n\ndata(sand)\nplotshapes(sand$x[,,sand$group==\"sea\"],sand$x[,,sand$group==\"river\"],joinline=c(1:50))\n\n\n"} {"package":"shapes","topic":"schizophrenia","snippet":"### Name: schizophrenia\n### Title: Bookstein's schizophrenia data\n### Aliases: schizophrenia\n### Keywords: datasets\n\n### ** Examples\n\ndata(schizophrenia)\nplotshapes(schizophrenia$x,symbol=as.integer(schizophrenia$group))\n\n\n"} {"package":"shapes","topic":"schizophrenia.dat","snippet":"### Name: schizophrenia.dat\n### Title: Bookstein's schizophrenia data\n### Aliases: schizophrenia.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(schizophrenia.dat)\nk<-dim(schizophrenia.dat)[1]\nn<-dim(schizophrenia.dat)[3]\nplotshapes(schizophrenia.dat)\n\n\n"} {"package":"shapes","topic":"shapepca","snippet":"### Name: shapepca\n### Title: Principal components 
analysis for shape\n### Aliases: shapepca\n### Keywords: hplot multivariate\n\n### ** Examples\n\n#2d example\ndata(gorf.dat)\ndata(gorm.dat)\n\ngorf<-procGPA(gorf.dat)\ngorm<-procGPA(gorm.dat)\nshapepca(gorf,type=\"r\",mag=3)\nshapepca(gorf,type=\"v\",mag=3)\nshapepca(gorm,type=\"r\",mag=3)\nshapepca(gorm,type=\"v\",mag=3)\n\n#3D example\n#data(macm.dat)\n#out<-procGPA(macm.dat)\n#movie\n#shapepca(out,pcno=1)\n\n\n"} {"package":"shapes","topic":"shapes.cva","snippet":"### Name: shapes.cva\n### Title: Canonical variate analysis for shapes\n### Aliases: shapes.cva\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male apes (cf. Dryden and Mardia, 2016)\n\ndata(pongof.dat)\ndata(pongom.dat) \ndata(panm.dat)\ndata(panf.dat)\n\napes <- groupstack( pongof.dat , pongom.dat , panm.dat, panf.dat )\n\nshapes.cva( apes$x, apes$groups) \n\n\n"} {"package":"shapes","topic":"shapes3d","snippet":"### Name: shapes3d\n### Title: Plot 3D data\n### Aliases: shapes3d\n### Keywords: multivariate\n\n### ** Examples\n\ndata(dna.dat)\nshapes3d(dna.dat)\n\n\n"} {"package":"shapes","topic":"shells","snippet":"### Name: shells\n### Title: Microfossil shell data\n### Aliases: shells\n### Keywords: datasets\n\n### ** Examples\n\ndata(shells)\nplotshapes(shells$uv)\n\n\n"} {"package":"shapes","topic":"sooty","snippet":"### Name: sooty\n### Title: Sooty mangabey data\n### Aliases: sooty\n### Keywords: datasets\n\n### ** Examples\n\ndata(sooty)\nplotshapes(sooty,joinline=c(1:12,1))\n\n\n"} {"package":"shapes","topic":"ssriemdist","snippet":"### Name: ssriemdist\n### Title: Riemannian size-and-shape distance\n### Aliases: ssriemdist\n### Keywords: multivariate\n\n### ** Examples\n\ndata(gorf.dat)\ndata(gorm.dat)\ngorf<-procGPA(gorf.dat,scale=FALSE)\ngorm<-procGPA(gorm.dat,scale=FALSE)\nds<-ssriemdist(gorf$mshape,gorm$mshape)\ncat(\"Riemannian size-and-shape distance between mean size-and-shapes is \",ds,\" \\n\")\n\n\n"} {"package":"shapes","topic":"steroids","snippet":"### Name: steroids\n### Title: Steroid data\n### Aliases: steroids\n### Keywords: datasets\n\n### ** Examples\n\ndata(steroids)\nshapes3d(steroids$x[,,1])\n\n\n"} {"package":"shapes","topic":"testmeanshapes","snippet":"### Name: testmeanshapes\n### Title: Tests for mean shape difference, including permutation and\n### bootstrap tests\n### Aliases: testmeanshapes\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas\n\ndata(gorf.dat)\ndata(gorm.dat)\n\nA<-gorf.dat\nB<-gorm.dat\ntestmeanshapes(A,B,resamples=100)\n\n\n\n"} {"package":"shapes","topic":"tpsgrid","snippet":"### Name: tpsgrid\n### Title: Thin-plate spline transformation grids\n### Aliases: tpsgrid\n### Keywords: multivariate hplot\n\n### ** Examples\n\ndata(gorf.dat)\ndata(gorm.dat)\n\n#TPS grid with shape change exaggerated (2x)\ngorf<-procGPA(gorf.dat)\ngorm<-procGPA(gorm.dat)\nTT<-gorf$mshape\nYY<-gorm$mshape\ntpsgrid(TT,YY,mag=2) \ntitle(\"TPS grid: Female mean (left) to Male mean (right)\") \n\n\n\n"} {"package":"shapes","topic":"transformations","snippet":"### Name: transformations\n### Title: Calculate similarity transformations\n### Aliases: transformations\n### Keywords: multivariate\n\n### ** Examples\n\n\n#2D example : female and male Gorillas (cf. 
Dryden and Mardia, 2016)\n\ndata(gorf.dat)\n\nXorig <- gorf.dat\nXrotated <- procGPA(gorf.dat)$rotated\n\ntransformations(Xrotated,Xorig)\n\n\n\n"} {"package":"covidprobability","topic":"probability_any","snippet":"### Name: probability_any\n### Title: Find the probability of any (at least one) event happening\n### Aliases: probability_any\n\n### ** Examples\n\nprobability_any(1, 0.5)\nprobability_any(2, 0.5)\nprobability_any(2, c(0.5, 1/3, 0.25))\n\n\n"} {"package":"shinyTree","topic":"dfToTree","snippet":"### Name: dfToTree\n### Title: Converts a data.frame to a data.tree format\n### Aliases: dfToTree\n\n### ** Examples\n\n## Not run: \n##D df <- data.frame(Titanic)\n##D tree <- dfToTree(df, c(\"Sex\", \"Class\", \"Survived\"))\n## End(Not run)\n\n\n"} {"package":"shinyTree","topic":"set_node_attrs","snippet":"### Name: set_node_attrs\n### Title: Tree traversal\n### Aliases: set_node_attrs\n\n### ** Examples\n\ntree <- dfToTree(data.frame(Titanic), c(\"Sex\", \"Survived\"))\nstr(set_node_attrs(tree, attr_name = \"sttype\", inner_val = \"directory\", leaf_val = \"file\"))\n\n\n\n"} {"package":"shinyTree","topic":"treeToDf","snippet":"### Name: treeToDf\n### Title: Convert tree into data.frame\n### Aliases: treeToDf\n\n### ** Examples\n\n## Not run: \n##D df <- data.frame(Titanic)\n##D tree <- dfToTree(df, c(\"Sex\", \"Class\", \"Survived\"))\n##D newDf <- treeToDf(tree, c(\"Sex\", \"Class\", \"Survived\"))\n## End(Not run)\n\n\n"} {"package":"BHMSMAfMRI","topic":"BHMSMA","snippet":"### Name: BHMSMA\n### Title: Bayesian hierarchical multi-subject multiscale analysis (BHMSMA)\n### of functional MRI data or other multiscale data\n### Aliases: BHMSMA\n\n### ** Examples\n\n# BHMSMA multi-subject analysis for simulated (fMRI) \n# data at 4 timepoints over an 8x8 grid (of a brain \n# slice) for 3 subjects\nset.seed(1)\nn <- 3\ngrid <- 8\nntime <- 4\ndata <- array(rnorm(n*grid*grid*ntime),\n dim=c(n,grid,grid,ntime))\ndesignmat <- cbind(c(1,1,1,1),c(1,0,1,0))\nk <- 2\nanalysis <- \"multi\"\nBHMSMAmulti <- BHMSMA(n, grid, data, designmat, \n k, analysis)\n\nzlim = c(0,max(abs(BHMSMAmulti$GLMCoefStandardized)))\npar(mfrow=c(1,2))\nimage( abs(BHMSMAmulti$GLMCoefStandardized[1,,,k]),\n col=heat.colors(12),zlim=zlim,main=\"GLM coef map\")\nimage( abs(BHMSMAmulti$GLMcoefposterior[1,,]),\n col=heat.colors(12),zlim=zlim,main=\"GLM coef posterior map\")\n\n\n## Not run: \n##D # BHMSMA multi-subject analysis for simulated (fMRI) \n##D # data at 100 timepoints over an 64x64 grid (of a \n##D # brain slice) for 15 subjects\n##D # (takes ~12s in a 2.8 GHz Quad-Core Intel Core i7 processor)\n##D set.seed(1)\n##D n <- 15\n##D grid <- 64\n##D ntime <- 100\n##D data <- array(rnorm(n*grid*grid*ntime),\n##D dim=c(n,grid,grid,ntime))\n##D designmat <- cbind(rep(1,ntime),runif(ntime))\n##D k <- 2\n##D analysis <- \"multi\"\n##D system.time({BHMSMAmulti <- BHMSMA(n,grid,data, \n##D designmat,k,analysis)})\n## End(Not run)\n\n\n"} {"package":"BHMSMAfMRI","topic":"glmcoef","snippet":"### Name: glmcoef\n### Title: Fit GLM (general linear model) to the fMRI time-series of all\n### voxels within a single 2D brain slice\n### Aliases: glmcoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nntime <- 10\ndesignmat <- cbind(rep(1,10),c(rep(c(1,0),5)))\ndata <- array(dim=c(n,grid,grid,ntime),\n rnorm(n*grid*grid*ntime))\nglm.fit <- glmcoef(n,grid,data,designmat)\ndim(glm.fit$GLMCoefStandardized)\n#[1] 3 8 8\n\n\n"} {"package":"BHMSMAfMRI","topic":"hyperparamest","snippet":"### Name: hyperparamest\n### Title: Obtain 
estimates of the hyperparameters of the BHMSMA model\n### Aliases: hyperparamest\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nwaveletcoefmat <- array(dim=c(n,grid^2-1),\n rnorm(n*(grid^2-1)))\nanalysis <- \"multi\"\nhyperest <- hyperparamest(n,grid,waveletcoefmat,analysis)\nhyperest$hyperparam\n# [1] 1.00000 1.00000 1.00000 1.00000 0.00000 28.37678\n\n\n"} {"package":"BHMSMAfMRI","topic":"postglmcoef","snippet":"### Name: postglmcoef\n### Title: Obtain posterior estimate of a 2D GLM coefficients map of a\n### regressor\n### Aliases: postglmcoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nglmcoefstd <- array(rnorm(n*grid*grid),\n dim=c(n,grid,grid))\npostmeanwaveletcoef <- array(rnorm(n*(grid^2-1)),\n dim=c(n,(grid^2-1)))\npostmeanglmcoef <- postglmcoef(n,grid,glmcoefstd,\n postmeanwaveletcoef)\ndim(postmeanglmcoef$GLMcoefposterior)\n#[1] 3 8 8\n\n\n"} {"package":"BHMSMAfMRI","topic":"postgroupglmcoef","snippet":"### Name: postgroupglmcoef\n### Title: Obtain posterior group estimate of a 2D GLM coefficients map of\n### a regressor\n### Aliases: postgroupglmcoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nglmcoefstd <- array(rnorm(n*grid*grid),\n dim=c(n,grid,grid))\npostmeanwaveletcoef <- array(rnorm(n*(grid^2-1)),\n dim=c(n,grid^2-1))\npost.groupcoef <- postgroupglmcoef(n,grid,glmcoefstd,\n postmeanwaveletcoef)\ndim(post.groupcoef$groupcoef)\n#[1] 8 8\n\n\n"} {"package":"BHMSMAfMRI","topic":"postmixprob","snippet":"### Name: postmixprob\n### Title: Obtain estimates of the mixture probabilities defining the\n### BHMSMA posterior wavelet coefficients distributions\n### Aliases: postmixprob\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nwaveletcoefmat <- matrix(nrow=n,ncol=grid^2-1)\nfor(i in 1:n) waveletcoefmat[i,] <- rnorm(grid^2-1)\nhyperparam <- rep(.1,6)\nanalysis <- \"multi\"\npkljbar <- postmixprob(n,grid,waveletcoefmat,hyperparam,\n analysis)\ndim(pkljbar$pkljbar)\n#[1] 3 63\n\n\n"} {"package":"BHMSMAfMRI","topic":"postsamples","snippet":"### Name: postsamples\n### Title: Obtain samples from the posterior distribution of a 2D GLM\n### coefficient map.\n### Aliases: postsamples\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nnsample <- 5\nglmcoefstd <- array(rnorm(n*grid*grid),\n dim=c(n,grid,grid))\nwaveletcoefmat <- array(rnorm(n*(grid^2-1)),\n dim=c(n,(grid^2-1)))\nhyperparam <- rep(.2,6)\npkljbar <- array(runif(n*(grid^2-1)),\n dim=c(n,(grid^2-1)))\nanalysis <- \"multi\"\npostsample <- postsamples(nsample,n,grid,glmcoefstd, \nwaveletcoefmat, hyperparam,pkljbar,analysis,seed=1)\ndim(postsample$samples)\n#[1] 3 8 8 5\n\n\n"} {"package":"BHMSMAfMRI","topic":"postwaveletcoef","snippet":"### Name: postwaveletcoef\n### Title: Obtain posterior estimates of the BHMSMA wavelet coefficients\n### Aliases: postwaveletcoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nnsample <- 5\nwaveletcoefmat <- array(rnorm(n*(grid^2-1)),\n dim=c(n,grid^2-1))\nhyperparam <- rep(.2,6)\npkljbar <- array(runif(n*(grid^2-1)),\n dim=c(n,grid^2-1))\nanalysis <- \"multi\"\npostwavecoef <- postwaveletcoef(n,grid,waveletcoefmat, \nhyperparam,pkljbar,analysis)\ndim(postwavecoef$PostMeanWaveletCoef)\n#[1] 3 63\n\n\n"} {"package":"BHMSMAfMRI","topic":"readfmridata","snippet":"### Name: readfmridata\n### Title: Import fMRI data from various fMRI image files\n### Aliases: readfmridata\n\n### ** Examples\n\n# import simmulated fMRI data from image files provided within this package\nfpath <- system.file(\"extdata\", 
package=\"BHMSMAfMRI\")\nuntar(paste0(fpath,\"/fmridata.tar\"), exdir=tempdir())\ndata <- array(dim=c(3,32,32,9))\nfor(subject in 1:3)\n{\n directory <- paste0(tempdir(),\"/fmridata\",\"/s0\",subject,\"/\")\n a <- readfmridata(directory, format=\"Analyze\", prefix=paste0(\"s0\",subject,\"_t\"),\n nimages=9, dim.image=c(32,32,1))\n data[subject,,,] <- a[,,1,]\n}\ndim(a)\n\n\n"} {"package":"BHMSMAfMRI","topic":"substituteWaveletCoef","snippet":"### Name: substituteWaveletCoef\n### Title: Substitute 2D wavelet transform coefficients with user-given\n### values\n### Aliases: substituteWaveletCoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nntime <- 10\ndesignmat <- cbind( rep(1,10), c(rep(c(1,0),5)) )\ndata <- array(dim=c(n,grid,grid,ntime),\n rnorm(n*grid*grid*ntime))\nglm.fit <- glmcoef(n, grid, data, designmat)\nglmcoefstd <- glm.fit$GLMCoefStandardized[,,,1]\ndwt = wavethresh::imwd(glmcoefstd[1,,],type=\"wavelet\",\n family=\"DaubLeAsymm\",filter.number=6,bc=\"periodic\")\ndwt\n\nvalues = rnorm(grid^2-1)\ndwtnew = substituteWaveletCoef(grid,dwt,values)\ndwtnew\n\n\n"} {"package":"BHMSMAfMRI","topic":"waveletcoef","snippet":"### Name: waveletcoef\n### Title: Apply discrete wavelet transform (DWT) to a 2D GLM coefficient\n### map of a regressor\n### Aliases: waveletcoef\n\n### ** Examples\n\nset.seed(1)\nn <- 3\ngrid <- 8\nntime <- 10\ndesignmat <- cbind( rep(1,10), c(rep(c(1,0),5)) )\ndata <- array(dim=c(n,grid,grid,ntime),\n rnorm(n*grid*grid*ntime))\nglm.fit <- glmcoef(n,grid,data,designmat)\nglmcoefstd <- glm.fit$GLMCoefStandardized[,,,1]\nwavecoef <- waveletcoef(n,grid,glmcoefstd)\ndim(wavecoef$WaveletCoefficientMatrix)\n#[1] 3 63\n\n\n"} {"package":"sfaR","topic":"coef","snippet":"### Name: coef\n### Title: Extract coefficients of stochastic frontier models\n### Aliases: coef coef.sfacross coef.summary.sfacross coef.sfalcmcross\n### coef.summary.sfalcmcross coef.sfaselectioncross\n### coef.summary.sfaselectioncross\n### Keywords: coefficients methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\n##D scaling = TRUE, method = 'mla')\n##D coef(tl_u_ts, extraPar = TRUE)\n##D coef(summary(tl_u_ts))\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"dairynorway","snippet":"### Name: dairynorway\n### Title: Data on Norwegian dairy farms\n### Aliases: dairynorway\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(dairynorway)\nsummary(dairynorway)\n\n\n"} {"package":"sfaR","topic":"dairyspain","snippet":"### Name: dairyspain\n### Title: Data on Spanish dairy farms\n### Aliases: dairyspain\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(dairyspain)\nsummary(dairyspain)\n\n\n"} {"package":"sfaR","topic":"efficiencies","snippet":"### Name: efficiencies\n### Title: Compute conditional (in-)efficiency estimates of stochastic\n### frontier models\n### Aliases: efficiencies efficiencies.sfacross efficiencies.sfalcmcross\n### efficiencies.sfaselectioncross\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost 
function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) + log(wl/wf) +\n##D log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) + I(log(wl/wf) *\n##D log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)), udist = 'tnormal',\n##D muhet = ~ regu, uhet = ~ regu, data = utility, S = -1, scaling = TRUE, method = 'mla')\n##D eff.tl_u_ts <- efficiencies(tl_u_ts)\n##D head(eff.tl_u_ts)\n##D summary(eff.tl_u_ts)\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"electricity","snippet":"### Name: electricity\n### Title: Data on U.S. electric power generation\n### Aliases: electricity\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(electricity)\nsummary(electricity)\n\n\n"} {"package":"sfaR","topic":"extract","snippet":"### Name: extract\n### Title: Extract frontier information to be used with *texreg* package\n### Aliases: extract extract.sfacross extract.sfalcmcross\n### extract.sfaselectioncross\n### Keywords: extract methods\n\n### ** Examples\n\n\nhlf <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) + \nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'hnormal', uhet = ~ regu, data = utility, S = -1, method = 'bfgs')\ntrnorm <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) + \nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'tnormal', muhet = ~ regu, data = utility, S = -1, method = 'bfgs')\n\ntscal <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, \nS = -1, method = 'bfgs', scaling = TRUE)\n\nexpo <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'exponential', uhet = ~ regu, data = utility, S = -1, method = 'bfgs')\n\ntexreg::screenreg(list(hlf, trnorm, tscal, expo))\n\n\n\n"} {"package":"sfaR","topic":"fitted","snippet":"### Name: fitted\n### Title: Extract fitted values of stochastic frontier models\n### Aliases: fitted fitted.sfacross fitted.sfalcmcross\n### fitted.sfaselectioncross\n### Keywords: fitted methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on eighty-two countries production (GDP)\n##D # LCM Cobb Douglas (production function) half normal distribution\n##D cb_2c_h <- sfalcmcross(formula = ly ~ lk + ll + yr, udist = 'hnormal', \n##D data = worldprod)\n##D fit.cb_2c_h <- fitted(cb_2c_h)\n##D head(fit.cb_2c_h)\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"ic","snippet":"### Name: ic\n### Title: Extract information criteria of stochastic frontier models\n### Aliases: ic ic.sfacross ic.sfalcmcross ic.sfaselectioncross\n### Keywords: AIC BIC HQIC methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on Swiss railway\n##D # LCM (cost function) half normal distribution\n##D cb_2c_u <- sfalcmcross(formula = LNCT ~ LNQ2 + LNQ3 + LNNET + LNPK + LNPL,\n##D udist = 'hnormal', uhet = ~ 1, data = swissrailways, S = -1, method='ucminf')\n##D ic(cb_2c_u)\n##D ic(cb_2c_u, IC = 'BIC')\n##D 
ic(cb_2c_u, IC = 'HQIC')\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"logLik","snippet":"### Name: logLik\n### Title: Extract log-likelihood value of stochastic frontier models\n### Aliases: logLik logLik.sfacross logLik.sfalcmcross\n### logLik.sfaselectioncross\n### Keywords: likelihood methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\n##D scaling = TRUE, method = 'mla')\n##D logLik(tl_u_ts)\n##D \n##D ## Using data on eighty-two countries production (GDP)\n##D # LCM Cobb Douglas (production function) half normal distribution\n##D cb_2c_h <- sfalcmcross(formula = ly ~ lk + ll + yr, udist = 'hnormal', \n##D data = worldprod, S = 1)\n##D logLik(cb_2c_h, individual = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"marginal","snippet":"### Name: marginal\n### Title: Marginal effects of the inefficiency drivers in stochastic\n### frontier models\n### Aliases: marginal marginal.sfacross marginal.sfalcmcross\n### marginal.sfaselectioncross\n### Keywords: marginal methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'tnormal', muhet = ~ regu + wl, uhet = ~ regu + wl, data = utility, \n##D S = -1, scaling = TRUE, method = 'mla')\n##D marg.tl_u_ts <- marginal(tl_u_ts)\n##D summary(marg.tl_u_ts)\n##D \n##D ## Using data on eighty-two countries production (GDP)\n##D # LCM Cobb Douglas (production function) half normal distribution\n##D cb_2c_h <- sfalcmcross(formula = ly ~ lk + ll + yr, udist = 'hnormal',\n##D data = worldprod, uhet = ~ initStat + h, S = 1, method = 'mla')\n##D marg.cb_2c_h <- marginal(cb_2c_h)\n##D summary(marg.cb_2c_h)\n##D \n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"nobs","snippet":"### Name: nobs\n### Title: Extract total number of observations used in frontier models\n### Aliases: nobs nobs.sfacross nobs.sfalcmcross nobs.sfaselectioncross\n### Keywords: attribute\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog (cost function) half normal with heteroscedasticity\n##D tl_u_h <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'hnormal', uhet = ~ regu, data = utility, S = -1, method = 'bfgs')\n##D nobs(tl_u_h)\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"residuals","snippet":"### Name: residuals\n### Title: Extract residuals of stochastic frontier models\n### Aliases: residuals residuals.sfacross residuals.sfalcmcross\n### residuals.sfaselectioncross\n### Keywords: methods 
residuals\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\n##D scaling = TRUE, method = 'mla')\n##D resid.tl_u_ts <- residuals(tl_u_ts)\n##D head(resid.tl_u_ts)\n##D \n##D ## Using data on eighty-two countries production (GDP)\n##D # LCM Cobb Douglas (production function) half normal distribution\n##D cb_2c_h <- sfalcmcross(formula = ly ~ lk + ll + yr, udist = 'hnormal', \n##D data = worldprod, S = 1)\n##D resid.cb_2c_h <- residuals(cb_2c_h)\n##D head(resid.cb_2c_h)\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"ricephil","snippet":"### Name: ricephil\n### Title: Data on rice production in the Philippines\n### Aliases: ricephil\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(ricephil)\nsummary(ricephil)\n\n\n"} {"package":"sfaR","topic":"sfacross","snippet":"### Name: sfacross\n### Title: Stochastic frontier estimation using cross-sectional data\n### Aliases: sfacross print.sfacross bread.sfacross estfun.sfacross\n### Keywords: cross-section likelihood models optimize\n\n### ** Examples\n\n\n## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n# Translog (cost function) half normal with heteroscedasticity\ntl_u_h <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'hnormal', uhet = ~ regu, data = utility, S = -1, method = 'bfgs')\nsummary(tl_u_h)\n\n# Translog (cost function) truncated normal with heteroscedasticity\ntl_u_t <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'tnormal', muhet = ~ regu, data = utility, S = -1, method = 'bhhh')\nsummary(tl_u_t)\n\n# Translog (cost function) truncated normal with scaling property\ntl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\nscaling = TRUE, method = 'mla')\nsummary(tl_u_ts)\n\n## Using data on Philippine rice producers\n# Cobb Douglas (production function) generalized exponential, and Weibull \n# distributions\n\ncb_p_ge <- sfacross(formula = log(PROD) ~ log(AREA) + log(LABOR) + log(NPK) +\nlog(OTHER), udist = 'genexponential', data = ricephil, S = 1, method = 'bfgs')\nsummary(cb_p_ge)\n\n## Using data on U.S. 
electric utility industry\n# Cost frontier Gamma distribution\ntl_u_g <- sfacross(formula = log(cost/fprice) ~ log(output) + I(log(output)^2) +\nI(log(lprice/fprice)) + I(log(cprice/fprice)), udist = 'gamma', uhet = ~ 1,\ndata = electricity, S = -1, method = 'bfgs', simType = 'halton', Nsim = 200,\nhessianType = 2)\nsummary(tl_u_g)\n\n\n\n"} {"package":"sfaR","topic":"sfalcmcross","snippet":"### Name: sfalcmcross\n### Title: Latent class stochastic frontier using cross-sectional data\n### Aliases: sfalcmcross print.sfalcmcross bread.sfalcmcross\n### estfun.sfalcmcross\n### Keywords: cross-section latent-class likelihood models optimize\n\n### ** Examples\n\n\n## Using data on eighty-two countries production (GDP)\n# LCM Cobb Douglas (production function) half normal distribution\n# Intercept and initStat used as separating variables\ncb_2c_h1 <- sfalcmcross(formula = ly ~ lk + ll + yr, thet = ~initStat, \ndata = worldprod)\nsummary(cb_2c_h1)\n\n# summary of the initial ML model\nsummary(cb_2c_h1$InitHalf)\n\n# Only the intercept is used as the separating variable\n# and only variable initStat is used as inefficiency driver\ncb_2c_h3 <- sfalcmcross(formula = ly ~ lk + ll + yr, uhet = ~initStat, \ndata = worldprod)\nsummary(cb_2c_h3)\n\n\n\n"} {"package":"sfaR","topic":"sfaselectioncross","snippet":"### Name: sfaselectioncross\n### Title: Sample selection in stochastic frontier estimation using\n### cross-section data\n### Aliases: sfaselectioncross print.sfaselectioncross\n### bread.sfaselectioncross estfun.sfaselectioncross\n### Keywords: cross-section likelihood models optimize\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## Simulated example\n##D \n##D N <- 2000 # sample size\n##D set.seed(12345)\n##D z1 <- rnorm(N)\n##D z2 <- rnorm(N)\n##D v1 <- rnorm(N)\n##D v2 <- rnorm(N)\n##D e1 <- v1\n##D e2 <- 0.7071 * (v1 + v2)\n##D ds <- z1 + z2 + e1\n##D d <- ifelse(ds > 0, 1, 0)\n##D u <- abs(rnorm(N))\n##D x1 <- rnorm(N)\n##D x2 <- rnorm(N)\n##D y <- x1 + x2 + e2 - u\n##D data <- cbind(y = y, x1 = x1, x2 = x2, z1 = z1, z2 = z2, d = d)\n##D \n##D ## Estimation using quadrature (Gauss-Kronrod)\n##D \n##D selecRes1 <- sfaselectioncross(selectionF = d ~ z1 + z2, frontierF = y ~ x1 + x2, \n##D modelType = 'greene10', method = 'bfgs',\n##D logDepVar = TRUE, data = as.data.frame(data),\n##D S = 1L, udist = 'hnormal', lType = 'kronrod', Nsub = 100, uBound = Inf,\n##D simType = 'halton', Nsim = 300, prime = 2L, burn = 10, antithetics = FALSE,\n##D seed = 12345, itermax = 2000, printInfo = FALSE)\n##D \n##D summary(selecRes1)\n##D \n##D ## Estimation using maximum simulated likelihood\n##D \n##D selecRes2 <- sfaselectioncross(selectionF = d ~ z1 + z2, frontierF = y ~ x1 + x2, \n##D modelType = 'greene10', method = 'bfgs',\n##D logDepVar = TRUE, data = as.data.frame(data),\n##D S = 1L, udist = 'hnormal', lType = 'msl', Nsub = 100, uBound = Inf,\n##D simType = 'halton', Nsim = 300, prime = 2L, burn = 10, antithetics = FALSE,\n##D seed = 12345, itermax = 2000, printInfo = FALSE)\n##D \n##D summary(selecRes2)\n##D \n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"skewnessTest","snippet":"### Name: skewnessTest\n### Title: Skewness test for stochastic frontier models\n### Aliases: skewnessTest\n### Keywords: methods\n\n### ** Examples\n\n\n## Not run: \n##D ## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n##D # Translog SFA (cost function) truncated normal with scaling property\n##D tl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\n##D 
log(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\n##D I(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\n##D udist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\n##D scaling = TRUE, method = 'mla')\n##D skewnessTest(tl_u_ts)\n##D skewnessTest(tl_u_ts, test = 'coelli')\n## End(Not run)\n\n\n\n"} {"package":"sfaR","topic":"summary","snippet":"### Name: summary\n### Title: Summary of results for stochastic frontier models\n### Aliases: summary summary.sfacross print.summary.sfacross\n### summary.sfalcmcross print.summary.sfalcmcross\n### summary.sfaselectioncross print.summary.sfaselectioncross\n### Keywords: methods summary\n\n### ** Examples\n\n\n## Using data on fossil fuel fired steam electric power generation plants in the U.S.\n# Translog SFA (cost function) truncated normal with scaling property\ntl_u_ts <- sfacross(formula = log(tc/wf) ~ log(y) + I(1/2 * (log(y))^2) +\nlog(wl/wf) + log(wk/wf) + I(1/2 * (log(wl/wf))^2) + I(1/2 * (log(wk/wf))^2) +\nI(log(wl/wf) * log(wk/wf)) + I(log(y) * log(wl/wf)) + I(log(y) * log(wk/wf)),\nudist = 'tnormal', muhet = ~ regu, uhet = ~ regu, data = utility, S = -1,\nscaling = TRUE, method = 'mla')\nsummary(tl_u_ts, grad = TRUE, ci = TRUE)\n\n\n\n"} {"package":"sfaR","topic":"swissrailways","snippet":"### Name: swissrailways\n### Title: Data on Swiss railway companies\n### Aliases: swissrailways\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(swissrailways)\n\n\n"} {"package":"sfaR","topic":"utility","snippet":"### Name: utility\n### Title: Data on U.S. electricity generating plants\n### Aliases: utility\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(utility)\nsummary(utility)\n\n\n"} {"package":"sfaR","topic":"vcov","snippet":"### Name: vcov\n### Title: Compute variance-covariance matrix of stochastic frontier models\n### Aliases: vcov vcov.sfacross vcov.sfalcmcross vcov.sfaselectioncross\n### Keywords: methods vcov\n\n### ** Examples\n\n\n## Using data on Spanish dairy farms\n# Cobb Douglas (production function) half normal distribution\ncb_s_h <- sfacross(formula = YIT ~ X1 + X2 + X3 + X4, udist = 'hnormal',\ndata = dairyspain, S = 1, method = 'bfgs')\nvcov(cb_s_h)\nvcov(cb_s_h, extraPar = TRUE)\n \n# Other variance-covariance matrices can be obtained using the sandwich package\n \n# Robust variance-covariance matrix\n \nrequireNamespace('sandwich', quietly = TRUE)\n \nsandwich::vcovCL(cb_s_h)\n \n# Coefficients and standard errors can be obtained using lmtest package\n \nrequireNamespace('lmtest', quietly = TRUE)\n \nlmtest::coeftest(cb_s_h, vcov. = sandwich::vcovCL)\n \n# Clustered standard errors\n \nlmtest::coeftest(cb_s_h, vcov. = sandwich::vcovCL, cluster = ~ FARM)\n \n# Doubly clustered standard errors\n \nlmtest::coeftest(cb_s_h, vcov. = sandwich::vcovCL, cluster = ~ FARM + YEAR)\n \n# BHHH standard errors\n \nlmtest::coeftest(cb_s_h, vcov. = sandwich::vcovOPG)\n \n# Adjusted BHHH standard errors\n \nlmtest::coeftest(cb_s_h, vcov. 
= sandwich::vcovOPG, adjust = TRUE)\n\n## Using data on eighty-two countries production (GDP)\n# LCM Cobb Douglas (production function) half normal distribution\ncb_2c_h <- sfalcmcross(formula = ly ~ lk + ll + yr, udist = 'hnormal',\ndata = worldprod, uhet = ~ initStat, S = 1)\nvcov(cb_2c_h)\n\n\n\n"} {"package":"sfaR","topic":"worldprod","snippet":"### Name: worldprod\n### Title: Data on world production\n### Aliases: worldprod\n### Keywords: datasets\n\n### ** Examples\n\n\nstr(worldprod)\nsummary(worldprod)\n\n\n"} {"package":"survminer","topic":"BMT","snippet":"### Name: BMT\n### Title: Bone Marrow Transplant\n### Aliases: BMT\n\n### ** Examples\n\ndata(BMT)\nif(require("cmprsk")){\n\n# Data preparation\n#+++++++++++++++++++++\n# Label diseases\nBMT$dis <- factor(BMT$dis, levels = c(0,1),\n labels = c("ALL", "AML"))\n# Label status\nBMT$status <- factor(BMT$status, levels = c(0,1,2),\n labels = c("Censored","Mortality","Relapse"))\n\n# Cumulative Incidence Function\n# ++++++++++++++++++++++++++\nfit <- cmprsk::cuminc(\n ftime = BMT$ftime, # Failure time variable\n fstatus = BMT$status, # Codes for different causes of failure\n group = BMT$dis # Estimates will be calculated within groups\n )\n\n# Visualize\n# +++++++++++++++++++++++\nggcompetingrisks(fit)\nggcompetingrisks(fit, multiple_panels = FALSE,\n legend = "right")\n\n}\n\n\n\n"} {"package":"survminer","topic":"BRCAOV.survInfo","snippet":"### Name: BRCAOV.survInfo\n### Title: Breast and Ovarian Cancers Survival Information\n### Aliases: BRCAOV.survInfo\n\n### ** Examples\n\ndata(BRCAOV.survInfo)\nlibrary(survival)\nfit <- survfit(Surv(times, patient.vital_status) ~ admin.disease_code,\n data = BRCAOV.survInfo)\nggsurvplot(fit, data = BRCAOV.survInfo, risk.table = TRUE)\n\n\n"} {"package":"survminer","topic":"add_ggsurvplot","snippet":"### Name: add_ggsurvplot\n### Title: Add Components to a ggsurvplot\n### Aliases: add_ggsurvplot +.ggsurv %++%\n\n### ** Examples\n\n# Fit survival curves\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ sex, data = lung)\n\n# Basic survival curves\np <- ggsurvplot(fit, data = lung, risk.table = TRUE,\n main = "Survival curve",\n submain = "Based on Kaplan-Meier estimates",\n caption = "created with survminer"\n )\np\n\n# Customizing the plots\np + theme_survminer(\n font.main = c(16, "bold", "darkblue"),\n font.submain = c(15, "bold.italic", "purple"),\n font.caption = c(14, "plain", "orange"),\n font.x = c(14, "bold.italic", "red"),\n font.y = c(14, "bold.italic", "darkred"),\n font.tickslab = c(12, "plain", "darkgreen")\n)\n\n\n"} {"package":"survminer","topic":"arrange_ggsurvplots","snippet":"### Name: arrange_ggsurvplots\n### Title: Arranging Multiple ggsurvplots\n### Aliases: arrange_ggsurvplots\n\n### ** Examples\n\n\n# Fit survival curves\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ sex, data = lung)\n\n# List of ggsurvplots\nrequire("survminer")\nsplots <- list()\nsplots[[1]] <- ggsurvplot(fit, data = lung, risk.table = TRUE, ggtheme = theme_minimal())\nsplots[[2]] <- ggsurvplot(fit, data = lung, risk.table = TRUE, ggtheme = theme_grey())\n\n# Arrange multiple ggsurvplots and print the output\narrange_ggsurvplots(splots, print = TRUE,\n ncol = 2, nrow = 1, risk.table.height = 0.4)\n\n## Not run: \n##D # Arrange and save into pdf file\n##D res <- arrange_ggsurvplots(splots, print = FALSE)\n##D ggsave("myfile.pdf", res)\n## End(Not run)\n\n\n\n\n"} {"package":"survminer","topic":"ggadjustedcurves","snippet":"### Name: 
ggadjustedcurves\n### Title: Adjusted Survival Curves for Cox Proportional Hazards Model\n### Aliases: ggadjustedcurves surv_adjustedcurves\n\n### ** Examples\n\n\nlibrary(survival)\nfit2 <- coxph( Surv(stop, event) ~ size, data = bladder )\n# single curve\nggadjustedcurves(fit2, data = bladder)\ncurve <- surv_adjustedcurves(fit2, data = bladder)\n\nfit2 <- coxph( Surv(stop, event) ~ size + strata(rx), data = bladder )\n# average in groups\nggadjustedcurves(fit2, data = bladder, method = "average", variable = "rx")\ncurve <- surv_adjustedcurves(fit2, data = bladder, method = "average", variable = "rx")\n\n# marginal balancing in groups\nggadjustedcurves(fit2, data = bladder, method = "marginal", variable = "rx")\ncurve <- surv_adjustedcurves(fit2, data = bladder, method = "marginal", variable = "rx")\n\n# selected reference population\nggadjustedcurves(fit2, data = bladder, method = "marginal", variable = "rx",\n reference = bladder[bladder$rx == "1",])\n\n# conditional balancing in groups\nggadjustedcurves(fit2, data = bladder, method = "conditional", variable = "rx")\ncurve <- surv_adjustedcurves(fit2, data = bladder, method = "conditional", variable = "rx")
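\n\n# a quick check added here: surv_adjustedcurves() returns the data\n# behind the plot (column names as documented in ?surv_adjustedcurves)\nhead(curve)\n\n## Not run: \n##D # this will take some time\n##D fdata <- flchain[flchain$futime >=7,]\n##D fdata$age2 <- cut(fdata$age, c(0,54, 59,64, 69,74,79, 89, 110),\n##D labels = c(paste(c(50,55,60,65,70,75,80),\n##D c(54,59,64,69,74,79,89), sep='-'), "90+"))\n##D fdata$group <- factor(1+ 1*(fdata$flc.grp >7) + 1*(fdata$flc.grp >9),\n##D levels=1:3,\n##D labels=c("FLC < 3.38", "3.38 - 4.71", "FLC > 4.71"))\n##D # single curve\n##D fit <- coxph( Surv(futime, death) ~ age*sex, data = fdata)\n##D ggadjustedcurves(fit, data = fdata, method = "single")\n##D \n##D # average in groups\n##D fit <- coxph( Surv(futime, death) ~ age*sex + strata(group), data = fdata)\n##D ggadjustedcurves(fit, data = fdata, method = "average")\n##D \n##D # conditional balancing in groups\n##D ggadjustedcurves(fit, data = fdata, method = "conditional")\n##D \n##D # marginal balancing in groups\n##D ggadjustedcurves(fit, data = fdata, method = "marginal", reference = fdata)\n## End(Not run)\n\n\n\n"} {"package":"survminer","topic":"ggcompetingrisks","snippet":"### Name: ggcompetingrisks\n### Title: Cumulative Incidence Curves for Competing Risks\n### Aliases: ggcompetingrisks\n\n### ** Examples\n\n## Not run: \n##D if(require("cmprsk")){\n##D \n##D set.seed(2)\n##D ss <- rexp(100)\n##D gg <- factor(sample(1:3,100,replace=TRUE),1:3,c('BRCA','LUNG','OV'))\n##D cc <- factor(sample(0:2,100,replace=TRUE),0:2,c('no event', 'death', 'progression'))\n##D strt <- sample(1:2,100,replace=TRUE)\n##D \n##D # handles cuminc objects\n##D print(fit <- cmprsk::cuminc(ss,cc,gg,strt))\n##D ggcompetingrisks(fit)\n##D ggcompetingrisks(fit, multiple_panels = FALSE)\n##D ggcompetingrisks(fit, conf.int = TRUE)\n##D ggcompetingrisks(fit, multiple_panels = FALSE, conf.int = TRUE)\n##D \n##D # handles survfitms objects\n##D library(survival)\n##D df <- data.frame(time = ss, group = gg, status = cc, strt)\n##D fit2 <- survfit(Surv(time, status, type="mstate") ~ 1, data=df)\n##D ggcompetingrisks(fit2)\n##D fit3 <- survfit(Surv(time, status, type="mstate") ~ group, data=df)\n##D ggcompetingrisks(fit3)\n##D }\n##D \n##D library(ggsci)\n##D library(cowplot)\n##D ggcompetingrisks(fit3) + theme_cowplot() + scale_fill_jco()\n## End(Not run)\n\n\n"} {"package":"survminer","topic":"ggcoxdiagnostics","snippet":"### Name: ggcoxdiagnostics\n### Title: 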
Diagnostic Plots for Cox Proportional Hazards Model with ggplot2\n### Aliases: ggcoxdiagnostics\n\n### ** Examples\n\n\nlibrary(survival)\ncoxph.fit2 <- coxph(Surv(futime, fustat) ~ age + ecog.ps, data=ovarian)\nggcoxdiagnostics(coxph.fit2, type = \"deviance\")\n\nggcoxdiagnostics(coxph.fit2, type = \"schoenfeld\", title = \"Diagnostic plot\")\nggcoxdiagnostics(coxph.fit2, type = \"deviance\", ox.scale = \"time\")\nggcoxdiagnostics(coxph.fit2, type = \"schoenfeld\", ox.scale = \"time\",\n title = \"Diagnostic plot\", subtitle = \"Data comes from survey XYZ\",\n font.subtitle = 9)\nggcoxdiagnostics(coxph.fit2, type = \"deviance\", ox.scale = \"linear.predictions\",\n caption = \"Code is available here - link\", font.caption = 10)\nggcoxdiagnostics(coxph.fit2, type = \"schoenfeld\", ox.scale = \"observation.id\")\nggcoxdiagnostics(coxph.fit2, type = \"scaledsch\", ox.scale = \"time\")\n\n\n\n"} {"package":"survminer","topic":"ggcoxfunctional","snippet":"### Name: ggcoxfunctional\n### Title: Functional Form of Continuous Variable in Cox Proportional\n### Hazards Model\n### Aliases: ggcoxfunctional print.ggcoxfunctional\n\n### ** Examples\n\n\nlibrary(survival)\ndata(mgus)\nres.cox <- coxph(Surv(futime, death) ~ mspike + log(mspike) + I(mspike^2) +\n age + I(log(age)^2) + I(sqrt(age)), data = mgus)\nggcoxfunctional(res.cox, data = mgus, point.col = \"blue\", point.alpha = 0.5)\nggcoxfunctional(res.cox, data = mgus, point.col = \"blue\", point.alpha = 0.5,\n title = \"Pass the title\", caption = \"Pass the caption\")\n\n\n\n\n"} {"package":"survminer","topic":"ggcoxzph","snippet":"### Name: ggcoxzph\n### Title: Graphical Test of Proportional Hazards with ggplot2\n### Aliases: ggcoxzph print.ggcoxzph\n\n### ** Examples\n\n\nlibrary(survival)\nfit <- coxph(Surv(futime, fustat) ~ age + ecog.ps + rx, data=ovarian)\ncox.zph.fit <- cox.zph(fit)\n# plot all variables\nggcoxzph(cox.zph.fit)\n# plot all variables in specified order\nggcoxzph(cox.zph.fit, var = c(\"ecog.ps\", \"rx\", \"age\"), font.main = 12)\n# plot specified variables in specified order\nggcoxzph(cox.zph.fit, var = c(\"ecog.ps\", \"rx\"), font.main = 12, caption = \"Caption goes here\")\n\n\n\n"} {"package":"survminer","topic":"ggflexsurvplot","snippet":"### Name: ggflexsurvplot\n### Title: Ggplots of Fitted Flexible Survival Models\n### Aliases: ggflexsurvplot\n\n### ** Examples\n\n## No test: \nif(require(\"flexsurv\")) {\nfit <- flexsurvreg(Surv(rectime, censrec) ~ group,\n dist = \"gengamma\", data = bc)\nggflexsurvplot(fit)\n}\n## End(No test)\n\n\n\n"} {"package":"survminer","topic":"ggforest","snippet":"### Name: ggforest\n### Title: Forest Plot for Cox Proportional Hazards Model\n### Aliases: ggforest\n\n### ** Examples\n\nrequire(\"survival\")\nmodel <- coxph( Surv(time, status) ~ sex + rx + adhere,\n data = colon )\nggforest(model)\n\ncolon <- within(colon, {\n sex <- factor(sex, labels = c(\"female\", \"male\"))\n differ <- factor(differ, labels = c(\"well\", \"moderate\", \"poor\"))\n extent <- factor(extent, labels = c(\"submuc.\", \"muscle\", \"serosa\", \"contig.\"))\n})\nbigmodel <-\n coxph(Surv(time, status) ~ sex + rx + adhere + differ + extent + node4,\n data = colon )\nggforest(bigmodel)\n\n\n\n"} {"package":"survminer","topic":"ggsurvevents","snippet":"### Name: ggsurvevents\n### Title: Distribution of Events' Times\n### Aliases: ggsurvevents\n\n### ** Examples\n\nrequire(\"survival\")\n# from Surv\nsurv <- Surv(lung$time, lung$status)\nggsurvevents(surv)\n\nsurv2 <- Surv(colon$time, 
colon$status)\nggsurvevents(surv2)\nggsurvevents(surv2, normalized = TRUE)\n\n# from survfit\nfit <- survfit(Surv(time, status) ~ sex, data = lung)\nggsurvevents(fit = fit, data = lung)\n\n# from coxph\nmodel <- coxph( Surv(time, status) ~ sex + rx + adhere, data = colon )\nggsurvevents(fit = model, data = colon)\nggsurvevents(surv2, normalized = TRUE, type = "radius")\nggsurvevents(surv2, normalized = TRUE, type = "fraction")\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot","snippet":"### Name: ggsurvplot\n### Title: Drawing Survival Curves Using ggplot2\n### Aliases: ggsurvplot print.ggsurvplot\n\n### ** Examples\n\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Example 1: Survival curves with two groups\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n# Fit survival curves\n#++++++++++++++++++++++++++++++++++++\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ sex, data = lung)\n\n# Basic survival curves\nggsurvplot(fit, data = lung)\n\n# Customized survival curves\nggsurvplot(fit, data = lung,\n surv.median.line = "hv", # Add median survival lines\n\n # Change legends: title & labels\n legend.title = "Sex",\n legend.labs = c("Male", "Female"),\n # Add p-value and confidence intervals\n pval = TRUE,\n\n conf.int = TRUE,\n # Add risk table\n risk.table = TRUE,\n tables.height = 0.2,\n tables.theme = theme_cleantable(),\n\n # Color palettes. Use custom color: c("#E7B800", "#2E9FDF"),\n # or brewer color (e.g.: "Dark2"), or ggsci color (e.g.: "jco")\n palette = c("#E7B800", "#2E9FDF"),\n ggtheme = theme_bw() # Change ggplot2 theme\n)\n\n# Change font size, style and color\n#++++++++++++++++++++++++++++++++++++\n## Not run: \n##D # Change font size, style and color at the same time\n##D ggsurvplot(fit, data = lung, main = "Survival curve",\n##D font.main = c(16, "bold", "darkblue"),\n##D font.x = c(14, "bold.italic", "red"),\n##D font.y = c(14, "bold.italic", "darkred"),\n##D font.tickslab = c(12, "plain", "darkgreen"))\n## End(Not run)\n\n\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Example 2: Facet ggsurvplot() output by\n# a combination of factors\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n\n# Fit (complex) survival curves\n#++++++++++++++++++++++++++++++++++++\n## Not run: \n##D require("survival")\n##D fit3 <- survfit( Surv(time, status) ~ sex + rx + adhere,\n##D data = colon )\n##D \n##D # Visualize\n##D #++++++++++++++++++++++++++++++++++++\n##D ggsurv <- ggsurvplot(fit3, data = colon,\n##D fun = "cumhaz", conf.int = TRUE,\n##D risk.table = TRUE, risk.table.col="strata",\n##D ggtheme = theme_bw())\n##D \n##D # Faceting survival curves\n##D curv_facet <- ggsurv$plot + facet_grid(rx ~ adhere)\n##D curv_facet\n##D \n##D # Faceting risk tables:\n##D # Generate risk table for each facet plot item\n##D ggsurv$table + facet_grid(rx ~ adhere, scales = "free")+\n##D theme(legend.position = "none")\n##D \n##D # Generate risk table for each facet column\n##D tbl_facet <- ggsurv$table + facet_grid(.~ adhere, scales = "free")\n##D tbl_facet + theme(legend.position = "none")\n##D \n##D # Arrange faceted survival curves and risk tables\n##D g2 <- ggplotGrob(curv_facet)\n##D g3 <- ggplotGrob(tbl_facet)\n##D min_ncol <- min(ncol(g2), ncol(g3))\n##D g <- gridExtra::gtable_rbind(g2[, 1:min_ncol], g3[, 1:min_ncol], size="last")\n##D g$widths <- grid::unit.pmax(g2$widths, g3$widths)\n##D grid::grid.newpage()\n##D grid::grid.draw(g)\n##D \n## End(Not run)\n\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Example 3: CUSTOMIZED 
PVALUE\n#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n# Customized p-value\nggsurvplot(fit, data = lung, pval = TRUE)\nggsurvplot(fit, data = lung, pval = 0.03)\nggsurvplot(fit, data = lung, pval = \"The hot p-value is: 0.031\")\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot_add_all","snippet":"### Name: ggsurvplot_add_all\n### Title: Add Survival Curves of Pooled Patients onto the Main Plot\n### Aliases: ggsurvplot_add_all\n\n### ** Examples\n\nlibrary(survival)\n\n# Fit survival curves\nfit <- surv_fit(Surv(time, status) ~ sex, data = lung)\n\n# Visualize survival curves\nggsurvplot(fit, data = lung,\n risk.table = TRUE, pval = TRUE,\n surv.median.line = \"hv\", palette = \"jco\")\n\n# Add survival curves of pooled patients (Null model)\n# Use add.all = TRUE option\nggsurvplot(fit, data = lung,\n risk.table = TRUE, pval = TRUE,\n surv.median.line = \"hv\", palette = \"jco\",\n add.all = TRUE)\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot_combine","snippet":"### Name: ggsurvplot_combine\n### Title: Combine a List of Survfit Objects on the Same Plot\n### Aliases: ggsurvplot_combine\n\n### ** Examples\n\nlibrary(survival)\n# Create a demo data set\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n set.seed(123)\n demo.data <- data.frame(\n os.time = colon$time,\n os.status = colon$status,\n pfs.time = sample(colon$time),\n pfs.status = colon$status,\n sex = colon$sex, rx = colon$rx, adhere = colon$adhere\n )\n\n# Ex1: Combine null models\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n # Fit\n pfs <- survfit( Surv(pfs.time, pfs.status) ~ 1, data = demo.data)\n os <- survfit( Surv(os.time, os.status) ~ 1, data = demo.data)\n # Combine on the same plot\n fit <- list(PFS = pfs, OS = os)\n ggsurvplot_combine(fit, demo.data)\n\n# Combine survival curves stratified by treatment assignment rx\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n# Fit\npfs <- survfit( Surv(pfs.time, pfs.status) ~ rx, data = demo.data)\nos <- survfit( Surv(os.time, os.status) ~ rx, data = demo.data)\n# Combine on the same plot\nfit <- list(PFS = pfs, OS = os)\nggsurvplot_combine(fit, demo.data)\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot_df","snippet":"### Name: ggsurvplot_df\n### Title: Plot Survival Curves from Survival Summary Data Frame\n### Aliases: ggsurvplot_df\n\n### ** Examples\n\nlibrary(survival)\n\n# Fit survival curves\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit1 <- survfit( Surv(time, status) ~ 1, data = colon)\nfit2 <- survfit( Surv(time, status) ~ adhere, data = colon)\n\n# Summary\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nhead(surv_summary(fit1, colon))\n\nhead(surv_summary(fit2, colon))\n\n# Visualize\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nggsurvplot_df(surv_summary(fit1, colon))\n\nggsurvplot_df(surv_summary(fit2, colon), conf.int = TRUE,\n legend.title = \"Adhere\", legend.labs = c(\"0\", \"1\"))\n\n# Kaplan-Meier estimate\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nout_km <- survfit(Surv(time, status) ~ 1, data = lung)\n\n# Weibull model\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nwb <- survreg(Surv(time, status) ~ 1, data = lung)\ns <- seq(.01, .99, by = .01)\nt <- predict(wb, type = \"quantile\", p = s, newdata = lung[1, ])\nout_wb <- data.frame(time = t, surv = 1 - s, upper = NA, lower = NA, std.err = NA)\n\n# plot both\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\np_km <- ggsurvplot(out_km, conf.int = FALSE)\np_wb <- ggsurvplot(out_wb, conf.int 
= FALSE, surv.geom = geom_line)\n\np_km\np_wb\np_km$plot + geom_line(data = out_wb, aes(x = time, y = surv))\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot_facet","snippet":"### Name: ggsurvplot_facet\n### Title: Facet Survival Curves into Multiple Panels\n### Aliases: ggsurvplot_facet\n\n### ** Examples\n\nlibrary(survival)\n\n# Facet by one grouping variable: rx\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit <- survfit( Surv(time, status) ~ sex, data = colon )\nggsurvplot_facet(fit, colon, facet.by = "rx",\n palette = "jco", pval = TRUE)\n\n# Facet by two grouping variables: rx and adhere\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nggsurvplot_facet(fit, colon, facet.by = c("rx", "adhere"),\n palette = "jco", pval = TRUE)\n\n\n# Another fit\n#::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit2 <- survfit( Surv(time, status) ~ sex + rx, data = colon )\nggsurvplot_facet(fit2, colon, facet.by = "adhere",\n palette = "jco", pval = TRUE)\n\n\n\n"} {"package":"survminer","topic":"ggsurvplot_group_by","snippet":"### Name: ggsurvplot_group_by\n### Title: Survival Curves of Grouped Data sets\n### Aliases: ggsurvplot_group_by\n\n### ** Examples\n\n# Fit survival curves\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nlibrary(survival)\nfit <- survfit( Surv(time, status) ~ sex, data = colon )\n\n# Visualize: grouped by treatment rx\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nggsurv.list <- ggsurvplot_group_by(fit, colon, group.by = "rx", risk.table = TRUE,\n pval = TRUE, conf.int = TRUE, palette = "jco")\nnames(ggsurv.list)\n\n\n# Visualize: grouped by treatment rx and adhere\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nggsurv.list <- ggsurvplot_group_by(fit, colon, group.by = c("rx", "adhere"),\n risk.table = TRUE,\n pval = TRUE, conf.int = TRUE, palette = "jco")\n\nnames(ggsurv.list)\n\n\n"} {"package":"survminer","topic":"ggsurvplot_list","snippet":"### Name: ggsurvplot_list\n### Title: Plot a List of Survfit Objects\n### Aliases: ggsurvplot_list\n\n### ** Examples\n\n\nlibrary(survival)\n\n# Create a list of survfit objects\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\ndata(colon)\nf1 <- survfit(Surv(time, status) ~ adhere, data = colon)\nf2 <- survfit(Surv(time, status) ~ rx, data = colon)\nfits <- list(adhere = f1, rx = f2)\n\n# Visualize\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nlegend.title <- list("adhere", "rx")\nggsurvplot_list(fits, colon, legend.title = legend.title)\n\n\n\n"} {"package":"survminer","topic":"ggrisktable","snippet":"### Name: ggrisktable\n### Title: Plot Survival Tables\n### Aliases: ggrisktable ggcumevents ggcumcensor ggsurvtable\n\n### ** Examples\n\n# Fit survival curves\n#:::::::::::::::::::::::::::::::::::::::::::::::\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ sex, data = lung)\n\n# Survival tables\n#:::::::::::::::::::::::::::::::::::::::::::::::\ntables <- ggsurvtable(fit, data = lung, color = "strata",\n y.text = FALSE)\n\n# Risk table\ntables$risk.table\n\n# Number of cumulative events\ntables$cumevents\n\n# Number of cumulative censoring\ntables$cumcensor\n\n\n"} {"package":"survminer","topic":"theme_survminer","snippet":"### Name: theme_survminer\n### Title: Theme for Survminer Plots\n### Aliases: theme_survminer theme_cleantable\n\n### ** Examples\n\n\n# Fit survival curves\n#++++++++++++++++++++++++++++++++++++\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ sex, data 
= lung)\n\n# Basic survival curves\n#++++++++++++++++++++++++++++++++++++\nggsurv <- ggsurvplot(fit, data = lung, risk.table = TRUE,\n main = "Survival curves",\n submain = "Based on Kaplan-Meier estimates",\n caption = "created with survminer"\n )\n\n# Change font size, style and color\n#++++++++++++++++++++++++++++++++++++\n# Change font size, style and color at the same time\n# Use font.x = 14, to change only font size; or use\n# font.x = "bold", to change only font face.\nggsurv %++% theme_survminer(\n font.main = c(16, "bold", "darkblue"),\n font.submain = c(15, "bold.italic", "purple"),\n font.caption = c(14, "plain", "orange"),\n font.x = c(14, "bold.italic", "red"),\n font.y = c(14, "bold.italic", "darkred"),\n font.tickslab = c(12, "plain", "darkgreen")\n )
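\n\n# per the note above, a single value should also work on its own --\n# e.g. changing only the x-axis font size (a minimal sketch):\nggsurv %++% theme_survminer(font.x = 14)\n\n# Clean risk table\n# +++++++++++++++++++++++++++++\nggsurv$table <- ggsurv$table + theme_cleantable()\nggsurv\n\n\n\n"} {"package":"survminer","topic":"myeloma","snippet":"### Name: myeloma\n### Title: Multiple Myeloma Data\n### Aliases: myeloma\n\n### ** Examples\n\ndata(myeloma)\nhead(myeloma)\n\n\n\n"} {"package":"survminer","topic":"pairwise_survdiff","snippet":"### Name: pairwise_survdiff\n### Title: Multiple Comparisons of Survival Curves\n### Aliases: pairwise_survdiff\n\n### ** Examples\n\n\nlibrary(survival)\nlibrary(survminer)\ndata(myeloma)\n\n# Pairwise survdiff\nres <- pairwise_survdiff(Surv(time, event) ~ molecular_group,\n data = myeloma)\nres\n\n# Symbolic number coding\nsymnum(res$p.value, cutpoints = c(0, 0.0001, 0.001, 0.01, 0.05, 0.1, 1),\n symbols = c("****", "***", "**", "*", "+", " "),\n abbr.colnames = FALSE, na = "")\n\n\n\n\n"} {"package":"survminer","topic":"surv_cutpoint","snippet":"### Name: surv_cutpoint\n### Title: Determine the Optimal Cutpoint for Continuous Variables\n### Aliases: surv_cutpoint surv_categorize summary.surv_cutpoint\n### print.surv_cutpoint plot.surv_cutpoint print.plot_surv_cutpoint\n\n### ** Examples\n\n# 0. Load some data\ndata(myeloma)\nhead(myeloma)\n\n# 1. Determine the optimal cutpoint of variables\nres.cut <- surv_cutpoint(myeloma, time = "time", event = "event",\n variables = c("DEPDC1", "WHSC1", "CRIM1"))\n\nsummary(res.cut)\n\n# 2. Plot cutpoint for DEPDC1\n# palette = "npg" (nature publishing group), see ?ggpubr::ggpar\nplot(res.cut, "DEPDC1", palette = "npg")\n\n# 3. Categorize variables\nres.cat <- surv_categorize(res.cut)\nhead(res.cat)\n\n# 4. 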
Fit survival curves and visualize\nlibrary(\"survival\")\nfit <- survfit(Surv(time, event) ~DEPDC1, data = res.cat)\nggsurvplot(fit, data = res.cat, risk.table = TRUE, conf.int = TRUE)\n\n\n\n"} {"package":"survminer","topic":"surv_fit","snippet":"### Name: surv_fit\n### Title: Create Survival Curves\n### Aliases: surv_fit\n\n### ** Examples\n\n\nlibrary(\"survival\")\nlibrary(\"magrittr\")\n\n# Case 1: One formula and One data set\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit <- surv_fit(Surv(time, status) ~ sex,\n data = colon)\nsurv_pvalue(fit)\n\n\n# Case 2: List of formulas and One data set.\n# - Different formulas are applied to the same data set\n# - Returns a (named) list of survfit objects\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n# Create a named list of formulas\nformulas <- list(\n sex = Surv(time, status) ~ sex,\n rx = Surv(time, status) ~ rx\n)\n\n# Fit survival curves for each formula\nfit <- surv_fit(formulas, data = colon)\nsurv_pvalue(fit)\n\n# Case 3: One formula and List of data sets\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit <- surv_fit(Surv(time, status) ~ sex,\n data = list(colon, lung))\nsurv_pvalue(fit)\n\n\n# Case 4: List of formulas and List of data sets\n# - Each formula is applied to each of the data in the data list\n# - argument: match.fd = FALSE\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n\n# Create two data sets\nset.seed(123)\ncolon1 <- dplyr::sample_frac(colon, 1/2)\nset.seed(1234)\ncolon2 <- dplyr::sample_frac(colon, 1/2)\n\n# Create a named list of formulas\nformula.list <- list(\n sex = Surv(time, status) ~ sex,\n adhere = Surv(time, status) ~ adhere,\n rx = Surv(time, status) ~ rx\n)\n\n# Fit survival curves\nfit <- surv_fit(formula.list, data = list(colon1, colon2),\n match.fd = FALSE)\nsurv_pvalue(fit)\n\n\n# Grouped survfit\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\n# - Group by the treatment \"rx\" and fit survival curves on each subset\n# - Returns a list of survfit objects\nfit <- surv_fit(Surv(time, status) ~ sex,\n data = colon, group.by = \"rx\")\n\n# Alternatively, do this\nfit <- colon %>%\n surv_group_by(\"rx\") %>%\n surv_fit(Surv(time, status) ~ sex, data = .)\n\nsurv_pvalue(fit)\n\n\n\n"} {"package":"survminer","topic":"surv_group_by","snippet":"### Name: surv_group_by\n### Title: Create a Grouped Dataset for Survival Analysis\n### Aliases: surv_group_by\n\n### ** Examples\n\nlibrary(\"survival\")\nlibrary(\"magrittr\")\n\n# Grouping by one variables: treatment \"rx\"\n#::::::::::::::::::::::::::::::::::::::::::\ngrouped.d <- colon %>%\n surv_group_by(\"rx\")\n\ngrouped.d # print\n\ngrouped.d$data # Access to the data\n\n# Grouping by two variables\n#::::::::::::::::::::::::::::::::::::::::::\ngrouped.d <- colon %>%\n surv_group_by(grouping.vars = c(\"rx\", \"adhere\"))\n grouped.d\n\n\n\n"} {"package":"survminer","topic":"surv_median","snippet":"### Name: surv_median\n### Title: Median of Survival Curves\n### Aliases: surv_median\n\n### ** Examples\n\n\nlibrary(survival)\n\n# Different survfits\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit.null <- surv_fit(Surv(time, status) ~ 1, data = colon)\n\nfit1 <- surv_fit(Surv(time, status) ~ sex, data = colon)\n\nfit2 <- surv_fit(Surv(time, status) ~ adhere, data = colon)\n\nfit.list <- list(sex = fit1, adhere = fit2)\n\n# Extract the median 
survival\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nsurv_median(fit.null)\n\nsurv_median(fit2)\n\nsurv_median(fit.list)\n\nsurv_median(fit.list, combine = TRUE)\n\n# Grouped survfit\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit.list2 <- surv_fit(Surv(time, status) ~ sex, data = colon,\n group.by = "rx")\nsurv_median(fit.list2)\n\n\n"} {"package":"survminer","topic":"surv_pvalue","snippet":"### Name: surv_pvalue\n### Title: Compute P-value Comparing Survival Curves\n### Aliases: surv_pvalue\n\n### ** Examples\n\n\nlibrary(survival)\n\n# Different survfits\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit.null <- surv_fit(Surv(time, status) ~ 1, data = colon)\n\nfit1 <- surv_fit(Surv(time, status) ~ sex, data = colon)\n\nfit2 <- surv_fit(Surv(time, status) ~ adhere, data = colon)\n\nfit.list <- list(sex = fit1, adhere = fit2)\n\n# Compute p-values comparing survival curves\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nsurv_pvalue(fit.null)\n\nsurv_pvalue(fit2, colon)\n\nsurv_pvalue(fit.list)\n\nsurv_pvalue(fit.list, combine = TRUE)\n\n# Grouped survfit\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nfit.list2 <- surv_fit(Surv(time, status) ~ sex, data = colon,\n group.by = "rx")\n\nsurv_pvalue(fit.list2)\n\n# Get coordinates for annotation of the survival plots\n#:::::::::::::::::::::::::::::::::::::::::::::::::::::::\nsurv_pvalue(fit.list2, combine = TRUE, get_coord = TRUE)\n\n\n\n"} {"package":"survminer","topic":"surv_summary","snippet":"### Name: surv_summary\n### Title: Nice Summary of a Survival Curve\n### Aliases: surv_summary\n\n### ** Examples\n\n\n# Fit survival curves\nrequire("survival")\nfit <- survfit(Surv(time, status) ~ rx + adhere, data = colon)\n\n# Summarize\nres.sum <- surv_summary(fit, data = colon)\nhead(res.sum)\n\n# Information about the survival curves\nattr(res.sum, "table")\n\n\n\n\n"} {"package":"gbfs","topic":"get_free_bike_status","snippet":"### Name: get_free_bike_status\n### Title: Grab the free_bike_status feed.\n### Aliases: get_free_bike_status\n\n### ** Examples\n\n# grab the free bike status feed for portland, oregon's bikeshare program \n## No test: \nget_free_bike_status(city = \n"https://gbfs.biketownpdx.com/gbfs/en/free_bike_status.json",\noutput = "return")\n## End(No test)\n \n\n\n"} {"package":"gbfs","topic":"get_gbfs","snippet":"### Name: get_gbfs\n### Title: Grab bikeshare data\n### Aliases: get_gbfs\n\n### ** Examples\n\n# grab all of the feeds released by portland's \n# bikeshare program and return them as a \n# named list of dataframes\n## No test: \nget_gbfs(city = "biketown_pdx")\n## End(No test)\n\n# if, rather than returning the data, we wanted to save it:\n## No test: \nget_gbfs(city = "biketown_pdx", directory = tempdir())\n## End(No test)\n\n# note that, usually, we'd supply a character string \n# (like "pdx", say) to the directory argument \n# instead of `tempdir()`. 
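\n\n# for instance, to write every feed into a folder named "pdx"\n# (a sketch -- the folder name here is arbitrary):\n## No test: \nget_gbfs(city = "biketown_pdx", directory = "pdx")\n## End(No test)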
\n\n# if we're having trouble specifying the correct feed,\n# we can also supply the actual URL to the feed\n## No test: \nget_gbfs(city = "https://gbfs.biketownpdx.com/gbfs/gbfs.json")\n## End(No test)\n \n# the examples above grab every feed that portland releases.\n# if, instead, we just wanted the dynamic feeds\n## No test: \nget_gbfs(city = "biketown_pdx", feeds = "dynamic")\n## End(No test)
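\n\n# and, assuming "static" is the complementary option accepted by the\n# feeds argument, the static feeds alone (a sketch):\n## No test: \nget_gbfs(city = "biketown_pdx", feeds = "static")\n## End(No test)\n\n\n"} {"package":"gbfs","topic":"get_station_information","snippet":"### Name: get_station_information\n### Title: Grab the station_information feed.\n### Aliases: get_station_information\n\n### ** Examples\n\n# grab the station information feed for portland, oregon's bikeshare program \n## No test: \nget_station_information(city = \n"https://gbfs.biketownpdx.com/gbfs/en/station_information.json", \n output = "return")\n## End(No test)\n\n\n\n"} {"package":"gbfs","topic":"get_station_status","snippet":"### Name: get_station_status\n### Title: Grab the station_status feed.\n### Aliases: get_station_status\n\n### ** Examples\n\n# we can grab the station status feed for portland, \n# oregon's bikeshare program in several ways! the most \n# straightforward way is just to supply the `city` argument\n# as a string:\n## No test: \nget_station_status(city = "biketown_pdx")\n## End(No test)\n\n# the `city` argument can also be supplied as an\n# actual URL to an active .json feed\n## No test: \nget_station_status(city = \n"https://gbfs.biketownpdx.com/gbfs/en/station_status.json")\n## End(No test)\n \n\n\n"} {"package":"gbfs","topic":"get_system_alerts","snippet":"### Name: get_system_alerts\n### Title: Grab the system_alerts feed.\n### Aliases: get_system_alerts\n\n### ** Examples\n\n# grab the system alerts feed for portland, oregon\n## No test: \nget_system_alerts(city = \n"https://gbfs.biketownpdx.com/gbfs/en/system_alerts.json", \n output = "return")\n## End(No test)\n \n\n\n"} {"package":"gbfs","topic":"get_system_calendar","snippet":"### Name: get_system_calendar\n### Title: Grab the system_calendar feed.\n### Aliases: get_system_calendar\n\n### ** Examples\n\n# grab the system calendar feed for portland, oregon\n## No test: \nget_system_calendar(city = \n"https://gbfs.biketownpdx.com/gbfs/en/system_calendar.json", \n output = "return")\n## End(No test)\n\n\n\n\n"} {"package":"gbfs","topic":"get_system_hours","snippet":"### Name: get_system_hours\n### Title: Grab the system_hours feed.\n### Aliases: get_system_hours\n\n### ** Examples\n\n# grab the system hours feed for portland, oregon\n## No test: \nget_system_hours(city = \n"https://gbfs.biketownpdx.com/gbfs/en/system_hours.json", \n output = "return")\n## End(No test)\n\n\n\n"} {"package":"gbfs","topic":"get_system_information","snippet":"### Name: get_system_information\n### Title: Grab the system_information feed.\n### Aliases: get_system_information\n\n### ** Examples\n\n# we can grab the system information feed for portland, \n# oregon's bikeshare program in several ways! first, supply the `city` \n# argument as a URL, and save to file by leaving output \n# set to its default. 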
usually, we would supply a character \n# string (like "pdx", say) for the `directory` argument \n# instead of `tempdir`.\n## No test: \nget_system_information(city = \n"https://gbfs.biketownpdx.com/gbfs/en/system_information.json", \n directory = tempdir())\n## End(No test)\n \n# or, instead, just supply the name of \n# the city as a string and return the output as a dataframe\n## No test: \nget_system_information(city = "biketown_pdx", \n output = "return")\n## End(No test)\n\n\n"} {"package":"gbfs","topic":"get_system_regions","snippet":"### Name: get_system_regions\n### Title: Grab the system_regions feed.\n### Aliases: get_system_regions\n\n### ** Examples\n\n# we can grab the system regions feed for portland, \n# oregon in one of several ways! first, supply the `city` \n# argument as a URL, and save to file by leaving output \n# set to its default. usually, we would supply a character \n# string (like "pdx", say) for the `directory` argument \n# instead of `tempdir`.\n## No test: \nget_system_regions(city = \n"https://gbfs.biketownpdx.com/gbfs/en/system_regions.json", \n directory = tempdir())\n## End(No test)\n \n# or, instead, just supply the name of \n# the city as a string and return the output\n# as a dataframe\n## No test: \nget_system_regions(city = "biketown_pdx", \n output = "return")\n## End(No test)\n\n\n"} {"package":"gbfs","topic":"get_which_gbfs_feeds","snippet":"### Name: get_which_gbfs_feeds\n### Title: Get dataframe of bikeshare feeds released by a city\n### Aliases: get_which_gbfs_feeds\n\n### ** Examples\n\n# grab all of the feeds released by portland\n## No test: \nget_which_gbfs_feeds(city = "biketown_pdx")\n## End(No test)\n\n\n\n"} {"package":"ggrastr","topic":"geom_beeswarm_rast","snippet":"### Name: geom_beeswarm_rast\n### Title: This geom is similar to 'geom_beeswarm', but creates a raster\n### layer\n### Aliases: geom_beeswarm_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nggplot(mtcars) + geom_beeswarm_rast(aes(x = factor(cyl), y = mpg), raster.dpi = 600, cex = 1.5)\n\n\n\n"} {"package":"ggrastr","topic":"geom_boxplot_jitter","snippet":"### Name: geom_boxplot_jitter\n### Title: This geom is similar to 'geom_boxplot', but allows jittering of\n### outlier points and rasterizing the points layer.\n### Aliases: geom_boxplot_jitter\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nyvalues = rt(1000, df=3)\nxvalues = as.factor(1:1000 %% 2)\nggplot() + geom_boxplot_jitter(aes(y=yvalues, x=xvalues), outlier.jitter.width = 0.1, raster = TRUE)\n\n\n\n"} {"package":"ggrastr","topic":"geom_jitter_rast","snippet":"### Name: geom_jitter_rast\n### Title: This geom is similar to 'geom_jitter', but creates a raster\n### layer\n### Aliases: geom_jitter_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nggplot(mpg) + geom_jitter_rast(aes(x = factor(cyl), y = hwy), raster.dpi = 600)\n\n\n\n"} {"package":"ggrastr","topic":"geom_point_rast","snippet":"### Name: geom_point_rast\n### Title: This geom is similar to 'geom_point', but creates a raster layer\n### Aliases: geom_point_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nggplot() + geom_point_rast(aes(x=rnorm(1000), y=rnorm(1000)), raster.dpi=600)\n\n\n\n"} {"package":"ggrastr","topic":"geom_quasirandom_rast","snippet":"### Name: geom_quasirandom_rast\n### Title: This geom is similar to 'geom_quasirandom', but creates a raster\n### layer\n### Aliases: geom_quasirandom_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nggplot(mtcars) + 
geom_quasirandom_rast(aes(x = factor(cyl), y = mpg), raster.dpi = 600)\n\n\n\n"} {"package":"ggrastr","topic":"geom_tile_rast","snippet":"### Name: geom_tile_rast\n### Title: This geom is similar to 'geom_tile', but creates a raster layer\n### Aliases: geom_tile_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\ncoords <- expand.grid(1:100, 1:100)\ncoords$Value <- 1 / apply(as.matrix(coords), 1, function(x) sum((x - c(50, 50))^2)^0.01)\nggplot(coords) + geom_tile_rast(aes(x=Var1, y=Var2, fill=Value))\n\n\n\n"} {"package":"ggrastr","topic":"geom_violin_rast","snippet":"### Name: geom_violin_rast\n### Title: This geom is similar to 'geom_violin', but creates a raster\n### layer\n### Aliases: geom_violin_rast\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\nggplot(mpg) + geom_violin_rast(aes(x = factor(cyl), y = hwy), raster.dpi = 600)\n\n\n\n"} {"package":"ggrastr","topic":"rasterise","snippet":"### Name: rasterise\n### Title: Rasterise ggplot layers Takes a ggplot object or a layer as\n### input and renders their graphical output as a raster.\n### Aliases: rasterise rasterise.Layer rasterise.list rasterise.ggplot\n\n### ** Examples\n\nrequire(ggplot2)\n# `rasterise()` is used to wrap layers\nggplot(pressure, aes(temperature, pressure)) +\n rasterise(geom_line())\n\n# The `dpi` argument controls resolution\nggplot(faithful, aes(eruptions, waiting)) +\n rasterise(geom_point(), dpi = 5)\n\n# The `dev` argument offers a few options for devices\nrequire(ragg)\nggplot(diamonds, aes(carat, depth, z = price)) +\n rasterise(stat_summary_hex(), dev = \"ragg\")\n\n# The `scale` argument allows you to render a 'big' plot in small window, or vice versa.\nggplot(faithful, aes(eruptions, waiting)) +\n rasterise(geom_point(), scale = 4)\n\n\n"} {"package":"ggrastr","topic":"rasterize","snippet":"### Name: rasterize\n### Title: Rasterise ggplot layers Takes a ggplot object or a layer as\n### input and renders their graphical output as a raster.\n### Aliases: rasterize\n\n### ** Examples\n\nrequire(ggplot2)\n# `rasterise()` is used to wrap layers\nggplot(pressure, aes(temperature, pressure)) +\n rasterise(geom_line())\n\n# The `dpi` argument controls resolution\nggplot(faithful, aes(eruptions, waiting)) +\n rasterise(geom_point(), dpi = 5)\n\n# The `dev` argument offers a few options for devices\nrequire(ragg)\nggplot(diamonds, aes(carat, depth, z = price)) +\n rasterise(stat_summary_hex(), dev = \"ragg\")\n\n# The `scale` argument allows you to render a 'big' plot in small window, or vice versa.\nggplot(faithful, aes(eruptions, waiting)) +\n rasterise(geom_point(), scale = 4)\n\n\n"} {"package":"ggrastr","topic":"theme_pdf","snippet":"### Name: theme_pdf\n### Title: Pretty theme\n### Aliases: theme_pdf\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(ggrastr)\n\ndata = rnorm(100)\ncolors = (1:100/100)\nggplot() + geom_point(aes(x=data, y=data, color=colors)) + theme_pdf(FALSE, legend.pos=c(1, 1))\n\n\n\n"} {"package":"secr","topic":"AIC.secr","snippet":"### Name: AIC.secr\n### Title: Compare SECR Models\n### Aliases: AIC.secr logLik.secr AIC.secrlist secrlist [.secrlist\n### Keywords: models\n\n### ** Examples\n\n## Compare two models fitted previously\n## secrdemo.0 is a null model\n## secrdemo.b has a learned trap response\n\nAIC(secrdemo.0, secrdemo.b)\n\n## Form secrlist and pass to AIC.secr\ntemp <- secrlist(null = secrdemo.0, learnedresponse = secrdemo.b)\nAIC(temp)\n\n\n\n"} {"package":"secr","topic":"AICcompatible.secr","snippet":"### Name: AICcompatible\n### Title: Model 
Compatibility\n### Aliases: AICcompatible.secr AICcompatible.secrlist AICcompatible\n### Keywords: models\n\n### ** Examples\n\n\nAICcompatible(secrdemo.0, secrdemo.CL)\n\n## Not run: \n##D \n##D ## A common application of AICcompatible() is to determine \n##D ## the compatibility of models fitted with and without the \n##D ## fastproximity option.\n##D \n##D ovenCHp1 <- reduce(ovenCHp, by = 'all', outputdetector = 'count')\n##D ob1 <- secr.fit(ovenCHp, buffer = 300, details = list(fastproximity = TRUE))\n##D ob2 <- secr.fit(ovenCHp1, buffer = 300, details = list(fastproximity = FALSE))\n##D ob3 <- secr.fit(ovenCHp1, buffer = 300, details = list(fastproximity = FALSE), binomN = 1)\n##D AICcompatible(ob1,ob2)\n##D AICcompatible(ob1,ob3)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"CV","snippet":"### Name: CV\n### Title: Coefficient of Variation\n### Aliases: CV CVa CVa0\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## housemouse model\n##D morning <- subset(housemouse, occ = c(1,3,5,7,9))\n##D msk <- make.mask((traps(morning)), nx = 32) \n##D morning.h2 <- secr.fit(morning, buffer = 20, model = list(g0~h2), mask = msk, \n##D trace = FALSE)\n##D CVa0(morning.h2)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"read.DA","snippet":"### Name: BUGS\n### Title: Convert Data To Or From BUGS Format\n### Aliases: read.DA write.DA\n### Keywords: IO\n\n### ** Examples\n\n\nwrite.DA (hornedlizardCH, buffer = 100, units = 100)\n\n## In this example, the input uses Xl, Xu etc.\n## for the limits of the plot itself, so buffer = 0.\n## Input is in hundreds of metres.\n## First, obtain the list lzdata\nolddir <- setwd (system.file("extdata", package="secr"))\nsource ("lizarddata.R")\nsetwd(olddir)\nstr(lzdata)\n## Now convert to capthist\ntempcapt <- read.DA(lzdata, Y = "H", xcoord = "X",\n ycoord = "Y", buffer = 0, units = 100)\n\n## Not run: \n##D \n##D plot(tempcapt)\n##D secr.fit(tempcapt, trace = FALSE)\n##D ## etc.\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"read.capthist","snippet":"### Name: read.capthist\n### Title: Import or export data\n### Aliases: read.capthist write.capthist\n### Keywords: IO\n\n### ** Examples\n\n\n## export ovenbird capture histories\n## the files "ovenCHcapt.txt" and "ovenCHtrap.txt" are\n## placed in the current folder (check with getwd() or dir())\n\n## Not run: \n##D write.capthist(ovenCH)
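\n##D \n##D ## to read the histories back in from those files (a sketch using the\n##D ## file names noted above; see ?read.capthist for detector/format options)\n##D ovenCH2 <- read.capthist("ovenCHcapt.txt", "ovenCHtrap.txt")\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"LLsurface","snippet":"### Name: LLsurface\n### Title: Plot Likelihood Surface\n### Aliases: LLsurface LLsurface.secr\n### Keywords: hplot\n\n### ** Examples\n\n\n## Not run: \n##D \n##D LLsurface(secrdemo.CL, xval = seq(0.16,0.40,0.02),\n##D yval = 25:35, nlevels = 20)\n##D \n##D ## now verify MLE\n##D ## click on MLE and apparent `peak'\n##D if (interactive()) {\n##D xy <- locator(2)\n##D LLsurface(secrdemo.CL, xval = xy$x, yval = xy$y, plot = FALSE)\n##D }\n##D \n## End(Not run)\n\n\n"} {"package":"secr","topic":"LR.test","snippet":"### Name: LR.test\n### Title: Likelihood Ratio Test\n### Aliases: LR.test\n### Keywords: htest\n\n### ** Examples\n\n\n## two pre-fitted models\nAIC (secrdemo.0, secrdemo.b)\nLR.test (secrdemo.0, secrdemo.b)\n\n\n\n"} {"package":"secr","topic":"OVpossum","snippet":"### Name: OVpossum\n### Title: Orongorongo Valley Brushtail Possums\n### Aliases: OVpossum OVpossumCH\n### Keywords: datasets\n\n### ** Examples\n\n\n## Not run: \n##D \n##D library(sf)\n##D \n##D summary(OVpossumCH, terse = TRUE)\n##D ovtrap <- 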
traps(OVpossumCH[[1]])\n##D \n##D ## retrieve and plot the forest map\n##D OVforest <- st_read(system.file(\"extdata/OVforest.shp\", package = \"secr\"))\n##D OVforest <- as(OVforest, \"Spatial\")\n##D forestcol <- terrain.colors(6)[c(4,2,2)]\n##D sp::plot(OVforest, col = forestcol)\n##D plot(ovtrap, add = TRUE)\n##D \n##D ## construct a mask\n##D ## we omit forest across the river by selecting only\n##D ## forest polygons 1 and 2 \n##D ovmask <- make.mask(ovtrap, buffer = 120, type = 'trapbuffer',\n##D poly = OVforest[1:2,], spacing = 7.5, keep.poly = FALSE)\n##D ovmask <- addCovariates(ovmask, OVforest[1:2,])\n##D \n##D ## display mask\n##D par(mar = c(0,0,0,8))\n##D plot(ovmask, covariate = 'forest', dots = FALSE, col = forestcol)\n##D plot(ovtrap, add = TRUE)\n##D \n##D ## add the left bank of the Orongorongo River\n##D lines(read.table(system.file(\"extdata/leftbank.txt\", package = \"secr\")))\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"PG","snippet":"### Name: PG\n### Title: Telemetry Fixes in Polygons\n### Aliases: PG\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D olddir <- setwd('d:/density communication/combining telemetry and secr/possums')\n##D CvilleCH <- read.capthist('CVILLE summer captures 4occ.txt',\n##D 'CVILLE detectors summer 4occ.txt',\n##D detector = 'single')\n##D CvilleGPS <- read.telemetry('CVILLE GPS Combined 4occ.txt')\n##D CvilleGPSnew <- read.telemetry('CVILLE summer GPS New occasions.txt')\n##D setwd(olddir)\n##D \n##D CvilleBoth <- addTelemetry(CvilleCH, CvilleGPSnew)\n##D plot(CvilleBoth, border = 400)\n##D PG(CvilleBoth, buffer = 100, convex = TRUE, plt = TRUE, add = TRUE, \n##D col = 'red')\n##D \n##D ###################################################################\n##D ## this code computes an area-adjusted density estimate\n##D ## cf Grant and Doherty 2007\n##D PGD <- function (CH, estimator = 'h2', ...) {\n##D pg <- PG(CH, ...)\n##D PGbar <- mean(pg)\n##D N <- closedN(CH, estimator)\n##D A <- polyarea(buffer.contour(traps(CH), ...)[[1]])\n##D Dhat <- N$Nhat / A * PGbar\n##D varDhat <- (N$Nhat^2 * var(pg) + PGbar^2 * N$seNhat^2) / A^2 \n##D c(Dhat = Dhat, seDhat = sqrt(varDhat))\n##D }\n##D plot(traps(CvilleBoth), border = 400)\n##D PGD(CvilleBoth, buffer = 0, convex = TRUE, plt = TRUE, add = TRUE)\n##D PGD(CvilleBoth, est='null', buffer = 0, convex = TRUE, plt = FALSE)\n##D \n##D ###################################################################\n##D ## this code generates a PG summary for telemetry records randomly\n##D ## translated and rotated, keeping the centres within a habitat mask\n##D \n##D randomPG <- function(CH, poly = NULL, mask, reorient = TRUE, nrepl = 1,\n##D seed = 12345, ...) 
{\n##D moveone <- function(xy, newcentre) {\n##D xy <- sweep(xy,2,apply(xy,2,mean))\n##D if (reorient) ## random rotation about centre\n##D xy <- rotate(xy, runif(1)*360)\n##D sweep(xy,2,unlist(newcentre), \"+\")\n##D }\n##D onerepl <- function(r) { ## r is dummy for replicate\n##D centres <- sim.popn(D = D, core = mask, model2D = \"IHP\",\n##D Ndist = \"fixed\")\n##D xyl <- mapply(moveone, xyl, split(centres, rownames(centres)))\n##D attr(CH, 'xylist') <- xyl ## substitute random placement\n##D PG(CH = CH , poly = poly, plt = FALSE, ...)\n##D }\n##D set.seed(seed)\n##D if (!requireNamespace('sf')) stop (\"requires package sf\")\n##D if (is.null(poly)) {\n##D poly <- buffer.contour (traps(CH), ...)\n##D poly <- lapply(poly, as.matrix)\n##D poly <- sf::st_sfc(sf::st_polygon(poly))\n##D }\n##D xyl <- telemetryxy(CH)\n##D D <- length(xyl) / maskarea(mask)\n##D sapply(1:nrepl, onerepl)\n##D }\n##D \n##D mask <- make.mask (traps(CvilleBoth), buffer = 400, type = \"trapbuffer\")\n##D pg <- randomPG (CvilleBoth, mask = mask, buffer = 100, convex = TRUE,\n##D nrepl = 20)\n##D apply(pg, 1, mean)\n##D ###################################################################\n##D \n## End(Not run)\n\n\n"} {"package":"secr","topic":"Parallel","snippet":"### Name: Parallel\n### Title: Multi-core Processing\n### Aliases: Parallel ncores 'Multi-core processing'\n\n### ** Examples\n\n\n## Not run: \n##D \n##D sessionInfo()\n##D # R version 4.3.0 (2023-04-21 ucrt)\n##D # Platform: x86_64-w64-mingw32/x64 (64-bit)\n##D # Running under: Windows 11 x64 (build 22621)\n##D # on Dell-XPS 8950 Intel i7-12700K\n##D # ...\n##D # see stackoverflow suggestion for microbenchmark list argument\n##D # https://stackoverflow.com/questions/32950881/how-to-use-list-argument-in-microbenchmark\n##D \n##D library(microbenchmark)\n##D options(digits = 4)\n##D \n##D ## benefit from multi-threading in secr.fit\n##D \n##D jobs <- lapply(seq(2,8,2), function(nc) \n##D bquote(suppressWarnings(secr.fit(captdata, trace = FALSE, ncores = .(nc)))))\n##D microbenchmark(list = jobs, times = 10, unit = \"seconds\")\n##D # [edited output]\n##D # Unit: seconds\n##D # expr min lq mean median uq max neval\n##D # ncores = 2 1.75880 2.27978 2.6680 2.7450 3.0960 3.4334 10\n##D # ncores = 4 1.13549 1.16280 1.6120 1.4431 2.0041 2.4018 10\n##D # ncores = 6 0.88003 0.98215 1.2333 1.1387 1.5175 1.6966 10\n##D # ncores = 8 0.78338 0.90033 1.5001 1.0406 1.2319 4.0669 10\n##D \n##D ## sometimes (surprising) lack of benefit with ncores>2\n##D \n##D msk <- make.mask(traps(ovenCH[[1]]), buffer = 300, nx = 25)\n##D jobs <- lapply(c(1,2,4,8), function(nc) \n##D bquote(secr.fit(ovenCH, trace = FALSE, ncores = .(nc), mask = msk)))\n##D microbenchmark(list = jobs, times = 10, unit = \"seconds\")\n##D # [edited output]\n##D # Unit: seconds\n##D # expr min lq mean median uq max neval\n##D # ncores = 1 12.5010 13.4951 15.674 15.304 16.373 21.723 10\n##D # ncores = 2 10.0363 11.8634 14.407 13.726 16.782 22.966 10\n##D # ncores = 4 8.6335 10.3422 13.085 12.449 15.729 17.914 10\n##D # ncores = 8 8.5286 9.9008 10.751 10.736 10.796 14.885 10\n##D \n##D ## and for simulation...\n##D \n##D jobs <- lapply(seq(2,8,2), function(nc)\n##D bquote(sim.secr(secrdemo.0, nsim = 20, tracelevel = 0, ncores = .(nc))))\n##D microbenchmark(list = jobs, times = 10, unit = \"seconds\")\n##D # [edited output]\n##D # Unit: seconds\n##D # expr min lq mean median uq max neval\n##D # ncores = 2 48.610 49.932 59.032 52.485 54.730 119.905 10\n##D # ncores = 4 29.480 29.996 30.524 30.471 31.418 31.612 
10\n##D # ncores = 6 22.583 23.594 24.148 24.354 24.644 25.388 10\n##D # ncores = 8 19.924 20.651 25.581 21.002 21.696 51.920 10\n##D \n##D ## and log-likelihood surface\n##D \n##D jobs <- lapply(seq(2,8,2), function(nc) \n##D bquote(suppressMessages(LLsurface(secrdemo.0, ncores = .(nc)))))\n##D microbenchmark(list = jobs, times = 10, unit = \"seconds\")\n##D # [edited output]\n##D # Unit: seconds\n##D # expr min lq mean median uq max neval\n##D # ncores = 2 20.941 21.098 21.290 21.349 21.471 21.619 10\n##D # ncores = 4 14.982 15.125 15.303 15.263 15.449 15.689 10\n##D # ncores = 6 13.994 14.299 14.529 14.342 14.458 16.515 10\n##D # ncores = 8 13.597 13.805 13.955 13.921 14.128 14.353 10\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"RMarkInput","snippet":"### Name: RMarkInput\n### Title: Convert Data to RMark Input Format\n### Aliases: RMarkInput unRMarkInput\n### Keywords: manip\n\n### ** Examples\n\n\n## ovenCH is a 5-year mist-netting dataset\novenRD <- RMarkInput (join(ovenCH))\nhead(ovenRD)\n\nunRMarkInput(ovenRD)\n\nRMarkInput(deermouse.ESG, covariates = FALSE, grouped = TRUE)\nRMarkInput(deermouse.ESG, covariates = TRUE)\n\n## Not run: \n##D ## fit robust-design model in RMark (MARK must be installed)\n##D library(RMark)\n##D MarkPath <- 'c:/MARK' ## adjust for your installation\n##D ovenRD.data <- process.data(ovenRD, model = \"Robust\",\n##D time.interval = attr(ovenRD, \"intervals\"))\n##D ovenRD.model <- mark(data = ovenRD.data, model = \"Robust\",\n##D model.parameters = list(p = list(formula = ~1, share = TRUE),\n##D GammaDoublePrime = list(formula = ~1),\n##D GammaPrime = list(formula = ~1),\n##D f0 = list(formula = ~1))) \n##D cleanup(ask = FALSE)\n## End(Not run)\n\n\n\n\n"} {"package":"secr","topic":"RSE","snippet":"### Name: RSE\n### Title: RSE from Fitted Model\n### Aliases: RSE\n### Keywords: model\n\n### ** Examples\n\n\nRSE(secrdemo.0)\n\n\n\n"} {"package":"secr","topic":"Rsurface","snippet":"### Name: Rsurface\n### Title: Smoothed Resource Surface\n### Aliases: Rsurface\n### Keywords: manip\n\n### ** Examples\n\n\n## create binary covariate (0 outside habitat)\nmsk <- make.mask(traps(possumCH), buffer = 800)\ncovariates(msk) <- data.frame(z = as.numeric(pointsInPolygon\n (msk,possumarea)))\n\n## derive and plot \"resource availability\"\nRs <- Rsurface(msk, sigma = 100, usecov = 'z')\nplot(Rs, plottype = 'contour', col = topo.colors(10))\nlines(possumarea)\n\nif (interactive()) {\n spotHeight(Rs, dec = 2)\n}\n\n\n\n"} {"package":"secr","topic":"addCovariates","snippet":"### Name: addCovariates\n### Title: Add Covariates to Mask or Traps\n### Aliases: addCovariates\n### Keywords: manip\n\n### ** Examples\n\n\n## In the Lake Station skink study (see ?skink), habitat covariates were\n## measured only at trap sites. 
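## (annotation, not from the original help page: addCovariates assigns
## each mask point the covariate value of the nearest point in the
## source object; a minimal self-contained sketch with a simulated grid)
# tr <- make.grid()
# covariates(tr) <- data.frame(habclass = factor(
#     sample(c("low", "high"), nrow(tr), replace = TRUE)))
# msk <- make.mask(tr, buffer = 30, type = "trapbuffer")
# msk <- addCovariates(msk, tr)
# head(covariates(msk))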
Here we extrapolate to a mask, taking\n## values for each mask point from the nearest trap.\n\nLSmask <- make.mask(LStraps, buffer = 30, type = \"trapbuffer\")\ntempmask <- addCovariates(LSmask, LStraps)\n## show first few lines\nhead(covariates(tempmask))\n\n\n\n"} {"package":"secr","topic":"addSightings","snippet":"### Name: addSightings\n### Title: Mark-resight Data\n### Aliases: addSightings\n### Keywords: manip\n\n### ** Examples\n\n\n## construct capthist object MRCH from text files provided in \n## 'extdata' folder, assigning attribute 'markocc' and add unmarked\n## and marked sightings from respective textfiles\n\ndatadir <- system.file(\"extdata\", package = \"secr\")\ncaptfile <- paste0(datadir, '/MRCHcapt.txt')\ntrapfile <- paste0(datadir, '/MRCHtrap.txt')\nTufile <- paste0(datadir, '/Tu.txt')\nTmfile <- paste0(datadir, '/Tm.txt')\n\nMRCH <- read.capthist(captfile, trapfile, detector = c(\"multi\", \n rep(\"proximity\",4)), markocc = c(1,0,0,0,0))\nMRCH1 <- addSightings(MRCH, Tufile, Tmfile)\n\n## alternatively (ignoring marked, not ID sightings)\n\nMRCH <- read.capthist(captfile, trapfile, detector = c(\"multi\", \n rep(\"proximity\",4)), markocc = c(1,0,0,0,0))\nTu <- read.table(Tufile)[,-1] # drop session column\nMRCH2 <- addSightings(MRCH, unmarked = Tu)\nsummary(MRCH2)\n\n\n\n"} {"package":"secr","topic":"addTelemetry","snippet":"### Name: addTelemetry\n### Title: Combine Telemetry and Detection Data\n### Aliases: addTelemetry xy2CH telemetrytype telemetrytype<-\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D # Generate some detection and telemetry data, combine them using\n##D # addTelemetry, and perform analyses\n##D \n##D # detectors\n##D te <- make.telemetry()\n##D tr <- make.grid(detector = \"proximity\")\n##D \n##D # simulated population and 50% telemetry sample\n##D totalpop <- sim.popn(tr, D = 20, buffer = 100)\n##D tepop <- subset(totalpop, runif(nrow(totalpop)) < 0.5)\n##D \n##D # simulated detection histories and telemetry\n##D # the original animalID (renumber = FALSE) are needed for matching\n##D trCH <- sim.capthist(tr, popn = totalpop, renumber = FALSE, detectfn = \"HHN\")\n##D teCH <- sim.capthist(te, popn = tepop, renumber=FALSE, detectfn = \"HHN\",\n##D detectpar = list(lambda0 = 3, sigma = 25))\n##D \n##D combinedCH <- addTelemetry(trCH, teCH)\n##D \n##D # summarise and display\n##D summary(combinedCH)\n##D plot(combinedCH, border = 150)\n##D ncapt <- apply(combinedCH,1,sum)\n##D points(totalpop[row.names(combinedCH)[ncapt==0],], pch = 1)\n##D points(totalpop[row.names(combinedCH)[ncapt>0],], pch = 16)\n##D \n##D # for later comparison of precision we must fix the habitat mask\n##D mask <- make.mask(tr, buffer = 100)\n##D fit.tr <- secr.fit(trCH, mask = mask, CL = TRUE, detectfn = \"HHN\") ## trapping alone\n##D fit.te <- secr.fit(teCH, mask = mask, CL = TRUE, start = log(20), ## telemetry alone\n##D detectfn = \"HHN\") \n##D fit2 <- secr.fit(combinedCH, mask = mask, CL = TRUE, ## combined\n##D detectfn = \"HHN\") \n##D \n##D # improved precision when focus on realised population\n##D # (compare CVD)\n##D derived(fit.tr, distribution = \"binomial\")\n##D derived(fit2, distribution = \"binomial\")\n##D \n##D \n##D # may also use CL = FALSE\n##D secr.fit(combinedCH, CL = FALSE, detectfn = \"HHN\", trace = FALSE)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"as.data.frame","snippet":"### Name: as.data.frame\n### Title: Coerce capthist to Data Frame\n### Aliases: as.data.frame as.data.frame.capthist as.data.frame.traps\n### 
Keywords: IO\n\n### ** Examples\n\n\n as.data.frame (captdata)\n as.data.frame (traps(captdata))\n \n\n\n"} {"package":"secr","topic":"as.mask","snippet":"### Name: as.mask\n### Title: Coerce traps object to mask\n### Aliases: as.mask\n### Keywords: manip\n\n### ** Examples\n\n\nplot(as.mask(traps(captdata)), dots = FALSE, meshcol = \"black\")\nplot(traps(captdata), add = TRUE)\n\n\n\n"} {"package":"secr","topic":"autoini","snippet":"### Name: autoini\n### Title: Initial Parameter Values for SECR\n### Aliases: autoini\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D demotraps <- make.grid()\n##D demomask <- make.mask(demotraps)\n##D demoCH <- sim.capthist (demotraps, popn = list(D = 5, buffer = 100), seed = 321)\n##D autoini (demoCH, demomask)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"animalID","snippet":"### Name: capthist.parts\n### Title: Dissect Spatial Capture History Object\n### Aliases: animalID occasion trap alive xy alongtransect xy<- telemetryxy\n### telemetryxy<- telemetered\n### Keywords: manip\n\n### ** Examples\n\n\n## `captdata' is a demonstration dataset\nanimalID(captdata)\n\ntemp <- sim.capthist(popn = list(D = 1), make.grid(detector\n = \"count\"))\ncbind(ID = as.numeric(animalID(temp)), occ = occasion(temp),\n trap = trap(temp))\n\n\n\n"} {"package":"secr","topic":"nk","snippet":"### Name: chat\n### Title: Overdispersion of Activity Centres\n### Aliases: nk Enk chat.nk adjustVarD\n### Keywords: manip\n\n### ** Examples\n\n## No test: \n temptrap <- make.grid()\n msk <- make.mask(temptrap)\n ## expected number of individuals per detector (multi-catch) \n Enk (D = 5, msk, temptrap, detectpar = list(g0 = 0.2, sigma = 25),\n noccasions = 5)\n\n## End(No test)\n\n# useful plotting function for simulated chat (nsim>0)\nplotchat <- function(chat, head = '', breaks = seq(0.5,2.5,0.05)) {\n hist(chat$sim.chat, xlim = range(breaks), main = head, xlab = 'c-hat',\n breaks = breaks, cex.main = 1, yaxs = 'i')\n abline(v = chat$chat, lwd = 1.5, col = 'blue')\n}\n\n\n\n"} {"package":"secr","topic":"circular.r","snippet":"### Name: circular\n### Title: Circular Probability\n### Aliases: circular.r circular.p\n### Keywords: models\n\n### ** Examples\n\n\n## Calhoun and Casby (1958) p 3.\n## give p = 0.3940, 0.8645, 0.9888\ncircular.p(1:3, hazard = FALSE)\n\n## halfnormal, hazard-rate and exponential\ncircular.r ()\ncircular.r (detectfn = \"HR\", detectpar = list(sigma = 1, z = 4))\ncircular.r (detectfn = \"EX\")\ncircular.r (detectfn = \"HHN\")\ncircular.r (detectfn = \"HHR\", detectpar = list(sigma = 1, z = 4))\ncircular.r (detectfn = \"HEX\")\n\nplot(seq(0, 5, 0.05), circular.p(r = seq(0, 5, 0.05)),\n type = \"l\", xlab = \"Radius (multiples of sigma)\", ylab = \"Probability\")\nlines(seq(0, 5, 0.05), circular.p(r = seq(0, 5, 0.05), detectfn = 2),\n type = \"l\", col = \"red\")\nlines(seq(0, 5, 0.05), circular.p(r = seq(0, 5, 0.05), detectfn = 1,\n detectpar = list(sigma = 1,z = 4)), type = \"l\", col = \"blue\")\nabline (h = 0.95, lty = 2)\n\nlegend (2.8, 0.3, legend = c(\"halfnormal\",\"hazard-rate, z = 4\", \"exponential\"),\n col = c(\"black\",\"blue\",\"red\"), lty = rep(1,3))\n\n## in this example, a more interesting comparison would use\n## sigma = 0.58 for the exponential curve.\n\n\n\n"} {"package":"secr","topic":"clone","snippet":"### Name: clone\n### Title: Replicate Rows\n### Aliases: clone clone.default clone.popn clone.capthist\n### Keywords: manip\n\n### ** Examples\n\n\n## population of animals at 1 / hectare generates random\n## 
Poisson number of cues, lambda = 5\nmics4 <- make.grid( nx = 2, ny = 2, spacing = 44, detector = \"signal\")\npop <- sim.popn (D = 1, core = mics4, buffer = 300, nsessions = 6)\npop <- clone (pop, \"poisson\", 5)\nattr(pop[[1]],\"freq\")\n\nclone(captdata, \"poisson\", 3)\n\n# To avoid losing any individuals use zero-truncated Poisson\n# First find lambda of truncated Poisson with given mean\ngetlambda <- function (target) {\n fn <- function(x) x / (1-exp(-x)) - target\n uniroot(interval = c(1e-8, target), f = fn)$root\n}\nclone(captdata, \"truncatedpoisson\", getlambda(3))\n\n\n\n"} {"package":"secr","topic":"closedN","snippet":"### Name: closedN\n### Title: Closed population estimates\n### Aliases: closedN\n### Keywords: models\n\n### ** Examples\n\nclosedN(deermouse.ESG)\n\n\n"} {"package":"secr","topic":"closure.test","snippet":"### Name: closure.test\n### Title: Closure tests\n### Aliases: closure.test\n### Keywords: htest\n\n### ** Examples\n\n closure.test(captdata)\n\n\n"} {"package":"secr","topic":"cluster","snippet":"### Name: cluster\n### Title: Detector Clustering\n### Aliases: cluster clusterID clustertrap clusterID<- clustertrap<-\n### Keywords: manip\n\n### ** Examples\n\n\n## 25 4-detector clusters\nmini <- make.grid(nx = 2, ny = 2)\ntempgrid <- trap.builder (cluster = mini , method = \"all\",\n frame = expand.grid(x = seq(100, 500, 100), y = seq(100,\n 500, 100)))\nclusterID(tempgrid)\nclustertrap(tempgrid)\n\ntempCH <- sim.capthist(tempgrid)\ntable(clusterID(tempCH)) ## detections per cluster\ncluster.counts(tempCH) ## distinct individuals\n\n\n\n"} {"package":"secr","topic":"coef.secr","snippet":"### Name: coef.secr\n### Title: Coefficients of secr Object\n### Aliases: coef.secr\n### Keywords: models\n\n### ** Examples\n\n\n## load & extract coefficients of previously fitted null model\ncoef(secrdemo.0)\n\n\n\n"} {"package":"secr","topic":"collate","snippet":"### Name: collate\n### Title: Array of Parameter Estimates\n### Aliases: collate collate.secr collate.ipsecr collate.secrlist\n\n### ** Examples\n\n\ncollate (secrdemo.0, secrdemo.b, perm = c(4,2,3,1))[,,1,]\n\n\n\n"} {"package":"secr","topic":"confint.secr","snippet":"### Name: confint.secr\n### Title: Profile Likelihood Confidence Intervals\n### Aliases: confint.secr\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## Limits for the constant real parameter \"D\"\n##D confint(secrdemo.0, \"D\") \n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"pdot.contour","snippet":"### Name: contour\n### Title: Contour Detection Probability\n### Aliases: pdot.contour buffer.contour\n### Keywords: hplot\n\n### ** Examples\n\n\npossumtraps <- traps(possumCH)\n\n## convex and concave buffers\nplot(possumtraps, border = 270)\nbuffer.contour(possumtraps, buffer = 100, add = TRUE, col = \"blue\")\nbuffer.contour(possumtraps, buffer = 100, convex = TRUE, add = TRUE)\n\n## areas\nbuff.concave <- buffer.contour(possumtraps, buffer = 100,\n plt = FALSE)\nbuff.convex <- buffer.contour(possumtraps, buffer = 100,\n plt = FALSE, convex = TRUE)\nsum (sapply(buff.concave, polyarea)) ## sum over parts\nsapply(buff.convex, polyarea)\n\n## effect of nx on area\nbuff.concave2 <- buffer.contour(possumtraps, buffer = 100,\n nx = 128, plt = FALSE)\nsum (sapply(buff.concave2, polyarea))\n\n## Not run: \n##D \n##D plot(possumtraps, border = 270)\n##D pdot.contour(possumtraps, detectfn = 0, nx = 128, detectpar =\n##D detectpar(possum.model.0), levels = c(0.1, 0.01, 0.001),\n##D noccasions = 5, add = TRUE)\n##D \n##D ## 
clipping to polygon\n##D olddir <- setwd(system.file(\"extdata\", package = \"secr\"))\n##D possumtraps <- traps(possumCH)\n##D possumarea <- read.table(\"possumarea.txt\", header = TRUE)\n##D par(xpd = TRUE, mar = c(1,6,6,6))\n##D plot(possumtraps, border = 400, gridlines = FALSE)\n##D pdot.contour(possumtraps, detectfn = 0, nx = 256, detectpar =\n##D detectpar(possum.model.0), levels = c(0.1, 0.01, 0.001),\n##D noccasions = 5, add = TRUE, poly = possumarea, col = \"blue\")\n##D lines(possumarea)\n##D setwd(olddir)\n##D par(xpd = FALSE, mar = c(5,4,4,2) + 0.1) ## reset to default\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"housemouse","snippet":"### Name: housemouse\n### Title: House mouse live trapping data\n### Aliases: housemouse Coulombe\n### Keywords: datasets\n\n### ** Examples\n\n\nplot(housemouse, title = paste(\"Coulombe (1965), Mus musculus,\",\n \"California salt marsh\"), border = 5, rad = 0.5,\n gridlines = FALSE)\n\nmorning <- subset(housemouse, occ = c(1,3,5,7,9))\nsummary(morning)\n\n## drop 2 unknown-sex mice\nknown.sex <- subset(housemouse, !is.na(covariates(housemouse)$sex))\n\n## reveal multiple captures\ntable(trap(housemouse), occasion(housemouse))\n\n## Not run: \n##D \n##D ## assess need to distinguish morning and afternoon samples\n##D housemouse.0 <- secr.fit (housemouse, buffer = 20)\n##D housemouse.ampm <- secr.fit (housemouse, model = g0~tcov, buffer = 20,\n##D timecov = c(0,1,0,1,0,1,0,1,0,1))\n##D AIC(housemouse.0, housemouse.ampm)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"covariates","snippet":"### Name: covariates\n### Title: Covariates Attribute\n### Aliases: covariates covariates<-\n### Keywords: manip\n\n### ** Examples\n\n## detector covariates\ntemptrap <- make.grid(nx = 6, ny = 8)\ncovariates (temptrap) <- data.frame(halfnhalf = \n factor(rep(c(\"left\",\"right\"),c(24,24))) )\nsummary(covariates(temptrap))\n\n\n"} {"package":"secr","topic":"deermouse","snippet":"### Name: deermouse\n### Title: Deermouse Live-trapping Datasets\n### Aliases: deermouse deermouse.ESG deermouse.WSG\n### Keywords: datasets\n\n### ** Examples\n\n\npar(mfrow = c(1,2), mar = c(1,1,4,1))\nplot(deermouse.ESG, title = \"Peromyscus data from East Stuart Gulch\",\n border = 10, gridlines = FALSE, tracks = TRUE)\nplot(deermouse.WSG, title = \"Peromyscus data from Wet Swizer Gulch\",\n border = 10, gridlines = FALSE, tracks = TRUE)\n\nclosure.test(deermouse.ESG, SB = TRUE)\n\n## reveal multiple captures\ntable(trap(deermouse.ESG), occasion(deermouse.ESG))\ntable(trap(deermouse.WSG), occasion(deermouse.WSG))\n\n\n\n"} {"package":"secr","topic":"deleteMaskPoints","snippet":"### Name: deleteMaskPoints\n### Title: Edit Mask Points\n### Aliases: deleteMaskPoints\n### Keywords: manip\n\n### ** Examples\n\n\nif (interactive()) {\n mask0 <- make.mask (traps(captdata))\n ## Method 1 - click on each point to remove\n mask1 <- deleteMaskPoints (mask0)\n ## Method 2 - click on vertices of removal polygon\n mask2 <- deleteMaskPoints (mask0, onebyone = FALSE)\n ## Method 3 - predefined removal polygon\n plot(captdata)\n poly1 <- locator(5)\n mask3 <- deleteMaskPoints (mask0, poly = poly1)\n}\n\n\n\n"} {"package":"secr","topic":"derived","snippet":"### Name: derived\n### Title: Derived Parameters of Fitted SECR Model\n### Aliases: derived esa derived.secr derived.secrlist\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D ## extract derived parameters from a model fitted previously\n##D ## by maximizing the conditional likelihood \n##D derived 
(secrdemo.CL)\n##D \n##D ## what happens when sampling variance is conditional on mask N?\n##D derived(secrdemo.CL, distribution = \"binomial\")\n##D ## fitted g0, sigma\n##D esa(secrdemo.CL)\n##D ## force different g0, sigma\n##D esa(secrdemo.CL, real = c(0.2, 25))\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"details","snippet":"### Name: details\n### Title: Detail Specification for secr.fit\n### Aliases: details fixedbeta LLonly param miscparm maxdistance\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## Demo of miscparm and userdist\n##D ## We fix the usual 'sigma' parameter and estimate the same \n##D ## quantity as miscparm[1]. Differences in CI reflect the implied use \n##D ## of the identity link for miscparm[1]. \n##D \n##D mydistfn3 <- function (xy1,xy2, mask) {\n##D if (missing(xy1)) return(character(0))\n##D xy1 <- as.matrix(xy1)\n##D xy2 <- as.matrix(xy2)\n##D miscparm <- attr(mask, 'miscparm')\n##D distmat <- edist(xy1,xy2) / miscparm[1]\n##D distmat\n##D }\n##D \n##D fit0 <- secr.fit (captdata)\n##D fit <- secr.fit (captdata, fixed = list(sigma=1), details = \n##D list(miscparm = c(sig = 20), userdist = mydistfn3)) \n##D predict(fit0)\n##D coef(fit)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"detector","snippet":"### Name: detector\n### Title: Detector Type\n### Aliases: detector detector<- polygon polygonX transect transectX single\n### multi proximity count capped\n### Keywords: models\n\n### ** Examples\n\n## Default detector type is \"multi\"\ntemptrap <- make.grid(nx = 6, ny = 8)\ndetector(temptrap) <- \"proximity\"\nsummary(temptrap)\n\n\n"} {"package":"secr","topic":"deviance","snippet":"### Name: deviance\n### Title: Deviance of fitted secr model and residual degrees of freedom\n### Aliases: deviance df.residual deviance.secr df.residual.secr\n### Keywords: models\n\n### ** Examples\n\n\ndeviance(secrdemo.0)\ndf.residual(secrdemo.0)\n\n\n\n"} {"package":"secr","topic":"discretize","snippet":"### Name: discretize\n### Title: Rasterize Area Search or Transect Data\n### Aliases: discretize\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## generate some polygon data\n##D pol <- make.poly()\n##D CH <- sim.capthist(pol, popn = list(D = 30), detectfn = 'HHN', \n##D detectpar = list(lambda0 = 0.3))\n##D plot(CH, border = 10, gridl = FALSE, varycol = FALSE)\n##D \n##D ## discretize and plot\n##D CH1 <- discretize(CH, spacing = 10, output = 'count')\n##D plot(CH1, add = TRUE, cappar = list(col = 'orange'), varycol =\n##D FALSE, rad = 0)\n##D plot(traps(CH1), add = TRUE)\n##D # overlay cell boundaries\n##D plot(as.mask(traps(CH1)), dots = FALSE, col = NA, meshcol = 'green', \n##D add = TRUE)\n##D \n##D ## show how detections are snapped to new detectors\n##D newxy <- traps(CH1)[nearesttrap(xy(CH),traps(CH1)),]\n##D segments(xy(CH)[,1], xy(CH)[,2], newxy[,1], newxy[,2])\n##D \n##D plot(traps(CH), add = TRUE) # original polygon\n##D \n##D ## Incomplete overlap\n##D \n##D pol <- rotate(make.poly(), 45)\n##D CH2 <- sim.capthist(pol, popn = list(D = 30), detectfn = 'HHN', \n##D detectpar = list(lambda0 = 0.3))\n##D plot(CH2, border = 10, gridl = FALSE, varycol = FALSE)\n##D CH3 <- discretize(CH2, spacing = 10, output = 'count', type = 'any', \n##D cell.overlap = TRUE, tol=0.05)\n##D \n##D plot(CH3, add = TRUE, cappar = list(col = 'orange'), varycol =\n##D FALSE, rad = 0)\n##D plot(traps(CH3), add = TRUE)\n##D \n##D # overlay cell boundaries and usage\n##D msk <- as.mask(traps(CH3))\n##D covariates(msk) <- 
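##D # (annotation, not in the original: with cell.overlap = TRUE the usage
##D # of each new detector reflects the proportion of its cell overlapped
##D # by the searched polygon; copying column 1, occasion 1, to the mask
##D # covariates here is only for plotting)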
data.frame(usage = usage(traps(CH3))[,1])\n##D plot(msk, dots = FALSE, cov='usage', meshcol = 'green', \n##D add = TRUE)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"nearesttrap","snippet":"### Name: distancetotrap\n### Title: Distance To Nearest Detector\n### Aliases: nearesttrap distancetotrap\n### Keywords: manip\n\n### ** Examples\n\n\n ## restrict a habitat mask to points within 70 m of traps \n ## this is nearly equivalent to using make.mask with the \n ## `trapbuffer' option\n temptrap <- make.grid()\n tempmask <- make.mask(temptrap)\n d <- distancetotrap(tempmask, temptrap)\n tempmask <- subset(tempmask, d < 70)\n\n\n\n"} {"package":"secr","topic":"ellipse.secr","snippet":"### Name: ellipse.secr\n### Title: Confidence Ellipses\n### Aliases: ellipse.secr ellipse.bvn\n### Keywords: hplot\n\n### ** Examples\n\n\nellipse.secr(secrdemo.0)\n\n\n\n"} {"package":"secr","topic":"empirical.varD","snippet":"### Name: empirical.varD\n### Title: Empirical Variance of H-T Density Estimate\n### Aliases: empirical.varD derivednj derivedSession derivedMash\n### derivedCluster derivedExternal derivedSystematic\n### Keywords: models\n\n### ** Examples\n\n\n## The `ovensong' data are pooled from 75 replicate positions of a\n## 4-microphone array. The array positions are coded as the first 4\n## digits of each sound identifier. The sound data are initially in the\n## object `signalCH'. We first impose a 52.5 dB signal threshold as in\n## Dawson & Efford (2009, J. Appl. Ecol. 46:1201--1209). The vector nj\n## includes 33 positions at which no ovenbird was heard. The first and\n## second columns of `temp' hold the estimated effective sampling area\n## and its standard error.\n\n## Not run: \n##D \n##D signalCH.525 <- subset(signalCH, cutval = 52.5)\n##D nonzero.counts <- table(substring(rownames(signalCH.525),1,4))\n##D nj <- c(nonzero.counts, rep(0, 75 - length(nonzero.counts)))\n##D temp <- derived(ovensong.model.1, se.esa = TRUE)\n##D derivednj(nj, temp[\"esa\",1:2])\n##D \n##D ## The result is very close to that reported by Dawson & Efford\n##D ## from a 2-D Poisson model fitted by maximizing the full likelihood.\n##D \n##D ## If nj vector has length 1, a theoretical variance is used...\n##D msk <- ovensong.model.1$mask\n##D A <- nrow(msk) * attr(msk, \"area\")\n##D derivednj (sum(nj), temp[\"esa\",1:2], method = \"poisson\")\n##D derivednj (sum(nj), temp[\"esa\",1:2], method = \"binomial\", area = A)\n##D \n##D ## Set up an array of small (4 x 4) grids,\n##D ## simulate a Poisson-distributed population,\n##D ## sample from it, plot, and fit a model.\n##D ## mash() condenses clusters to a single cluster\n##D \n##D testregion <- data.frame(x = c(0,2000,2000,0),\n##D y = c(0,0,2000,2000))\n##D t4 <- make.grid(nx = 4, ny = 4, spacing = 40)\n##D t4.16 <- make.systematic (n = 16, cluster = t4,\n##D region = testregion)\n##D popn1 <- sim.popn (D = 5, core = testregion,\n##D buffer = 0)\n##D capt1 <- sim.capthist(t4.16, popn = popn1)\n##D fit1 <- secr.fit(mash(capt1), CL = TRUE, trace = FALSE)\n##D \n##D ## Visualize sampling\n##D tempmask <- make.mask(t4.16, spacing = 10, type =\n##D \"clusterbuffer\")\n##D plot(tempmask)\n##D plot(t4.16, add = TRUE)\n##D plot(capt1, add = TRUE)\n##D \n##D ## Compare model-based and empirical variances.\n##D ## Here the answers are similar because the data\n##D ## were simulated from a Poisson distribution,\n##D ## as assumed by \\code{derived}\n##D \n##D derived(fit1)\n##D derivedMash(fit1)\n##D \n##D ## Now simulate a patchy distribution; note the\n##D ## 
larger (and more credible) SE from derivedMash().\n##D \n##D popn2 <- sim.popn (D = 5, core = testregion, buffer = 0,\n##D model2D = \"hills\", details = list(hills = c(-2,3)))\n##D capt2 <- sim.capthist(t4.16, popn = popn2)\n##D fit2 <- secr.fit(mash(capt2), CL = TRUE, trace = FALSE)\n##D derived(fit2)\n##D derivedMash(fit2)\n##D \n##D ## The detection model we have fitted may be extrapolated to\n##D ## a more fine-grained systematic sample of points, with\n##D ## detectors operated on a single occasion at each...\n##D ## Total effort 400 x 1 = 400 detector-occasions, compared\n##D ## to 256 x 5 = 1280 detector-occasions for initial survey.\n##D \n##D t1 <- make.grid(nx = 1, ny = 1)\n##D t1.100 <- make.systematic (cluster = t1, spacing = 100,\n##D region = testregion)\n##D capt2a <- sim.capthist(t1.100, popn = popn2, noccasions = 1)\n##D ## one way to get number of animals per point\n##D nj <- attr(mash(capt2a), \"n.mash\")\n##D derivedExternal (fit2, nj = nj, cluster = t1, buffer = 100,\n##D noccasions = 1)\n##D \n##D ## Review plots\n##D library(MASS)\n##D base.plot <- function() {\n##D eqscplot( testregion, axes = FALSE, xlab = \"\",\n##D ylab = \"\", type = \"n\")\n##D polygon(testregion)\n##D }\n##D par(mfrow = c(1,3), xpd = TRUE, xaxs = \"i\", yaxs = \"i\")\n##D base.plot()\n##D plot(popn2, add = TRUE, col = \"blue\")\n##D mtext(side=3, line=0.5, \"Population\", cex=0.8, col=\"black\")\n##D base.plot()\n##D plot (capt2a, add = TRUE,title = \"Extensive survey\")\n##D base.plot()\n##D plot(capt2, add = TRUE, title = \"Intensive survey\")\n##D par(mfrow = c(1,1), xpd = FALSE, xaxs = \"r\", yaxs = \"r\") ## defaults\n##D \n##D \n##D ## Weighted variance\n##D \n##D derivedSession(ovenbird.model.1, method = \"R2\")\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"esa.plot","snippet":"### Name: esa.plot\n### Title: Mask Buffer Diagnostic Plot\n### Aliases: esa.plot\n### Keywords: hplot\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## with previously fitted model\n##D esa.plot(secrdemo.0)\n##D \n##D ## from scratch\n##D trps <- make.grid()\n##D msk <- make.mask(trps, buffer = 200, spacing = 5, type = \"trapbuffer\")\n##D detectpar <- list(g0 = 0.2, sigma = 25)\n##D esa.plot(trps,,, msk, 0, detectpar, nocc = 10, col = \"blue\")\n##D esa.plot(trps,,, msk, 0, detectpar, nocc = 5, col = \"green\",\n##D add = TRUE)\n##D \n##D esa.plot(trps,,, msk, 0, detectpar, nocc = 5, thin = 0.002, plt = FALSE)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"expected.n","snippet":"### Name: expected.n\n### Title: Expected Number of Individuals\n### Aliases: expected.n\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D expected.n(secrdemo.0)\n##D expected.n(secrdemo.0, bycluster = TRUE)\n##D expected.n(ovenbird.model.D)\n##D \n##D ## Clustered design\n##D mini <- make.grid(nx = 3, ny = 3, spacing = 50, detector =\n##D \"proximity\")\n##D tempgrids <- trap.builder (cluster = mini , method = \"all\",\n##D frame = expand.grid(x = seq(1000, 9000, 2000),\n##D y = seq(1000, 9000, 2000)), plt = TRUE)\n##D capt <- sim.capthist(tempgrids, popn = list(D = 2))\n##D tempmask <- make.mask(tempgrids, buffer = 100,\n##D type = \"clusterbuffer\")\n##D fit <- secr.fit(capt, mask = tempmask, trace = FALSE)\n##D En <- expected.n(fit, bycluster = TRUE)\n##D \n##D ## GoF or overdispersion statistic\n##D p <- length(fit$fit$par)\n##D y <- cluster.counts(capt)\n##D ## scaled by n-p\n##D sum((y - En)^2 / En) / (length(En)-p)\n##D sum((y - En)^2 / En) / sum(y/En)\n##D \n## End(Not 
run)\n\n\n\n"} {"package":"secr","topic":"extractMoves","snippet":"### Name: extractMoves\n### Title: Simulated Movements\n### Aliases: extractMoves\n\n### ** Examples\n\nset.seed(12345)\npop3 <- sim.popn(D = 2, core = make.grid(), buffer = 200, nsessions = 3, \n details = list(lambda = 1.0, movemodel = 'BVE', move.a = 50, \n edgemethod = 'stop'))\nm <- extractMoves(pop3, plotn = 10, length = 0.1)\nmean(unlist(sapply(m, '[', 'd'))) # less than nominal 2 x move.a\n\n# For distances closer to nominal for BVE (2 x move.a = 100), \n# increase size of arena (e.g., buffer = 500) and consider only \n# central animals (e.g., maxradius = 300).\n\n\n\n"} {"package":"secr","topic":"fx.total","snippet":"### Name: fx.total\n### Title: Activity Centres of Detected and Undetected Animals\n### Aliases: fx.total\n### Keywords: model\n\n### ** Examples\n\n\n## Not run: \n##D \n##D tmp <- fx.total(secrdemo.0)\n##D \n##D ## to plot we must name one of the covariates:\n##D ## the Dsurface default 'D.0' causes an error \n##D \n##D plot(tmp, covariate = 'D.sum', col = terrain.colors(16),\n##D plottype = 'shaded')\n##D plot(tmp, covariate = 'D.sum', col = 'white', add = TRUE,\n##D plottype = 'contour')\n##D if (interactive()) {\n##D spotHeight(tmp, prefix = 'D.sum')\n##D }\n##D \n##D fxsurface <- fx.total(ovenbird.model.D, sessnum = 3)\n##D plot(fxsurface, covariate = 'D.sum')\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"secr","topic":"fxi.contour","snippet":"### Name: fxi\n### Title: Probability Density of Home Range Centre\n### Aliases: fxi.contour fxi.mode fxi.secr fxi\n### Keywords: hplot\n\n### ** Examples\n\n\n## Not run: \n##D \n##D fxi.secr(secrdemo.0, i = 1, X = c(365,605))\n##D \n##D ## contour first 5 detection histories\n##D plot(secrdemo.0$capthist)\n##D fxi.contour (secrdemo.0, i = 1:5, add = TRUE,\n##D plotmode = TRUE, drawlabels = FALSE)\n##D \n##D ## extract modes only\n##D ## these are more reliable than those from fxi.mode called directly as\n##D ## they use a contour-based approximation for the starting point\n##D fxiout <- fxi.contour (secrdemo.0, i = 1:5, plt = FALSE, fitmode = TRUE)\n##D t(sapply(fxiout, \"[[\", \"mode\"))\n##D \n##D ## using fill colours\n##D ## lty = 0 suppresses contour lines\n##D ## nx = 256 ensures smooth outline\n##D plot(traps(captdata))\n##D fxi.contour(secrdemo.0, i = 1:5, add = TRUE, p = c(0.5,0.95), drawlabels\n##D = FALSE, nx = 256, fill = topo.colors(4), lty = 0)\n##D \n##D ## output as simple features\n##D sf <- fxi.contour(secrdemo.0, i = 1:3, plt = FALSE, p = c(0.5,0.95),\n##D nx = 256, output = 'sf', fitmode = TRUE)\n##D \n##D ## save as ESRI shapefile testsf.shp etc.\n##D library(sf)\n##D st_write(sf, 'testsf.shp')\n##D ## plot contours and modes\n##D plot(st_as_sfc(sf)) # outline only\n##D points(sf$modex, sf$modey)\n##D \n##D ## output as SpatialPolygonsDataFrame\n##D spdf <- fxi.contour(secrdemo.0, i = 1:3, plt = FALSE, p = c(0.5,0.95),\n##D nx = 256, output = 'SPDF', fitmode = TRUE)\n##D sp::plot(spdf)\n##D points(data.frame(spdf))\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"gridCells","snippet":"### Name: gridCells\n### Title: Construct Grid Cells\n### Aliases: gridCells\n\n### ** Examples\n\n\nplot(gridCells(traps(captdata)))\nplot(traps(captdata), add = TRUE)\n\n\n"} {"package":"secr","topic":"hcov","snippet":"### Name: hcov\n### Title: Hybrid Mixture Model\n### Aliases: hcov\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## house mouse dataset, morning trap clearances\n##D ## 81 female, 78 male, 1 
unknown\n##D morning <- subset(housemouse, occ = c(1,3,5,7,9))\n##D summary(covariates(morning))\n##D \n##D ## speedy model fitting with coarse mask\n##D mmask <- make.mask(traps(morning), buffer = 20, nx = 32)\n##D \n##D ## assuming equal detection of males and females\n##D ## fitted sex ratio p(female) = 0.509434 = 81 / (81 + 78)\n##D fit.0 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE)\n##D predict(fit.0)\n##D \n##D ## allowing sex-specific detection parameters\n##D ## this leads to new estimate of sex ratio \n##D fit.h2 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE,\n##D model = list(g0 ~ h2, sigma ~ h2))\n##D predict(fit.h2)\n##D \n##D ## specifying newdata for h2 - equivalent to predict(fit.h2)\n##D predict(fit.h2, newdata = data.frame(h2 = factor(c('f','m'))))\n##D \n##D ## conditional likelihood fit of preceding model\n##D ## estimate of sex ratio does not change \n##D fit.CL.h2 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE,\n##D CL = TRUE, model = list(g0 ~ h2, sigma ~ h2))\n##D predict(fit.CL.h2)\n##D \n##D ## did sexes differ in detection parameters?\n##D fit.CL.0 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE,\n##D CL = TRUE, model = list(g0 ~ 1, sigma ~ 1))\n##D LR.test(fit.CL.h2, fit.CL.0)\n##D \n##D ## did sex ratio deviate from 1:1?\n##D fit.CL.h2.50 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE,\n##D CL = TRUE, model = list(g0 ~ h2, sigma ~ h2), fixed = list(pmix = 0.5))\n##D LR.test(fit.CL.h2, fit.CL.h2.50)\n##D \n##D ## did sexes show extra-compensatory variation in lambda0?\n##D ## (Efford and Mowat 2014)\n##D fit.CL.a0 <- secr.fit(morning, hcov = \"sex\", mask = mmask, trace = FALSE,\n##D CL = TRUE, model = list(a0 ~ 1, sigma ~ h2))\n##D LR.test(fit.CL.h2, fit.CL.a0)\n##D \n##D ## trend in ovenbird sex ratio, assuming sex-specific detection\n##D omask <- make.mask(traps(ovenCH), buffer = 300, nx = 32)\n##D fit.sextrend <- secr.fit(ovenCH, model = list(g0~h2, sigma~h2, pmix~Session),\n##D hcov = \"Sex\", CL = TRUE, mask = omask, trace = FALSE)\n##D predict(fit.sextrend)[1:5]\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"head.mask","snippet":"### Name: head\n### Title: First or Last Part of an Object\n### Aliases: head.mask head.Dsurface head.traps head.capthist tail.mask\n### tail.Dsurface tail.traps tail.capthist\n### Keywords: manip\n\n### ** Examples\n\nhead(possummask)\n\n\n"} {"package":"secr","topic":"dbar","snippet":"### Name: homerange\n### Title: Home Range Statistics\n### Aliases: dbar RPSV MMDM ARL moves centroids ORL trapsPerAnimal\n### Keywords: models\n\n### ** Examples\n\n\ndbar(captdata)\nRPSV(captdata)\nRPSV(captdata, CC = TRUE)\n\ncentr <- centroids(captdata)\nplot(traps(captdata), border = 20 )\ntext(centr[,1], centr[,2], attr(centr, 'Ndetections'))\ntext(centr[,1]+2, centr[,2]+3, rownames(captdata), cex = 0.6,\n adj = 0)\n\n\n\n"} {"package":"secr","topic":"hornedlizard","snippet":"### Name: hornedlizard\n### Title: Flat-tailed Horned Lizard Dataset\n### Aliases: hornedlizard hornedlizardCH\n### Keywords: datasets\n\n### ** Examples\n\n\nplot(hornedlizardCH, tracks = TRUE, varycol = FALSE,\n lab1 = TRUE, laboff = 6, border = 10, title =\n \"Flat-tailed Horned Lizards (Royle & Young 2008)\")\n\ntable(table(animalID(hornedlizardCH)))\ntraps(hornedlizardCH)\n\n## show first few x-y coordinates\nhead(xy(hornedlizardCH))\n\n## Not run: \n##D \n##D ## Compare default (Poisson) and binomial models for number\n##D ## caught\n##D FTHL.fit <- 
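##D # (annotation, not in the original: the default model treats the
##D # number caught as Poisson; details = list(distribution = "binomial")
##D # below instead conditions on a fixed N within the mask, which
##D # typically gives a smaller SE for the density estimate)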
secr.fit(hornedlizardCH)\n##D FTHLbn.fit <- secr.fit(hornedlizardCH, details =\n##D list(distribution = \"binomial\"))\n##D collate(FTHL.fit, FTHLbn.fit)[,,,\"D\"]\n##D \n##D ## Collapse occasions (does not run faster)\n##D hornedlizardCH.14 <- reduce(hornedlizardCH, newoccasions =\n##D list(1:14), outputdetector = \"polygon\")\n##D FTHL14.fit <- secr.fit(hornedlizardCH.14, binomN = 14)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"join","snippet":"### Name: join\n### Title: Combine or Split Sessions of capthist Object\n### Aliases: join unjoin\n### Keywords: manip\n\n### ** Examples\n\n\njoined.ovenCH <- join (ovenCH)\nsummary(joined.ovenCH)\nattr(joined.ovenCH, \"intervals\")\n\nsummary(unjoin(joined.ovenCH))\n\n## Not run: \n##D \n##D ## suppose the 5-year ovenbird covariates include a column for weight\n##D ## (here generated as random numbers)\n##D for (i in 1:5) covariates(ovenCH[[i]])$wt <- runif(nrow(ovenCH[[i]]))\n##D ## construct single-session version of data for openCR\n##D ## identify 'wt' as varying across years\n##D ovenCHj <- join(ovenCH, timevaryingcov = 'wt')\n##D head(covariates(ovenCHj))\n##D timevaryingcov(ovenCHj)\n##D ## Use example: openCR.fit(ovenCHj, model = p~wt)\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"secr","topic":"kfn","snippet":"### Name: kfn\n### Title: Overlap Index\n### Aliases: kfn\n\n### ** Examples\n\n\nkfn(secrdemo.0)\n\n## compare\n## fitk <- secr.fit(captdata, model = sigmak~1, buffer = 100, trace = FALSE)\n## predict(fitk)\n\n\n\n"} {"package":"secr","topic":"logit","snippet":"### Name: logit\n### Title: Logit Transformation\n### Aliases: logit invlogit\n### Keywords: manip\n\n### ** Examples\n\nlogit(0.5)\ninvlogit(logit(0.2))\n\n\n"} {"package":"secr","topic":"logmultinom","snippet":"### Name: logmultinom\n### Title: Multinomial Coefficient of SECR Likelihood\n### Aliases: logmultinom\n### Keywords: models\n\n### ** Examples\n\n\n## no groups\nlogmultinom(stoatCH)\n\n\n\n"} {"package":"secr","topic":"make.capthist","snippet":"### Name: make.capthist\n### Title: Construct capthist Object\n### Aliases: make.capthist\n### Keywords: manip\n\n### ** Examples\n\n\n## peek at demonstration data\nhead(captXY)\nhead(trapXY)\n\ndemotraps <- read.traps(data = trapXY)\ndemoCHxy <- make.capthist (captXY, demotraps, fmt = \"XY\")\n\ndemoCHxy ## print method for capthist\nplot(demoCHxy) ## plot method for capthist\nsummary(demoCHxy) ## summary method for capthist\n\n\n## To enter `count' data without manually repeating rows\n## need a frequency vector f, length(f) == nrow(captXY)\nn <- nrow(captXY)\nf <- sample (1:5, size = n, prob = rep(0.2,5), replace = TRUE)\n## repeat rows as required...\ncaptXY <- captXY[rep(1:n, f),]\ncounttraps <- read.traps(data = trapXY, detector = \"count\")\ncountCH <- make.capthist (captXY, counttraps, fmt = \"XY\")\n\n\n\n"} {"package":"secr","topic":"make.lacework","snippet":"### Name: make.lacework\n### Title: Construct Lacework Detector Design\n### Aliases: make.lacework\n### Keywords: manip\n\n### ** Examples\n\n\ntrps <- make.lacework(possumarea, c(1000,100), rotate = 45, detector = 'proximity')\nplot(trps, gridspace = 1000)\nlines(possumarea)\npoints(attr(trps, 'crossings'), pch = 16)\n\n\n\n"} {"package":"secr","topic":"make.mask","snippet":"### Name: make.mask\n### Title: Build Habitat Mask\n### Aliases: make.mask\n### Keywords: datagen\n\n### ** Examples\n\n\ntemptrap <- make.grid(nx = 10, ny = 10, spacing = 30)\n\n## default method: traprect\ntempmask <- make.mask(temptrap, spacing = 
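## (annotation, not in the original: spacing is the mask cell side in
## metres; cell area in hectares is spacing^2 / 10000 and is stored
## afterwards as attr(tempmask, "area"))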
5)\nplot(tempmask)\nsummary (tempmask)\n\n## make irregular detector array by subsampling \n## form mask by `trapbuffer' method\ntemptrap <- subset (temptrap, sample(nrow(temptrap), size = 30))\ntempmask <- make.mask (temptrap, spacing = 5, type = \"trapbuffer\")\nplot (tempmask)\nplot (temptrap, add = TRUE)\n\n## Not run: \n##D \n##D ## form mask by \"pdot\" method\n##D temptrap <- make.grid(nx = 6, ny = 6)\n##D tempmask <- make.mask (temptrap, buffer = 150, type = \"pdot\", \n##D pdotmin = 0.0001, detectpar = list(g0 = 0.1, sigma = 30),\n##D noccasions = 4)\n##D plot (tempmask)\n##D plot (temptrap, add = TRUE)\n##D \n##D ## Using an ESRI polygon shapefile for clipping (shapefile\n##D ## polygons may include multiple islands and holes).\n##D \n##D library(sf)\n##D shpfilename <- system.file(\"extdata/possumarea.shp\", package = \"secr\")\n##D possumarea <- st_read(shpfilename)\n##D \n##D possummask2 <- make.mask(traps(possumCH), spacing = 20,\n##D buffer = 250, type = \"trapbuffer\", poly = possumarea)\n##D par(mar = c(1,6,6,6), xpd = TRUE)\n##D plot (possummask2, ppoly = TRUE)\n##D plot(traps(possumCH), add = TRUE)\n##D par(mar = c(5,4,4,2) + 0.1, xpd = FALSE)\n##D \n##D ## if the polygon delineates non-habitat ...\n##D seaPossumMask <- make.mask(traps(possumCH), buffer = 1000, \n##D type = \"traprect\", poly = possumarea, poly.habitat = FALSE)\n##D plot(seaPossumMask)\n##D plot(traps(possumCH), add = TRUE)\n##D ## this mask is not useful!\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"secr","topic":"make.systematic","snippet":"### Name: make.systematic\n### Title: Construct Systematic Detector Design\n### Aliases: make.systematic\n### Keywords: manip\n\n### ** Examples\n\n\nmini <- make.grid(nx = 2, ny = 2, spacing = 100)\nregion <- cbind(x=c(0,2000,2000,0), y=c(0,0,2000,2000))\ntemp <- make.systematic(25, mini, region, plt = TRUE)\ntemp <- make.systematic(c(6, 6), mini, region, plt = TRUE,\n rotation = -1)\n\n## Example using shapefile \"possumarea.shp\" in\n## \"extdata\" folder. By default, each cluster is \n## a single multi-catch detector\n\n## Not run: \n##D \n##D library(sf)\n##D shpfilename <- system.file(\"extdata/possumarea.shp\", package = \"secr\")\n##D possumarea <- st_read(shpfilename)\n##D \n##D possumgrid <- make.systematic(spacing = 100, region =\n##D possumarea, plt = TRUE)\n##D \n##D ## or with 2 x 2 clusters\n##D possumgrid2 <- make.systematic(spacing = 300,\n##D cluster = make.grid(nx = 2, ny = 2, spacing = 100),\n##D region = possumarea, plt = TRUE, edgemethod =\n##D \"allinside\")\n##D ## label clusters\n##D text(cluster.centres(possumgrid2), levels(clusterID\n##D (possumgrid2)), cex=0.7)\n##D \n##D ## If you have GPSBabel installed and on the Path\n##D ## then coordinates can be projected and uploaded\n##D ## to a GPS with `writeGPS', which also requires the\n##D ## package `proj4'. 
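##D ## (annotation, not in the original help page: without GPSBabel the
##D ## detector coordinates can simply be exported as text, using the
##D ## grid `possumgrid' created above)
##D # write.csv(as.data.frame(possumgrid), "possumgrid.csv")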
Defaults are for a Garmin GPS\n##D ## connected by USB.\n##D \n##D if (interactive()) {\n##D writeGPS(possumgrid, proj = \"+proj=nzmg\")\n##D }\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"make.grid","snippet":"### Name: make.traps\n### Title: Build Detector Array\n### Aliases: make.grid make.circle make.poly make.transect make.telemetry\n### Keywords: datagen\n\n### ** Examples\n\ndemo.traps <- make.grid()\nplot(demo.traps)\n\n## compare numbering schemes\npar (mfrow = c(2,4), mar = c(1,1,1,1), xpd = TRUE)\nfor (id in c(\"numx\", \"numy\", \"alphax\", \"alphay\", \"numxb\", \n \"numyb\"))\n{\n temptrap <- make.grid(nx = 7, ny = 5, ID = id)\n plot (temptrap, border = 10, label = TRUE, offset = 7, \n gridl = FALSE)\n}\n\ntemptrap <- make.grid(nx = 7, ny = 5, hollow = TRUE)\nplot (temptrap, border = 10, label = TRUE, gridl = FALSE)\n\nplot(make.circle(n = 20, spacing = 30), label = TRUE, offset = 9)\nsummary(make.circle(n = 20, spacing = 30))\n\n\n## jitter locations randomly within grid square\n## and plot over `mask'\ntemptrap <- make.grid(nx = 7, ny = 7, spacing = 30)\ntempmask <- make.mask(temptrap, buffer = 15, nx = 7, ny = 7)\ntemptrap[,] <- temptrap[,] + 30 * (runif(7*7*2) - 0.5)\nplot(tempmask, dots = FALSE, mesh = 'white')\nplot(temptrap, add = TRUE)\n\n\n\n"} {"package":"secr","topic":"make.tri","snippet":"### Name: make.tri\n### Title: Build Detector Array on Triangular or Hexagonal Grid\n### Aliases: make.tri clip.hex\n### Keywords: datagen\n\n### ** Examples\n\n\ntri.grid <- make.tri(spacing = 10)\nplot(tri.grid, border = 5)\n\nhex <- clip.hex(tri.grid, side = 30, ID = \"alpha\")\nplot (hex, add = TRUE, detpar = list(pch = 16, cex = 1.4),\n label = TRUE, offset = 2.5 )\n\n\n\n"} {"package":"secr","topic":"makeStart","snippet":"### Name: makeStart\n### Title: Initial Parameter Values\n### Aliases: makeStart\n\n### ** Examples\n\n\nmakeStart(secrdemo.0, list(D = 1, g0 = 2:3, sigma = 4))\n\n\n\n"} {"package":"secr","topic":"intervals","snippet":"### Name: intervals\n### Title: Work with Open Population data\n### Aliases: intervals intervals<- sessionlabels sessionlabels<-\n### Keywords: manip\n\n### ** Examples\n\n\nsinglesessionCH <- join(ovenCH)\nintervals(singlesessionCH)\nsessionlabels(singlesessionCH)\n\n\n\n"} {"package":"secr","topic":"mask.check","snippet":"### Name: mask.check\n### Title: Mask Diagnostics\n### Aliases: mask.check\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## from a capthist object, specifying almost everything\n##D mask.check (possumCH, spacings = c(20, 30), buffers =c(200, 300),\n##D realpar = list(g0 = 0.2, sigma = 50), CL = TRUE)\n##D \n##D ## from a fitted model, using defaults\n##D mask.check (stoat.model.HN)\n##D ## LL did not change with varying buffer (rows) or spacing (cols):\n##D ## 78.125 58.59375 39.0625\n##D ## 1000 -144.0015 -144.0015 -144.0015\n##D ## 1500 -144.0017 -144.0017 -144.0017\n##D ## 2000 -144.0017 -144.0017 -144.0017\n##D \n##D ## fit new models for each combination of buffer & spacing,\n##D ## and save fitted models to a file\n##D mask.check (stoat.model.HN, buffers = 1500, spacings =\n##D c(40,60,80), LLonly = FALSE, file = \"test\", CL = TRUE)\n##D \n##D ## look in more detail at the preceding fits\n##D ## restores objects `mask.check.output' and `mask.check.fit'\n##D load(\"test.RData\") \n##D lapply(mask.check.fit, predict)\n##D lapply(mask.check.fit, derived)\n##D \n##D ## multi-session data\n##D mask.check(ovenbird.model.1, session = c(\"2005\",\"2009\"))\n##D \n##D ## clipping 
mask\n##D txtfilename <- system.file(\"extdata/possumarea.txt\", package = \"secr\")\n##D possumarea <- read.table(txtfilename, header = TRUE)\n##D mask.check (possum.model.0, spacings = c(20, 30), buffers =\n##D c(200, 300), poly = possumarea, LLonly = FALSE,\n##D file = \"temp\", CL = TRUE)\n##D \n##D ## review fitted models\n##D load (\"temp.RData\")\n##D par(mfrow = c(2,2), mar = c(1,4,4,4))\n##D for (i in 1:4) {\n##D plot(traps(mask.check.fit[[i]]$capthist), border = 300,\n##D gridlines = FALSE)\n##D plot(mask.check.fit[[i]]$mask, add = TRUE)\n##D lines(possumarea)\n##D text ( 2698618, 6078427, names(mask.check.fit)[i])\n##D box()\n##D }\n##D par(mfrow = c(1,1), mar = c(5,4,4,2) + 0.1) ## defaults\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"modelAverage","snippet":"### Name: modelAverage\n### Title: Averaging of SECR Models Using Akaike's Information Criterion\n### Aliases: modelAverage modelAverage.secr modelAverage.secrlist\n### Keywords: models\n\n### ** Examples\n\n## Compare two models fitted previously\n## secrdemo.0 is a null model\n## secrdemo.b has a learned trap response\n\nmodelAverage(secrdemo.0, secrdemo.b)\nmodelAverage(secrdemo.0, secrdemo.b, betanames = c(\"D\",\"g0\",\"sigma\"))\n\n## In this case we find the difference was actually trivial...\n## (subscripting of output is equivalent to setting fields = 1)\n\n\n\n"} {"package":"secr","topic":"ms","snippet":"### Name: ms\n### Title: Multi-session Objects\n### Aliases: ms ms.default ms.mask ms.secr\n### Keywords: manip\n\n### ** Examples\n\nms(ovenCH)\nms(ovenbird.model.1)\nms(ovenCH[[1]])\n\n\n"} {"package":"secr","topic":"nontarget","snippet":"### Name: nontarget\n### Title: Non-target Data\n### Aliases: nontarget nontarget<- interference\n\n### ** Examples\n\n\nset.seed(123)\nch <- captdata\n\n# traps that caught something\ncaught <- t(apply(ch, 2:3, sum))\n\n# construct artificial nontarget data\n# (positive for half the traps that caught nothing)\nnontarget(ch) <- (1-caught) * (runif(500)>0.5)\n\nhead(caught)\nhead(nontarget(ch))\n\n# the summary method recognises the 'nontarget' attribute\nsummary(ch)$nontarget\n\n\n\n"} {"package":"secr","topic":"occasionKey","snippet":"### Name: occasionKey\n### Title: Key to Petal Plot\n### Aliases: occasionKey\n### Keywords: hplot\n\n### ** Examples\n\n\nplot(captdata, border = 50)\noccasionKey(captdata, rad = 8, cex = 0.8)\n\n\n\n"} {"package":"secr","topic":"ovenbird","snippet":"### Name: ovenbird\n### Title: Ovenbird Mist-netting Dataset\n### Aliases: ovenbird ovenCH ovenCHp ovenbird.model.1 ovenbird.model.D\n### ovenmask\n### Keywords: datasets\n\n### ** Examples\n\n\n## commands used to create ovenCH from the input files\n## \"netsites0509.txt\" and \"ovencapt.txt\"\n## for information only - these files not distributed\n\n# netsites0509 <- read.traps(file = \"netsites0509.txt\",\n# skip = 1, detector = \"proximity\")\n# temp <- read.table(\"ovencapt.txt\", colClasses=c(\"character\",\n# \"character\", \"numeric\", \"numeric\", \"character\"))\n# ovenCHp <- make.capthist(temp, netsites0509, covnames = \"Sex\")\n# ovenCHp <- reduce(ovenCHp, dropunused = FALSE) # drop repeat detections\n\npar(mfrow = c(1,5), mar = c(1,1,4,1))\nplot(ovenCHp, tracks = TRUE, varycol = TRUE)\npar(mfrow = c(1,1), mar = c(5,4,4,2) + 0.1) ## defaults\n\ncounts(ovenCHp, \"n\")\n\n## Not run: \n##D \n##D ## trimmed version of data - for consistency with earlier versions\n##D \n##D ovenCH <- reduce(ovenCHp, outputdetector = \"multi\", dropunused = FALSE)\n##D \n##D ## array constant 
over years, so build mask only once\n##D ovenmask <- make.mask(traps(ovenCH)[[\"2005\"]], type = \"pdot\", \n##D buffer = 400, spacing = 15, detectpar = list(g0 = 0.03, \n##D sigma = 90), nocc = 10)\n##D \n##D ## fit constant-density model\n##D ovenbird.model.1 <- secr.fit(ovenCH, mask = ovenmask)\n##D \n##D ## fit temporal trend in density (Session capitalized)\n##D ovenbird.model.D <- secr.fit(ovenCH, mask = ovenmask, \n##D model = list(D ~ Session))\n##D \n##D ## compare pre-fitted models\n##D AIC(ovenbird.model.1, ovenbird.model.D)\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"secr","topic":"ovensong","snippet":"### Name: ovensong\n### Title: Ovenbird Acoustic Dataset\n### Aliases: ovensong signalCH ovensong.model.1 ovensong.model.2\n### Keywords: datasets\n\n### ** Examples\n\n\nsummary(signalCH)\ntraps(signalCH)\nsignal(signalCH)\n\n## apply signal threshold\nsignalCH.525 <- subset(signalCH, cutval = 52.5)\n\n## Not run: \n##D \n##D ## models with and without spherical spreading\n##D omask <- make.mask(traps(signalCH), buffer = 200)\n##D ostart <- c(log(20), 80, log(0.1), log(2))\n##D ovensong.model.1 <- secr.fit( signalCH.525, mask = omask, \n##D start = ostart, detectfn = 11 ) \n##D ovensong.model.2 <- secr.fit( signalCH.525, mask = omask, \n##D start = ostart, detectfn = 10 ) \n##D \n## End(Not run)\n\n## compare fit of models\nAIC(ovensong.model.1, ovensong.model.2)\n\n## density estimates, dividing by 75 to allow for replication\ncollate(ovensong.model.1, ovensong.model.2)[1,,,\"D\"]/75\n\n## plot attenuation curves cf Dawson & Efford (2009) Fig 5\npars1 <- predict(ovensong.model.1)[c(\"beta0\", \"beta1\"), \"estimate\"]\npars2 <- predict(ovensong.model.2)[c(\"beta0\", \"beta1\"), \"estimate\"]\nattenuationplot(pars1, xval=0:150, spherical = TRUE, ylim = c(40,110))\nattenuationplot(pars2, xval=0:150, spherical = FALSE, add = TRUE, \n col = \"red\")\n## spherical spreading only\npars1[2] <- 0 \nattenuationplot(pars1, xval=0:150, spherical = TRUE, add = TRUE, lty=2)\n\n\n\n"} {"package":"secr","topic":"par.secr.fit","snippet":"### Name: par.secr.fit\n### Title: Fit Multiple SECR Models\n### Aliases: par.secr.fit par.derived par.region.N\n### Keywords: model\n\n### ** Examples\n\n\n## Not run: \n##D \n##D fit0 <- list(capthist = 'captdata', model = g0~1) \n##D fitb <- list(capthist = 'captdata', model = g0~b)\n##D fits <- par.secr.fit (c('fit0','fitb'))\n##D AIC(fits)\n##D \n##D par.derived(fits, se.esa = FALSE)\n##D \n##D par.region.N(fits)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"pdot","snippet":"### Name: pdot\n### Title: Net Detection Probability\n### Aliases: pdot CVpdot\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D temptrap <- make.grid()\n##D ## per-session detection probability for an individual centred\n##D ## at a corner trap. 
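##D ## (annotation, not in the original help page: pdot declines with
##D ## distance from the array; a sketch evaluating points along the
##D ## diagonal away from the corner trap at the origin)
##D # d <- seq(0, 100, 25)
##D # sapply(d, function(r) pdot(c(-r, -r), temptrap,
##D #     detectpar = list(g0 = 0.2, sigma = 25), noccasions = 5))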
By default, noccasions = 5.\n##D pdot (c(0,0), temptrap, detectpar = list(g0 = 0.2, sigma = 25),\n##D noccasions = 5)\n##D \n##D msk <- make.mask(temptrap, buffer = 100)\n##D CVpdot(msk, temptrap, detectpar = list(g0 = 0.2, sigma = 25),\n##D noccasions = 5)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"plot.capthist","snippet":"### Name: plot.capthist\n### Title: Plot Detection Histories\n### Aliases: plot.capthist plotMCP\n### Keywords: hplot\n\n### ** Examples\n\ndemotrap <- make.grid()\ntempcapt <- sim.capthist(demotrap, \n popn = list(D = 5, buffer = 50), \n detectpar = list(g0 = 0.15, sigma = 30))\nplot(tempcapt, border = 10, rad = 3, tracks = TRUE, \n lab1cap = TRUE, laboffset = 2.5)\n\n## type = n.per.cluster\n\n## generate some captures\ntestregion <- data.frame(x = c(0,2000,2000,0),\n y = c(0,0,2000,2000))\npopn <- sim.popn (D = 10, core = testregion, buffer = 0,\n model2D = \"hills\", details = list(hills = c(-2,3)))\nt1 <- make.grid(nx = 1, ny = 1)\nt1.100 <- make.systematic (cluster = t1, spacing = 100,\n region = testregion)\ncapt <- sim.capthist(t1.100, popn = popn, noccasions = 1)\n\n## now plot captures ...\ntemp <- plot(capt, title = \"Individuals per cluster\",\n type = \"n.per.cluster\", hidetraps = FALSE,\n gridlines = FALSE, cappar = list(cex = 1.5))\n\nif (interactive()) {\n ## add legend; click on map to place top left corner\n legend (locator(1), pch = 21, pt.bg = temp$colour,\n pt.cex = 1.3, legend = temp$legend, cex = 0.8)\n}\n\n## Not run: \n##D \n##D ## try varying individual colours - requires RColorBrewer\n##D library(RColorBrewer)\n##D plot(infraCH[[2]], icolours = brewer.pal(12, \"Set3\"), tracks = TRUE,\n##D bg = \"black\", cappar = list(cex = 2), border = 10, rad = 2,\n##D gridlines = FALSE)\n##D \n##D ## generate telemetry data\n##D te <- make.telemetry()\n##D tr <- make.grid(detector = \"proximity\")\n##D totalpop <- sim.popn(tr, D = 20, buffer = 100)\n##D tepop <- subset(totalpop, runif(nrow(totalpop)) < 0.05)\n##D teCH <- sim.capthist(te, popn = tepop, renumber=FALSE, detectfn = \"HHN\",\n##D detectpar = list(lambda0 = 3, sigma = 25))\n##D plot(teCH, type = 'telemetry', tracks = TRUE)\n##D \n##D ## simple \"centres\" example\n##D ## polygon data require 'hazard' detection function 14:19\n##D CH <- sim.capthist(make.poly(), nocc = 20, detectfn = 'HHN', \n##D detectpar = list(lambda0 = 1, sigma = 10))\n##D plot(CH, cappar = list(col = 'orange'), varycol = FALSE, border = 10)\n##D plot(CH, type = 'centres', add = TRUE, rad = 0)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"plot.mask","snippet":"### Name: plot.mask\n### Title: Plot Habitat Mask, Density or Resource Surface\n### Aliases: plot.mask plot.Dsurface plot.Rsurface spotHeight\n### Keywords: hplot\n\n### ** Examples\n\n\n# simple\n\ntemptrap <- make.grid()\ntempmask <- make.mask(temptrap)\nplot (tempmask)\n\n## Not run: \n##D \n##D ## restrict to points over an arbitrary detection threshold,\n##D ## add covariate, plot image and overlay traps\n##D \n##D tempmask <- subset(tempmask, pdot(tempmask, temptrap,\n##D noccasions = 5)>0.001)\n##D covariates (tempmask) <- data.frame(circle = \n##D exp(-(tempmask$x^2 + tempmask$y^2)/10000) )\n##D plot (tempmask, covariate = \"circle\", dots = FALSE, axes = TRUE, \n##D add = TRUE, breaks = 8, col = terrain.colors(8), mesh = NA)\n##D plot (temptrap, add = TRUE)\n##D \n##D ## add a legend\n##D par(cex = 0.9)\n##D covrange <- range(covariates(tempmask)$circle)\n##D step <- diff(covrange)/8\n##D colourlev <- terrain.colors(9)\n##D zlev <- 
format(round(seq(covrange[1],covrange[2],step),2))\n##D legend (x = \"topright\", fill = colourlev, legend = zlev, \n##D y.intersp = 0.8, title = \"Covariate\")\n##D \n##D title(\"Colour mask points with p.(X) > 0.001\")\n##D mtext(side=3,line=-1, \"g0 = 0.2, sigma = 20, nocc = 5\")\n##D \n##D ## Waitarere possum density surface extrapolated across region\n##D \n##D regionmask <- make.mask(traps(possumCH), buffer = 1000, spacing = 10,\n##D poly = possumremovalarea)\n##D dts <- distancetotrap(regionmask, possumarea)\n##D covariates(regionmask) <- data.frame(d.to.shore = dts)\n##D shorePossums <- predictDsurface(possum.model.Ds, regionmask)\n##D \n##D ## plot as coloured pixels with white lines\n##D colourlev <- terrain.colors(7)\n##D plot(shorePossums, breaks = seq(0,3.5,0.5), plottype = \"shaded\",\n##D poly = FALSE, col = colourlev, mesh = NA)\n##D plot(traps(possumCH), add = TRUE, detpar = list(col = \"black\"))\n##D polygon(possumremovalarea)\n##D \n##D ## check some point densities\n##D spotHeight(shorePossums, dec = 1, col = \"black\")\n##D \n##D ## add a legend\n##D zlev <- format(seq(0,3,0.5), digits = 1)\n##D legend (x = \"topright\", fill = colourlev, legend =\n##D paste(zlev,\"--\"), y.intersp = 1, title = \"Density / ha\")\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"plot.popn","snippet":"### Name: plot.popn\n### Title: Plot Population Object\n### Aliases: plot.popn\n### Keywords: hplot\n\n### ** Examples\n\n\ntemppopn <- sim.popn(D = 5, expand.grid(\n x = c(0,100), y = c(0,100)))\n# specify collapse to avoid partial match of col \nplot(temppopn, pch = 16, collapse = FALSE, col = \"blue\")\n\nplot(temppopn, circles = 20, bg = \"tan\", fg = \"white\")\nplot(temppopn, pch = 16, cex = 0.5, add = TRUE)\n\n\n\n"} {"package":"secr","topic":"plot.secr","snippet":"### Name: plot.secr\n### Title: Plot Detection Functions\n### Aliases: plot.secr plot.secrlist detectfnplot attenuationplot\n### Keywords: hplot\n\n### ** Examples\n\n\nplot (secrdemo.b, xval = 0:100, ylim = c(0, 0.4))\n## Add recapture probability\nplot (secrdemo.b, newdata = data.frame(b = 1), add = TRUE,\n col = \"red\")\n\n## signal strength detection: 70dB at source, attenuation\n## 0.3dB/m, sdS 5dB; detection threshold 40 dB.\ndetectfnplot (detectfn = 10, c(70, -0.3, 5), details =\n list(cutval = 40))\n\n## add a function with louder source and spherical spreading...\ndetectfnplot (detectfn = 11, c(110, -0.3, 5), details =\n list(cutval = 40), add = TRUE, col = \"red\")\n\n## matching sound attenuation curves; `spherical-only' dashed line\nattenuationplot (c(70, -0.3), spherical = FALSE, ylim=c(-10,110))\nattenuationplot (c(110, 0), spherical = TRUE, add=TRUE, lty=2)\nattenuationplot (c(110, -0.3), spherical = TRUE, add = TRUE,\n col = \"red\")\n\n\n\n"} {"package":"secr","topic":"plot.traps","snippet":"### Name: plot.traps\n### Title: Plot traps Object\n### Aliases: plot.traps\n### Keywords: hplot\n\n### ** Examples\n\n temptrap <- make.grid()\n plot (temptrap, detpar = list(pch = 16, col = \"blue\"), \n label = TRUE, offset = 7)\n\n\n"} {"package":"secr","topic":"plotMaskEdge","snippet":"### Name: plotMaskEdge\n### Title: Outline Around Mask Cells\n### Aliases: plotMaskEdge\n### Keywords: hplot\n\n### ** Examples\n\n\n## Not run: \n##D plot(possummask)\n##D plotMaskEdge (possummask, add = TRUE)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"pmixProfileLL","snippet":"### Name: pmixProfileLL\n### Title: Mixture Model Check\n### Aliases: pmixProfileLL\n### Keywords: model\n\n### ** Examples\n\n\n## Not 
run: \n##D \n##D pmvals <- seq(0.02,0.99,0.02)\n##D mask <- make.mask(traps(ovenCH[[1]]), nx = 32, buffer = 100)\n##D \n##D ## only g0 ~ h2, so reduce pmi from 5 to 4\n##D outPL <- pmixProfileLL(ovenCH[[1]], model = list(g0~h2), \n##D mask = mask, pmvals, CL = TRUE, trace = FALSE, pmi = 4) \n##D \n##D plot(pmvals, outPL, xlim = c(0,1),\n##D xlab = 'Fixed pmix', ylab = 'Profile log-likelihood')\n##D \n## End(Not run)\n\n\n"} {"package":"secr","topic":"pointsInPolygon","snippet":"### Name: pointsInPolygon\n### Title: Points Inside Polygon\n### Aliases: pointsInPolygon\n### Keywords: manip\n\n### ** Examples\n\n\n## 100 random points in unit square\nxy <- matrix(runif(200), ncol = 2)\n## triangle centred on (0.5, 0.5)\npoly <- data.frame(x = c(0.2,0.5,0.8,0.2), y = c(0.2,0.8,0.2,0.2))\nplot(xy, pch = 1 + pointsInPolygon(xy, poly))\nlines(poly)\n\n\n\n"} {"package":"secr","topic":"polyarea","snippet":"### Name: polyarea\n### Title: Area of Polygon(s)\n### Aliases: polyarea\n### Keywords: manip\n\n### ** Examples\n\npolyarea(make.grid(hollow = TRUE))\n\n\n"} {"package":"secr","topic":"possum","snippet":"### Name: possum\n### Title: Brushtail Possum Trapping Dataset\n### Aliases: possum possumCH possumarea possumremovalarea possummask\n### possum.model.0 possum.model.Ds\n### Keywords: datasets\n\n### ** Examples\n\n\nplot(possummask)\nplot(possumCH, tracks = TRUE, add = TRUE)\nplot(traps(possumCH), add = TRUE)\nlines(possumarea)\nsummary(possumCH)\n\n## compare & average pre-fitted models\nAIC(possum.model.0, possum.model.Ds)\nmodelAverage(possum.model.0, possum.model.Ds)\n\n## Not run: \n##D \n##D ## Roughly estimate tag-loss error by dropping dubious histories\n##D ## i.e. restrict to \"not previously tagged\"\n##D NPT <- !covariates(possumCH)$prev.tagged\n##D possum.model.0.NPT <- secr.fit(subset(possumCH,NPT), mask =\n##D possummask, trace = FALSE)\n##D predict(possum.model.0)[1,2]/ predict(possum.model.0.NPT)[1,2]\n##D ## ...about 9%\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"predict.secr","snippet":"### Name: predict.secr\n### Title: SECR Model Predictions\n### Aliases: predict.secr predict.secrlist detectpar detectpar.secr\n### Keywords: models\n\n### ** Examples\n\n\n## load previously fitted secr model with trap response\n## and extract estimates of `real' parameters for both\n## naive (b = 0) and previously captured (b = 1) animals\n\npredict (secrdemo.b, newdata = data.frame(b = 0:1))\n\n## OR from secr 3.1.4 \npredict (secrdemo.b, all.levels = TRUE)\n\ntemp <- predict (secrdemo.b, all.levels = TRUE, save = TRUE)\nattr(temp, \"newdata\")\n\ndetectpar(secrdemo.0)\n\n\n"} {"package":"secr","topic":"predictDsurface","snippet":"### Name: predictDsurface\n### Title: Predict Density Surface\n### Aliases: predictDsurface\n### Keywords: manip\n\n### ** Examples\n\n\n## use canned possum model\nshorePossums <- predictDsurface(possum.model.Ds)\npar(mar = c(1,1,1,6))\nplot(shorePossums, plottype = \"shaded\", polycol = \"blue\", border = 100)\nplot(traps(possumCH), detpar = list(col = \"black\"), add = TRUE)\npar(mar = c(5,4,4,2) + 0.1) ## reset to default\n## extract and summarise\nsummary(covariates(shorePossums))\n\n## Not run: \n##D \n##D ## extrapolate to a new mask; add covariate needed by model; plot\n##D regionmask <- make.mask(traps(possumCH), buffer = 1000, spacing = 10,\n##D poly = possumremovalarea)\n##D dts <- distancetotrap(regionmask, possumarea)\n##D covariates(regionmask) <- data.frame(d.to.shore = dts)\n##D regionPossums <- predictDsurface(possum.model.Ds, 
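Editor's sketch combining pointsInPolygon() (documented above) with the mask subset method shown later under subset.mask; the polygon coordinates and buffer are hypothetical.

temptrap <- make.grid()
tempmask <- make.mask(temptrap, buffer = 100)
poly <- data.frame(x = c(-50, 150, 150, -50, -50),
    y = c(-50, -50, 150, 150, -50))
ok <- pointsInPolygon(tempmask, poly)  # logical, one value per mask point
plot(subset(tempmask, ok))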
regionmask,\n##D se.D = TRUE, cl.D = TRUE)\n##D par(mfrow = c(1,2), mar = c(1,1,1,6))\n##D plot(regionPossums, plottype = \"shaded\", mesh = NA, breaks = 20)\n##D plot(regionPossums, plottype = \"contour\", add = TRUE)\n##D plot(regionPossums, covariate = \"SE\", plottype = \"shaded\",\n##D mesh = NA, breaks = 20)\n##D plot(regionPossums, covariate = \"SE\", plottype = \"contour\",\n##D add = TRUE)\n##D \n##D ## confidence surfaces\n##D plot(regionPossums, covariate = \"lcl\", breaks = seq(0,3,0.2),\n##D plottype = \"shaded\")\n##D plot(regionPossums, covariate = \"lcl\", plottype = \"contour\",\n##D add = TRUE, levels = seq(0,2.7,0.2))\n##D title(\"lower 95% surface\")\n##D plot(regionPossums, covariate = \"ucl\", breaks=seq(0,3,0.2),\n##D plottype = \"shaded\")\n##D plot(regionPossums, covariate = \"ucl\", plottype = \"contour\",\n##D add = TRUE, levels = seq(0,2.7,0.2))\n##D title(\"upper 95% surface\")\n##D \n##D ## annotate with CI\n##D par(mfrow = c(1,1))\n##D plot(regionPossums, plottype = \"shaded\", mesh = NA, breaks = 20)\n##D plot(traps(possumCH), add = TRUE, detpar = list(col = \"black\"))\n##D \n##D if (interactive()) {\n##D spotHeight(regionPossums, dec = 1, pre = c(\"lcl\",\"ucl\"), cex = 0.8)\n##D }\n##D \n##D ## perspective plot\n##D pm <- plot(regionPossums, plottype = \"persp\", box = FALSE, zlim =\n##D c(0,3), phi=30, d = 5, col = \"green\", shade = 0.75, border = NA)\n##D lines(trans3d (possumremovalarea$x, possumremovalarea$y,\n##D rep(1,nrow(possumremovalarea)), pmat = pm))\n##D \n##D par(mfrow = c(1,1), mar = c(5, 4, 4, 2) + 0.1) ## reset to default\n##D \n##D ## compare estimates of region N\n##D ## grid cell area is 0.01 ha\n##D sum(covariates(regionPossums)[,\"D.0\"]) * 0.01\n##D region.N(possum.model.Ds, regionmask)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"print.capthist","snippet":"### Name: print.capthist\n### Title: Print Detections\n### Aliases: print.capthist\n### Keywords: print\n\n### ** Examples\n\n## simulated detections of simulated default population of 5/ha\nprint(sim.capthist(make.grid(nx=5,ny=3)))\n\n\n"} {"package":"secr","topic":"print.secr","snippet":"### Name: print.secr\n### Title: Print or Summarise secr Object\n### Aliases: print.secr summary.secr\n### Keywords: print\n\n### ** Examples\n\n\n## load & print previously fitted null (constant parameter) model\nprint(secrdemo.0)\n\nsummary(secrdemo.0)\n\n## combine AIC tables from list of summaries\ndo.call(AIC, lapply(list(secrdemo.b, secrdemo.0), summary))\n\n## Not run: \n##D \n##D print(secrdemo.CL, deriv = TRUE)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"print.traps","snippet":"### Name: print.traps\n### Title: Print Detectors\n### Aliases: print.traps\n### Keywords: print\n\n### ** Examples\n\nprint(make.grid(nx = 5, ny = 3))\n\n\n"} {"package":"secr","topic":"randomHabitat","snippet":"### Name: randomHabitat\n### Title: Random Landscape\n### Aliases: randomHabitat randomDensity\n### Keywords: datagen\n\n### ** Examples\n\n\n## Not run: \n##D \n##D tempmask <- make.mask(nx = 100, ny = 100, spacing = 20)\n##D mrcmask <- randomHabitat(tempmask, p = 0.4, A = 0.4)\n##D plot(mrcmask, dots = FALSE, col = \"green\")\n##D pop <- sim.popn(10, mrcmask, model2D = \"IHP\")\n##D plot(pop, add = TRUE)\n##D \n##D # OR\n##D plot(sim.popn(D = randomDensity, core = tempmask, model2D = \"IHP\",\n##D details = list(D = 10, p = 0.4, A = 0.4, plt = TRUE)), \n##D add = TRUE, frame = FALSE)\n##D \n##D ## plot intermediate steps A, C, D\n##D opar <- par(mfrow = c(1,3))\n##D 
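The 0.01 ha multiplier in the region-N comparison above follows from the mask spacing; a sketch of the arithmetic:

spacing <- 10                   # mask spacing in metres, as above
cellarea_ha <- spacing^2 / 1e4  # 10 x 10 = 100 m^2 = 0.01 ha per cell
# region N is then the sum over cells of density x cell area:
# sum(covariates(regionPossums)[, "D.0"]) * cellarea_ha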
mrcmask <- randomHabitat(tempmask, p = 0.4, A = 0.4, plt = TRUE)\n##D par(opar) \n##D \n##D ## keep non-habitat cells\n##D mrcmask <- randomHabitat(tempmask, p = 0.4, A = 0.4, drop = FALSE)\n##D plot(mrcmask, covariate = \"habitat\", dots = FALSE,\n##D col = c(\"grey\",\"green\"), breaks = 2)\n##D \n##D ## effect of purging small patches\n##D opar <- par(mfrow=c(1,2))\n##D mrcmask <- randomHabitat(tempmask, p = 0.4, A = 0.4, minpatch = 1)\n##D plot(mrcmask, dots = FALSE, col =\"green\")\n##D mrcmask <- randomHabitat(tempmask, p = 0.4, A = 0.4, minpatch = 5)\n##D plot(mrcmask, dots = FALSE, col =\"green\")\n##D par(opar)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"raster","snippet":"### Name: raster\n### Title: Create a RasterLayer Object from Mask or Dsurface\n### Aliases: raster raster,mask-method raster,Dsurface-method rast\n### rast,mask-method rast,Dsurface-method\n### Keywords: methods spatial\n\n### ** Examples\n\n\n## Not run: \n##D \n##D shorePossums <- predictDsurface(possum.model.Ds)\n##D tmp <- raster(shorePossums, covariate = \"D.0\")\n##D library(raster)\n##D plot(tmp, useRaster = FALSE)\n##D \n##D ## alternative with same result\n##D tmp <- raster(shorePossums, values = covariates(shorePossums)$D.0)\n##D \n##D ## set the projection\n##D ## here the crs PROJ.4 spec refers simply to the old NZ metric grid\n##D tmp <- raster(shorePossums, \"D.0\", crs = \"+proj=nzmg\")\n##D ## check the projection\n##D proj4string(tmp)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"rbind.capthist","snippet":"### Name: rbind.capthist\n### Title: Combine capthist Objects\n### Aliases: rbind.capthist MS.capthist\n### Keywords: manip\n\n### ** Examples\n\n\n## extend a multi-session object\n## we fake the 2010 data by copying from 2005\n## note how we name the appended session\nfakeCH <- ovenCH[[\"2005\"]]\nMS.capthist(ovenCH, \"2010\" = fakeCH)\n\n## simulate sessions for 2-part mixture\ntemptrap <- make.grid(nx = 8, ny = 8)\ntemp1 <- sim.capthist(temptrap,\n detectpar = list(g0 = 0.1, sigma = 40))\ntemp2 <- sim.capthist(temptrap,\n detectpar = list(g0 = 0.2, sigma = 20))\n\n## concatenate sessions\ntemp3 <- MS.capthist(large.range = temp1, small.range = temp2)\nsummary(temp3)\n## session-specific movement statistic\nRPSV(temp3)\n\n## pool sessions\ntemp4 <- rbind(temp1, temp2)\nsummary(temp4)\nRPSV(temp4)\n\n## compare mixture to sum of components\n## note `detectors visited' is not additive for 'multi' detector\n## nor is `detectors used'\n(summary(temp1)$counts + summary(temp2)$counts) -\n summary(temp4)$counts\n\n## Not run: \n##D \n##D ## compare two different model fits \n##D tempfit3 <- secr.fit(temp3, CL = TRUE, buffer = 150, model = list\n##D (g0 ~ session, sigma ~ session), trace = FALSE)\n##D predict(tempfit3)\n##D \n##D ## if we can tell which animals had large ranges...\n##D covariates(temp4) <- data.frame(range.size = rep(c(\"large\",\n##D \"small\"), c(nrow(temp1), nrow(temp2))))\n##D tempfit4 <- secr.fit(temp4, CL = TRUE, buffer = 150, model = list\n##D (g0 ~ range.size, sigma ~ range.size), trace = FALSE)\n##D predict(tempfit4, newdata = data.frame(range.size = c(\"large\",\n##D \"small\")))\n##D \n##D ## polygon data\n##D pol1 <- make.poly()\n##D pol2 <- make.poly(x = c(50,50,150,150))\n##D ch1 <- sim.capthist(pol1, popn = list(D = 30), detectfn = 'HHN', \n##D detectpar = list(lambda0 = 0.3))\n##D ch2 <- sim.capthist(pol2, popn = list(D = 30), detectfn = 'HHN', \n##D detectpar = list(lambda0 = 0.3))\n##D plot(ch1); plot(pol2, add = TRUE); plot(ch2, add = 
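Editor's note on the contrast in the rbind.capthist example above: MS.capthist() keeps sessions separate while rbind() pools animals into one session. A sketch using temp3 and temp4 from that example (printed values depend on the simulation):

session(temp3)  # two named sessions: "large.range", "small.range"
nrow(temp4)     # animals in the single pooled session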
TRUE)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"rbind.popn","snippet":"### Name: rbind.popn\n### Title: Combine popn Objects\n### Aliases: rbind.popn\n### Keywords: manip\n\n### ** Examples\n\n\n## generate and combine two subpopulations\ntrapobj <- make.grid()\np1 <- sim.popn(D = 3, core = trapobj)\np2 <- sim.popn(D = 2, core = trapobj)\ncovariates(p1) <- data.frame(size = rep(\"small\", nrow(p1)))\ncovariates(p2) <- data.frame(size = rep(\"large\", nrow(p2)))\npop <- rbind(p1,p2)\n\n## or\npop <- do.call(rbind, list(p1,p2))\n\n\n\n"} {"package":"secr","topic":"rbind.traps","snippet":"### Name: rbind.traps\n### Title: Combine traps Objects\n### Aliases: rbind.traps\n### Keywords: manip\n\n### ** Examples\n\n\n## nested hollow grids\nhollow1 <- make.grid(nx = 8, ny = 8, hollow = TRUE)\nhollow2 <- shift(make.grid(nx = 6, ny = 6, hollow = TRUE), \n c(20, 20))\nnested <- rbind (hollow1, hollow2)\nplot(nested, gridlines = FALSE, label = TRUE)\n\n\n"} {"package":"secr","topic":"read.mask","snippet":"### Name: read.mask\n### Title: Read Habitat Mask From File\n### Aliases: read.mask\n### Keywords: IO\n\n### ** Examples\n\n## Replace file name with a valid local name and remove `#'\n# read.mask (file = \"c:\\\\myfolder\\\\mask.txt\",\n# spacing = 3, header = TRUE)\n## \"mask.txt\" should have lines like this\n# x y\n# 265 265\n# 268 265\n# ...\n\n\n"} {"package":"secr","topic":"read.telemetry","snippet":"### Name: read.telemetry\n### Title: Import Telemetry Fixes\n### Aliases: read.telemetry\n### Keywords: IO\n\n### ** Examples\n\n\n## Not run: \n##D \n##D olddir <- setwd('D:/bears/alberta')\n##D ## peek at raw data\n##D head(readLines('gps2008.txt'))\n##D gps2008CH <- read.telemetry(\"gps2008.txt\")\n##D setwd(olddir)\n##D \n##D plot( gps2008CH, gridsp = 10000)\n##D head(gps2008CH)\n##D secr.fit(gps2008CH, start = log(4000), detectfn = 'HHN', \n##D details = list(telemetryscale = 1e12))\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"read.traps","snippet":"### Name: read.traps\n### Title: Read Detector Data From File\n### Aliases: read.traps\n### Keywords: IO\n\n### ** Examples\n\n\n## Not run: \n##D ## \"trap.txt\" should have lines like this \n##D # 1 365 365\n##D # 2 365 395\n##D # 3 365 425\n##D # etc.\n##D ## in following, replace file name with a valid local name\n##D filename <- paste0(system.file(\"extdata\", package = \"secr\"), '/trap.txt')\n##D tr1 <- read.traps (filename, detector = \"proximity\")\n##D summary(tr1)\n##D \n##D ## Or if we have a dataframe of coordinates...\n##D mytrapdf <- data.frame(x = c(365,365,365), y = c(365,395,425),\n##D row.names = c('A','B','C'))\n##D mytrapdf\n##D # x y\n##D # A 365 365\n##D # B 365 395\n##D # C 365 425\n##D ## ...then we can convert it to a `traps' object with\n##D tr2 <- read.traps(data = mytrapdf)\n##D summary(tr2)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"rectangularMask","snippet":"### Name: rectangularMask\n### Title: Rectangular Mask\n### Aliases: rectangularMask\n### Keywords: manip\n\n### ** Examples\n\n\nrMask <- rectangularMask(possummask)\nplot(rMask)\nplot(possummask, add = TRUE, col = \"blue\")\n\n\n\n"} {"package":"secr","topic":"reduce","snippet":"### Name: reduce\n### Title: Combine Columns\n### Aliases: reduce reduce.default\n### Keywords: manip\n\n### ** Examples\n\n\n## matrix with random zeros\ntemp <- matrix(runif(20), nc = 4)\ntemp[sample(20,10)] <- 0\ntemp\n\nreduce(temp, list(1:2, 3:4))\n\n\n\n"} {"package":"secr","topic":"reduce.traps","snippet":"### Name: 
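A round-trip sketch for read.mask(), assuming write access to the working directory and that write.mask() (listed later among the write.captures aliases) writes a header row by default:

tempmask <- make.mask(make.grid())
write.mask(tempmask, file = "tempmask.txt")
mask2 <- read.mask(file = "tempmask.txt", spacing = spacing(tempmask),
    header = TRUE)
summary(mask2)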
reduce.capthist\n### Title: Combine Occasions Or Detectors\n### Aliases: reduce.traps reduce.capthist\n### Keywords: manip\n\n### ** Examples\n\ntempcapt <- sim.capthist (make.grid(nx = 6, ny = 6), nocc = 6)\nclass(tempcapt)\n\npooled.tempcapt <- reduce(tempcapt, newocc = list(1,2:3,4:6))\nsummary (pooled.tempcapt)\n\npooled.tempcapt2 <- reduce(tempcapt, by = 2)\nsummary (pooled.tempcapt2)\n\n## collapse multi-session dataset to single-session 'open population'\nonesess <- join(reduce(ovenCH, by = \"all\"))\nsummary(onesess)\n\n# group detectors within 60 metres\nplot (traps(captdata))\nplot (reduce(captdata, span = 60), add = TRUE)\n\n# plot linking old and new\nold <- traps(captdata)\nnew <- reduce(old, span = 60)\nnewtrap <- attr(new, \"newtrap\")\nplot(old, border = 10)\nplot(new, add = TRUE, detpar = list(pch = 16), label = TRUE)\nsegments (new$x[newtrap], new$y[newtrap], old$x, old$y)\n\n## Not run: \n##D \n##D # compare binary proximity with collapsed binomial count\n##D # expect TRUE for each year\n##D for (y in 1:5) {\n##D CHA <- abs(ovenCHp[[y]]) ## abs() to ignore one death\n##D usage(traps(CHA)) <- matrix(1, 44, ncol(CHA))\n##D CHB <- reduce(CHA, by = 'all', output = 'count')\n##D # summary(CHA, terse = TRUE)\n##D # summary(CHB, terse = TRUE)\n##D fitA <- secr.fit(CHA, buffer = 300, trace = FALSE)\n##D fitB <- secr.fit(CHB, buffer = 300, trace = FALSE, binomN = 1, biasLimit = NA)\n##D A <- predict(fitA)[,-1] \n##D B <- predict(fitB)[,-1]\n##D cat(y, ' ', all(abs(A-B)/A < 1e-5), '\\n')\n##D }\n##D ## multi-session fit\n##D ## expect TRUE overall\n##D CHa <- ovenCHp\n##D for (y in 1:5) {\n##D usage(traps(CHa[[y]])) <- matrix(1, 44, ncol(CHa[[y]]))\n##D CHa[[y]][,,] <- abs(CHa[[y]][,,])\n##D }\n##D CHb <- reduce(CHa, by = 'all', output = 'count')\n##D summary(CHa, terse = TRUE)\n##D summary(CHb, terse = TRUE)\n##D fita <- secr.fit(CHa, buffer = 300, trace = FALSE)\n##D fitb <- secr.fit(CHb, buffer = 300, trace = FALSE, binomN = 1, biasLimit = NA)\n##D A <- predict(fita)[[1]][,-1] \n##D B <- predict(fitb)[[1]][,-1]\n##D all(abs(A-B)/A < 1e-5)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"region.N","snippet":"### Name: region.N\n### Title: Population Size\n### Aliases: region.N region.N.secr region.N.secrlist 'population size'\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## routine examples using arbitrary mask from model fit\n##D region.N(secrdemo.0)\n##D region.N(secrdemo.CL)\n##D region.N(ovenbird.model.D)\n##D \n##D ## region defined as vector polygon\n##D ## retain and plot region mask\n##D temp <- region.N(possum.model.0, possumarea, spacing = 40,\n##D keep.region = TRUE)\n##D temp\n##D plot (attr(temp, \"region\"), type = \"l\")\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"score.test","snippet":"### Name: score.test\n### Title: Score Test for SECR Models\n### Aliases: score.test score.table\n### Keywords: htest\n\n### ** Examples\n\n\n## Not run: \n##D AIC (secrdemo.0, secrdemo.b)\n##D st <- score.test (secrdemo.0, g0 ~ b)\n##D st\n##D score.table(st)\n##D \n##D ## adding a time covariate to separate occasions (1,2) from (3,4,5)\n##D secrdemo.0$timecov <- data.frame(t2 = factor(c(1,1,2,2,2)))\n##D st2 <- score.test (secrdemo.0, g0 ~ t2)\n##D score.table(st,st2)\n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"boundarytoSF","snippet":"### Name: Internal\n### Title: Internal Functions\n### Aliases: boundarytoSF Dfn2\n\n### ** Examples\n\n\n## Not run: \n##D \n##D poly <- cbind(x = c(0,6,6,0,0), y = c(0,0,6,6,0)) \n##D 
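Editor's sketch of what 'by = 2' does in the reduce.capthist example above: occasions are pooled in consecutive blocks of two.

dim(tempcapt)[2]                  # 6 occasions before: 1,2,3,4,5,6
dim(reduce(tempcapt, by = 2))[2]  # 3 occasions after: (1,2) (3,4) (5,6)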
secr:::boundarytoSF(poly)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"secr-package","snippet":"### Name: secr-package\n### Title: Spatially Explicit Capture-Recapture Models\n### Aliases: secr-package secr\n### Keywords: package\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## generate some data & plot\n##D detectors <- make.grid (nx = 10, ny = 10, spacing = 20,\n##D detector = \"multi\")\n##D plot(detectors, label = TRUE, border = 0, gridspace = 20)\n##D detections <- sim.capthist (detectors, noccasions = 5,\n##D popn = list(D = 5, buffer = 100),\n##D detectpar = list(g0 = 0.2, sigma = 25))\n##D session(detections) <- \"Simulated data\"\n##D plot(detections, border = 20, tracks = TRUE, varycol = TRUE)\n##D \n##D ## generate habitat mask\n##D mask <- make.mask (detectors, buffer = 100, nx = 48)\n##D \n##D ## fit model and display results\n##D secr.model <- secr.fit (detections, model = g0~b, mask = mask)\n##D secr.model\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"secr.design.MS","snippet":"### Name: secr.design.MS\n### Title: Construct Detection Model Design Matrices and Lookups\n### Aliases: secr.design.MS make.lookup insertdim\n### Keywords: manip\n\n### ** Examples\n\nsecr.design.MS (captdata, models = list(g0 = ~b))$designMatrices\nsecr.design.MS (captdata, models = list(g0 = ~b))$parameterTable\n\n## peek at design data constructed for learned response model\nhead(captdata)\ntemp <- secr.design.MS (captdata, models = list(g0 = ~b),\n keep.dframe = TRUE)\na1 <- temp$dframe$animal == 1 & temp$dframe$detector %in% 8:10\ntemp$dframe[a1,]\n\n## ... and trap specific learned response model\ntemp <- secr.design.MS (captdata, models = list(g0 = ~bk),\n keep.dframe = TRUE)\na1 <- temp$dframe$animal == 1 & temp$dframe$detector %in% 8:10\ntemp$dframe[a1,]\n\n## place values 1:6 in different dimensions\ninsertdim(1:6, 1:2, c(2,3,6))\ninsertdim(1:6, 3, c(2,3,6))\n\n\n\n"} {"package":"secr","topic":"secr.fit","snippet":"### Name: secr.fit\n### Title: Spatially Explicit Capture-Recapture\n### Aliases: secr.fit binomN\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## construct test data (array of 48 `multi-catch' traps)\n##D \n##D detectors <- make.grid (nx = 6, ny = 8, detector = \"multi\")\n##D detections <- sim.capthist (detectors, popn = list(D = 10,\n##D buffer = 100), detectpar = list(g0 = 0.2, sigma = 25))\n##D \n##D ## fit & print null (constant parameter) model\n##D secr0 <- secr.fit (detections)\n##D secr0 ## uses print method for secr\n##D \n##D ## compare fit of null model with learned-response model for g0\n##D \n##D secrb <- secr.fit (detections, model = g0~b)\n##D AIC (secr0, secrb)\n##D \n##D ## typical result\n##D \n##D ## model detectfn npar logLik AIC AICc dAICc AICwt\n##D ## secr0 D~1 g0~1 sigma~1 halfnormal 3 -347.1210 700.242 700.928 0.000 0.7733\n##D ## secrb D~1 g0~b sigma~1 halfnormal 4 -347.1026 702.205 703.382 2.454 0.2267\n## End(Not run)\n\n\n"} {"package":"secr","topic":"newdata","snippet":"### Name: newdata\n### Title: Create Default Design Data\n### Aliases: newdata makeNewData makeNewData.secr makeNewData.default\n### Keywords: models\n\n### ** Examples\n\n\n## from previously fitted model\nmakeNewData(secrdemo.b)\n\n\n\n"} {"package":"secr","topic":"secr.test","snippet":"### Name: secr.test\n### Title: Goodness-of-Fit Test\n### Aliases: secr.test\n### Keywords: htest\n\n### ** Examples\n\n\n## Not run: \n##D \n##D secr.test(secrdemo.0, nsim = 99)\n##D \n##D secr.test(ovenbird.model.1, nsim = 20)\n##D \n##D ## 
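The AICwt column in the typical secr.fit result above follows from the usual Akaike-weight formula; a quick check reproduces the tabled values.

dAICc <- c(0.000, 2.454)
round(exp(-dAICc / 2) / sum(exp(-dAICc / 2)), 4)  # 0.7733 0.2267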
example combining raw data summary and model fit\n##D ## assumes single-session\n##D bothfn <- function(object) {\n##D CH <- object$capthist\n##D f1 <- sum(apply(abs(CH) > 0, 1, sum) == 1) / nrow(CH)\n##D devdf <- deviance(object) / df.residual(object)\n##D c(f1 = f1, devdf = devdf)\n##D }\n##D test <- secr.test (secrdemo.0, nsim = 19, statfn = bothfn, fit = TRUE)\n##D test\n##D plot(test, main = '')\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"random numbers","snippet":"### Name: secrRNG\n### Title: Random Number Seed\n### Aliases: 'random numbers' seed secrRNG\n### Keywords: datagen\n\n### ** Examples\n\n\n## Not run: \n##D \n##D lmfit <- lm(speed ~ dist, data = cars)\n##D \n##D ## 1. NULL seed\n##D r1 <- simulate(lmfit, seed = NULL)\n##D r2 <- simulate(lmfit, seed = NULL)\n##D ## restore RNGstate, assuming RNGkind unchanged\n##D .Random.seed <- attr(r1, \"seed\")\n##D r3 <- simulate(lmfit, seed = NULL)\n##D r1[1:6,1]\n##D r2[1:6,1]\n##D r3[1:6,1]\n##D \n##D ## 2. explicit seed\n##D r4 <- simulate(lmfit, seed = 123)\n##D r5 <- simulate(lmfit, seed = attr(r4, \"seed\"))\n##D r4[1:6,1]\n##D r5[1:6,1]\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"secrdemo","snippet":"### Name: secrdemo\n### Title: SECR Models Fitted to Demonstration Data\n### Aliases: secrdemo captXY trapXY captdata secrdemo.0 secrdemo.b\n### secrdemo.CL\n### Keywords: datasets\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## navigate to folder with raw data files\n##D olddir <- setwd (system.file(\"extdata\", package=\"secr\"))\n##D \n##D ## construct capthist object from raw data\n##D captdata <- read.capthist (\"capt.txt\", \"trap.txt\", fmt = \"XY\", detector = \"single\")\n##D \n##D ## generate demonstration fits\n##D secrdemo.0 <- secr.fit (captdata)\n##D secrdemo.CL <- secr.fit (captdata, CL = TRUE)\n##D secrdemo.b <- secr.fit (captdata, model = list(g0 ~ b))\n##D \n##D ## restore previous setting\n##D setwd(olddir)\n## End(Not run)\n\n## display the null model fit, using the print method for secr\nsecrdemo.0\n\n## compare fit of models\nAIC(secrdemo.0, secrdemo.b)\n\n## display estimates for the two models (single session)\ncollate(secrdemo.0, secrdemo.b)[1,,,]\n\n\n\n"} {"package":"secr","topic":"secrtest","snippet":"### Name: secrtest\n### Title: Goodness-of-fit Test Results\n### Aliases: secrtest print.secrtest plot.secrtest\n### Keywords: classes\n\n### ** Examples\n\n\n## Not run: \n##D \n##D tmp <- secr.test(ovenbird.model.1)\n##D if (inherits(tmp, 'secrtest')) {\n##D tmp ## terse print\n##D print(tmp, terse = FALSE)\n##D par(mfrow = c(1,5))\n##D plot(tmp, main = '', xlim=c(0,1), breaks=seq(0,1,0.05))\n##D par(mfrow = c(1,1)) ## reset to default\n##D }\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"session","snippet":"### Name: session\n### Title: Session Vector\n### Aliases: session session<-\n### Keywords: models\n\n### ** Examples\n\n session(captdata)\n\n\n"} {"package":"secr","topic":"setNumThreads","snippet":"### Name: setNumThreads\n### Title: Number of Threads\n### Aliases: setNumThreads\n\n### ** Examples\n\n\n# determine current number of threads\n\nsetNumThreads()\n\n## Not run: \n##D \n##D # set new number of threads\n##D setNumThreads(7)\n##D \n##D # a call to secr.fit that specifies 'ncores' also sets the \n##D # number of threads, as we see here\n##D \n##D fit <- secr.fit(captdata, trace = FALSE, ncores = 8)\n##D setNumThreads()\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"signalframe","snippet":"### Name: signal\n### Title: Signal 
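Editor's sketch for setNumThreads(): called with no argument it reports the current setting, and (as the example above notes) secr.fit(..., ncores = n) also resets it; the value 2 is hypothetical.

setNumThreads()     # report current number of threads
# setNumThreads(2)  # set a new value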
Fields\n### Aliases: signalframe signal.capthist noise.capthist signalframe<-\n### signal noise signal<- noise<-\n### Keywords: manip\n\n### ** Examples\n\n\n## ovensong dataset has very simple signalframe\nhead(signalframe(signalCH))\n\n\n\n"} {"package":"secr","topic":"signalmatrix","snippet":"### Name: signalmatrix\n### Title: Reformat Signal Data\n### Aliases: signalmatrix\n### Keywords: manip\n\n### ** Examples\n\n\n## use 'secr' ovenbird data\nsignalmatrix(signalCH)\n\n\n\n"} {"package":"secr","topic":"sim.capthist","snippet":"### Name: sim.capthist\n### Title: Simulate Detection Histories\n### Aliases: sim.capthist sim.resight\n### Keywords: datagen\n\n### ** Examples\n\n## simple example\n## detector = \"multi\" (default)\ntemptrap <- make.grid(nx = 6, ny = 6, spacing = 20)\nsim.capthist (temptrap, detectpar = list(g0 = 0.2, sigma = 20))\n\n## with detector = \"proximity\", there may be more than one\n## detection per individual per occasion\ntemptrap <- make.grid(nx = 6, ny = 6, spacing = 20, detector =\n \"proximity\")\nsummary(sim.capthist (temptrap, detectpar = list(g0 = 0.2, \n sigma = 20)))\n\n## marking on occasions 1, 3 only\ntemptrap <- make.grid(nx = 6, ny = 6, spacing = 20, detector = 'proximity')\nmarkocc(temptrap) <- c(1,0,1,0,0)\nCH <- sim.resight (temptrap, detectpar = list(g0 = 0.2, sigma = 20))\nsummary(CH)\n\n## multiple sessions\ngrid4 <- make.grid(nx = 2, ny = 2)\ntemp <- sim.capthist (grid4, popn = list(D = 1), nsessions = 20)\nsummary(temp, terse = TRUE)\n\n## unmarked or presence types\n# grid <- make.grid(nx = 10, ny = 10, detector = \"unmarked\")\n# CH <- sim.capthist (grid, noccasions = 5)\n# CH\n## \"presence\" and \"unmarked\" data are stored as \"count\" data\n## behaviour is controlled by detector type, e.g.\n# detector(traps(CH)) <- \"presence\"\n# CH\n\n\n\n\n"} {"package":"secr","topic":"sim.popn","snippet":"### Name: sim.popn\n### Title: Simulate 2-D Population\n### Aliases: sim.popn tile\n### Keywords: datagen\n\n### ** Examples\n\n\ntemppop <- sim.popn (D = 10, expand.grid(x = c(0,100), y =\n c(0,100)), buffer = 50)\n\n## plot, distinguishing \"M\" and \"F\"\nplot(temppop, pch = 1, cex= 1.5,\n col = c(\"green\",\"red\")[covariates(temppop)$sex])\n\n## add a continuous covariate\n## assumes covariates(temppop) is non-null\ncovariates(temppop)$size <- rnorm (nrow(temppop), mean = 15, sd = 3)\nsummary(covariates(temppop))\n\n## Neyman-Scott cluster distribution (see also rThomas)\npar(xpd = TRUE, mfrow=c(2,3))\nfor (h in c(5,15))\nfor (m in c(1,4,16)) {\n temppop <- sim.popn (D = 10, expand.grid(x = c(0,100),\n y = c(0,100)), model2D = \"cluster\", buffer = 100,\n details = list(mu = m, hsigma = h))\n plot(temppop)\n text (50,230,paste(\" mu =\",m, \"hsigma =\",h))\n}\npar(xpd = FALSE, mfrow=c(1,1)) ## defaults\n\n## Inhomogeneous Poisson distribution\nxy <- secrdemo.0$mask$x + secrdemo.0$mask$y - 900\ntempD <- xy^2 / 1000\nplot(sim.popn(tempD, secrdemo.0$mask, model2D = \"IHP\"))\n\n## Coastal distribution in 1000-m square, homogeneous in\n## x-direction\narena <- data.frame(x = c(0, 1000, 1000, 0),\n y = c(0, 0, 1000, 1000))\nplot(sim.popn(D = 5, core = arena, buffer = 0, model2D =\n \"coastal\", details = list(Beta = c(1, 1, 5, 1))))\n\n## Hills\nplot(sim.popn(D = 100, core = arena, model2D = \"hills\",\n buffer = 0, details = list(hills = c(-2,3,0,0))), \n cex = 0.4)\n\n## tile demonstration\npop <- sim.popn(D = 100, core = make.grid(), model2D = \"coastal\")\npar(mfrow = c(1,2), mar = c(2,2,2,2))\nplot(tile(pop, 
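A sketch contrasting the two detector types in the sim.capthist example above: 'multi' caps each animal at one detection per occasion, while 'proximity' allows one per detector per occasion, so total detections are typically larger. Parameter values are hypothetical; 'detections' is a row of the summary counts table.

tr_multi <- make.grid(nx = 6, ny = 6, spacing = 20)
tr_prox <- make.grid(nx = 6, ny = 6, spacing = 20, detector = "proximity")
dp <- list(g0 = 0.2, sigma = 20)
summary(sim.capthist(tr_multi, detectpar = dp))$counts["detections", ]
summary(sim.capthist(tr_prox, detectpar = dp))$counts["detections", ]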
\"copy\"))\npolygon(cbind(-100,200,200,-100), c(-100,-100,200,200),\n col = \"red\", density = 0)\ntitle(\"copy\")\nplot(tile(pop, \"reflect\"))\npolygon(cbind(-100,200,200,-100), c(-100,-100,200,200),\n col = \"red\", density = 0)\ntitle(\"reflect\")\n\n## Not run: \n##D \n##D ## simulate from inhomogeneous fitted density model\n##D \n##D regionmask <- make.mask(traps(possumCH), type = \"polygon\",\n##D spacing = 20, poly = possumremovalarea)\n##D dts <- distancetotrap(regionmask, possumarea)\n##D covariates(regionmask) <- data.frame(d.to.shore = dts)\n##D dsurf <- predictDsurface(possum.model.Ds, regionmask)\n##D possD <- covariates(dsurf)$D.0\n##D posspop <- sim.popn(D = possD, core = dsurf, model = \"IHP\")\n##D plot(regionmask, dots = FALSE, ppoly = FALSE)\n##D plot(posspop, add = TRUE, frame = FALSE)\n##D plot(traps(possumCH), add = TRUE)\n##D \n##D ## randomHabitat demonstration\n##D ## - assumes igraph has been installed\n##D \n##D # The wrapper function randomDensity may be passed to generate\n##D # a new habitat map each time sim.popn is called. The `details' argument\n##D # of sim.popn is passed to randomDensity as the `parm' argument.\n##D \n##D tempmask <- make.mask(nx = 100, ny = 100, spacing = 20)\n##D pop <- sim.popn(D = randomDensity, core = tempmask, model2D = \"IHP\",\n##D details = list(D = 10, p = 0.4, A = 0.5))\n##D plot(attr(pop, 'mask'), cov = 'D', dots = FALSE)\n##D plot(pop, add = TRUE)\n##D \n##D ## rLGCP demonstration\n##D ## - assumes spatstat and RandomFields have been installed\n##D \n##D if (requireNamespace(\"spatstat\") && requireNamespace(\"RandomFields\")) {\n##D msk <- make.mask(traps(captdata))\n##D # details argument 'spacing' ensures core matches Lambda below\n##D pop <- sim.popn(D = 20, core = msk, buffer = 0, \n##D model2D = \"rLGCP\", details = list(var=1, scale = 30, \n##D spacing = spacing(msk)), seed = 1234)\n##D plot(pop)\n##D plot(traps(captdata), add = TRUE)\n##D \n##D # another IHP realisation from same LGCP intensity surface\n##D lgcp <- attr(pop, 'Lambda')\n##D pop2 <- sim.popn(D = 'Lambda', core = lgcp, model2D = \"IHP\")\n##D plot (lgcp, covariate = \"Lambda\", dots = FALSE)\n##D plot (pop2, add = TRUE, frame = FALSE)\n##D \n##D # check input and output masks match\n##D summary(lgcp)\n##D summary(msk)\n##D }\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"simulate.secr","snippet":"### Name: sim.secr\n### Title: Simulate From Fitted secr Model\n### Aliases: simulate.secr sim.secr sim.detect simulate\n### Keywords: datagen models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## previously fitted model\n##D simulate(secrdemo.0, nsim = 2)\n##D \n##D ## The following has been superceded by secr.test()\n##D \n##D ## this would take a long time...\n##D sims <- sim.secr(secrdemo.0, nsim = 99)\n##D deviance(secrdemo.0)\n##D devs <- c(deviance(secrdemo.0),sims$deviance)\n##D quantile(devs, probs=c(0.95))\n##D rank(devs)[1] / length(devs)\n##D \n##D ## to assess bias and CI coverage\n##D extrfn <- function (object) unlist(predict(object)[\"D\",-1])\n##D sims <- sim.secr(secrdemo.0, nsim = 50, hessian = \"auto\",\n##D extractfn = extrfn)\n##D sims\n##D \n##D ## with a larger sample, could get parametric bootstrap CI\n##D quantile(sims[,1], c(0.025, 0.975))\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"skink","snippet":"### Name: skink\n### Title: Skink Pitfall Data\n### Aliases: skink infraCH lineoCH LStraps\n### Keywords: datasets\n\n### ** Examples\n\nsummary (infraCH)\nsummary (lineoCH)\n\n## check mean distance to 
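Editor's sketch for the inhomogeneous Poisson (IHP) example above: the expected population size is the density vector tempD summed over cells, times the per-cell area, computed here via maskarea() from the utility functions documented later.

cellarea <- maskarea(secrdemo.0$mask) / nrow(secrdemo.0$mask)  # ha per cell
sum(tempD * cellarea)  # expected number of simulated animals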
nearest trap etc.\nsummary(LStraps)\n\n## LStraps has several site covariates; terse descriptions are in\n## an extra attribute that may be displayed thus\nattr(LStraps, \"habitat.variables\")\n\n## For density modelling we need covariate values at each point in the\n## habitat mask. This requires both on-grid interpolation and\n## extrapolation beyond the grids. One (crude) possibility is to\n## extrapolate a mask covariate from a covariate of the nearest trap:\n\nLSmask <- make.mask(LStraps, buffer = 30, type = \"trapbuffer\")\ntemp <- nearesttrap(LSmask, LStraps)\nhabclass <- covariates(LStraps)$class[temp]\nhabclass <- factor (habclass, levels = c(1,2))\ncovariates(LSmask) <- data.frame(habclass)\n\n## plot mask with colour-coded covariate\npar(fg = \"white\") ## white pixel borders\nplot (LSmask, covariate = \"habclass\", dots = FALSE, axes = FALSE,\n col = c(\"yellow\", \"green\"), border = 0)\nplot(LStraps, add = TRUE, detpar = list(pch = 16))\npar(fg = \"black\") ## default\n\n\n"} {"package":"secr","topic":"smooths","snippet":"### Name: smooths\n### Title: Smooth Terms in SECR Models\n### Aliases: smooths\n### Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## smooth density surface\n##D possum.model.sxy <- secr.fit(possumCH, mask = possummask,\n##D model = D ~ s(x,y, k = 6, fx = TRUE), trace = FALSE)\n##D fittedsurface <- predictDsurface(possum.model.sxy)\n##D par(mar = c(1,1,1,6))\n##D plot(fittedsurface)\n##D plot(fittedsurface, plottype = 'contour', add = TRUE)\n##D par(mar = c(5,4,4,2) + 0.1) ## reset to default\n##D \n##D ## Now try smooth on g0\n##D \n##D ## For the smooth we use 'Session' which is coded numerically (0:4)\n##D ## rather than the factor 'session' ('2005', '2006', '2007', '2008',\n##D ## '2009')\n##D \n##D ovenbird.model.g0 <- secr.fit(ovenCH, mask = ovenmask,\n##D model = g0 ~ session, trace = FALSE)\n##D ovenbird.model.sg0 <- secr.fit(ovenCH, mask = ovenmask,\n##D model = g0 ~ s(Session, k = 3, fx = TRUE), trace = FALSE)\n##D \n##D AIC(ovenbird.model.g0, ovenbird.model.sg0)\n##D \n##D ## Or over occasions within a session...\n##D \n##D fit.sT3 <- secr.fit(captdata, model = g0 ~ s(T, k = 3, fx = TRUE),\n##D trace = FALSE)\n##D pred <- predict(fit.sT3, newdata = data.frame(T = 0:4))\n##D \n##D plot(sapply(pred, '[', 'g0', 'estimate'))\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"snip","snippet":"### Name: snip\n### Title: Slice Transect Into Shorter Sections\n### Aliases: snip\n### Keywords: manip\n\n### ** Examples\n\n\nx <- seq(0, 4*pi, length = 41)\ntemptrans <- make.transect(x = x*100, y = sin(x)*300)\nplot (snip(temptrans, by = 200), markvertices = 1)\n\n## Not run: \n##D \n##D ## simulate some captures\n##D tempcapt <- sim.capthist(temptrans, popn = list(D = 2,\n##D buffer = 300), detectfn = 'HHN', binomN = 0,\n##D detectpar = list(lambda0 = 0.5, sigma = 50))\n##D \n##D ## snip capture histories\n##D tempCH <- snip(tempcapt, by = 20)\n##D \n##D ## collapse from 'transect' to 'count', discarding location within transects\n##D tempCH <- reduce(tempCH, outputdetector = \"count\")\n##D \n##D ## fit secr model and examine H-T estimates of density\n##D ## fails with detectfn = 'HN'\n##D fit <- secr.fit(tempCH, buffer = 300, CL = TRUE, detectfn = 'HHN', trace = FALSE)\n##D derived(fit)\n##D \n##D ## also, may split an existing transect into equal lengths\n##D ## same result:\n##D plot(snip(temptrans, by = transectlength(temptrans)/10),\n##D markvertices = 1)\n##D plot(snip(temptrans, length.out = 10), markvertices = 1)\n##D \n## 
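A sanity-check sketch for snip(): slicing preserves total transect length, so the section lengths should sum to the original (up to numerical rounding).

sections <- snip(temptrans, by = 200)
sum(transectlength(sections))  # ~ equal to the next line
transectlength(temptrans)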
End(Not run)\n\n\n\n"} {"package":"secr","topic":"sort.capthist","snippet":"### Name: sort.capthist\n### Title: Sort Rows of capthist or mask Object\n### Aliases: sort.capthist sort.mask\n### Keywords: manip\n\n### ** Examples\n\nsort(ovenCH, by = \"Sex\")\ncovariates(ovenCH)[[\"2005\"]]\ncovariates(sort(ovenCH, by = \"Sex\"))[[\"2005\"]]\n\n\n"} {"package":"secr","topic":"spacing","snippet":"### Name: spacing\n### Title: Detector or Mask Spacing\n### Aliases: spacing spacing.traps spacing.mask spacing<-\n### Keywords: models\n\n### ** Examples\n\ntemptrap <- make.grid(nx = 6, ny = 8)\nspacing(temptrap)\n\n\n"} {"package":"secr","topic":"speed","snippet":"### Name: speed\n### Title: Speed Tips\n### Aliases: speed 'Speed tips'\n### Keywords: manip\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ## compare timing of combined model with separate single-session models\n##D ## for 5-session ovenbird mistnetting data: 2977/78 = 38-fold difference\n##D \n##D setNumThreads(7)\n##D \n##D system.time(fit1 <- secr.fit(ovenCH, buffer = 300, trace = FALSE, \n##D model = list(D ~ session, g0 ~ session, sigma ~ session)))\n##D ## user system elapsed \n##D ## 1837.71 31.81 730.56 \n##D \n##D system.time(fit2 <- lapply (ovenCH, secr.fit, buffer = 300, trace = FALSE))\n##D ## user system elapsed \n##D ## 43.74 0.46 11.13 \n##D \n##D ## ratio of density estimates\n##D collate(fit1)[,1,1,\"D\"] / sapply(fit2, function(x) predict(x)[\"D\",\"estimate\"])\n##D ## session=2005 session=2006 session=2007 session=2008 session=2009 \n##D ## 1.0000198 1.0000603 0.9999761 0.9999737 0.9999539 \n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"stoatDNA","snippet":"### Name: stoatDNA\n### Title: Stoat DNA Data\n### Aliases: stoatDNA stoatCH stoat.model.HN stoat.model.EX\n### Keywords: datasets\n\n### ** Examples\n\nsummary(stoatCH)\n\n## Not run: \n##D \n##D stoat.model.HN <- secr.fit(stoatCH, buffer = 1000, detectfn = 0)\n##D \n##D # this generates an error unless we use biasLimit = NA\n##D # to suppress the default bias check\n##D \n##D stoat.model.EX <- secr.fit(stoatCH, buffer = 1000, detectfn = 2)\n##D confint(stoat.model.HN, \"D\")\n##D ## Profile likelihood interval(s)...\n##D ## lcl ucl\n##D ## D 0.01275125 0.04055662\n##D \n## End(Not run)\n\n## plot fitted detection functions\nxv <- seq(0,800,10)\nplot(stoat.model.EX, xval = xv, ylim = c(0,0.12), limits = FALSE,\n lty = 2)\nplot(stoat.model.HN, xval = xv, limits = FALSE, lty = 1, add = TRUE)\n\n## review density estimates\ncollate(stoat.model.HN, stoat.model.EX,\n realnames = \"D\", perm = c(2,3,4,1))\nmodelAverage(stoat.model.HN, stoat.model.EX, realnames = \"D\")\n\n\n\n"} {"package":"secr","topic":"strip.legend","snippet":"### Name: strip.legend\n### Title: Colour Strip Legend\n### Aliases: strip.legend\n### Keywords: hplot\n\n### ** Examples\n\n\n\ntemptrap <- make.grid()\ntempmask <- make.mask(temptrap)\ncovariates (tempmask) <- data.frame(circle = \n exp(-(tempmask$x^2 + tempmask$y^2)/10000) )\ntmpleg <- plot (tempmask, covariate = \"circle\", dots = FALSE, \n breaks = 10, legend = FALSE)\nstrip.legend (xy = 'topright', col = terrain.colors(10),\n legend = tmpleg, title = \"Test plot\")\n\nif (interactive()) {\n ## a custom axis using the returned values\n par(mar = c(2,2,2,6))\n plot (tempmask, covariate = \"circle\", dots = FALSE, \n breaks = 10, legend = FALSE)\n b <- strip.legend (locator(1), col = terrain.colors(10),\n legendtype = \"other\", legend = \" \", title = \"Test plot\",\n height = 0.3, box = NA)\n axis(side = 4, pos = b[2]+5, at = 
seq(b[4], b[3], length = 3),\n lab = seq(0,1,0.5), las = 1, tck = -0.02)\n par(mar = c(5,4,4,2) + 0.1) ## reset to default\n}\n\n\n\n"} {"package":"secr","topic":"subset.capthist","snippet":"### Name: subset.capthist\n### Title: Subset or Split capthist Object\n### Aliases: subset.capthist split.capthist\n### Keywords: manip\n\n### ** Examples\n\n\ntempcapt <- sim.capthist (make.grid(nx = 6, ny = 6), noccasions = 6)\nsummary(subset(tempcapt, occasions = c(1,3,5)))\n\n## Consider `proximity' detections at a random subset of detectors\n## This would not make sense for `multi' detectors, as the \n## excluded detectors influence detection probabilities in \n## sim.capthist.\n\ntempcapt2 <- sim.capthist (make.grid(nx = 6, ny = 6, \n detector = \"proximity\"), noccasions = 6)\ntempcapt3 <- subset(tempcapt2, traps = sample(1:36, 18, \n replace = FALSE))\nsummary(tempcapt3)\nplot(tempcapt3)\n\ntempcapt4 <- split (tempcapt2, f = sample (c(\"A\",\"B\"), \n nrow(tempcapt2), replace = TRUE))\nsummary(tempcapt4)\n\n## Split out captures on alternate rows of a grid\ntempcapt5 <- split(captdata, f = rep(1:2, 50), bytrap = TRUE)\nsummary(tempcapt5)\n\n## Divide one session into two by occasion\ntempcapt6 <- split(captdata, f = factor(c(1,1,2,2,2)), byoccasion = TRUE)\nsummary(tempcapt6)\n\n## Applying a covariate criterion across all sessions of a\n## multi-session capthist object e.g. selecting male ovenbirds from the\n## 2005--2009 ovenCH dataset. We include a restriction on occasions\n## to demonstrate the use of 'MoreArgs'. Note that mapply() creates a\n## list, and the class of the output must be restored manually.\n\novenCH.males <- mapply(subset, ovenCH,\n subset = lapply(ovenCH, function(x) covariates(x)$Sex == \"M\"),\n MoreArgs = list(occasions = 1:5))\nclass(ovenCH.males) <- class(ovenCH)\nsummary(ovenCH.males, terse = TRUE)\n\n## A simpler approach using a function to define subset\nsubsetfn <- function(x, sex) covariates(x)$Sex == sex\novenCH.males <- subset(ovenCH, subset = subsetfn, sex = \"M\")\nsummary(ovenCH.males, terse = TRUE)\n\n\n\n"} {"package":"secr","topic":"subset.mask","snippet":"### Name: subset.mask\n### Title: Subset, Split or Combine Mask Objects\n### Aliases: subset.mask split.mask rbind.mask\n### Keywords: models\n\n### ** Examples\n\ntempmask <- make.mask(make.grid())\nOK <- (tempmask$x + tempmask$y) > 100\ntempmask <- subset(tempmask, subset = OK)\nplot(tempmask)\n\n\n\n"} {"package":"secr","topic":"subset.popn","snippet":"### Name: subset.popn\n### Title: Subset popn Object\n### Aliases: subset.popn\n### Keywords: manip\n\n### ** Examples\n\n\ntemppop <- sim.popn (D = 10, expand.grid(x = c(0,100), y =\n c(0,100)), buffer = 50)\n## 50% binomial sample of simulated population\ntemppops <- subset(temppop, runif(nrow(temppop)) < 0.5)\nplot(temppop)\nplot(temppops, add = TRUE, pch = 16)\n\n\n\n"} {"package":"secr","topic":"subset.traps","snippet":"### Name: subset.traps\n### Title: Subset traps Object\n### Aliases: subset.traps split.traps\n### Keywords: manip\n\n### ** Examples\n\n## odd-numbered traps only, using modulo operator\ntemptrap <- make.grid(nx = 7, ny = 7)\nt2 <- subset(temptrap, as.logical(1:nrow(temptrap) %% 2))\nplot(t2)\n\n## this works also for even number of rows, but must change 'outer' call\ntemptrap <- make.grid(nx = 8, ny = 8)\nt3 <- subset(temptrap, !as.logical(outer(1:8,1:8,'+')%%2))\nplot(t3)\n\n\n"} {"package":"secr","topic":"suggest.buffer","snippet":"### Name: suggest.buffer\n### Title: Mask Buffer Width\n### Aliases: suggest.buffer bias.D\n### 
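Editor's note on the modulo trick in the subset.traps example above, shown on a small vector:

idx <- 1:8
idx %% 2              # 1 0 1 0 1 0 1 0
as.logical(idx %% 2)  # TRUE FALSE ... keeps odd-numbered traps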
Keywords: models\n\n### ** Examples\n\n\n## Not run: \n##D \n##D temptraps <- make.grid()\n##D detpar <- list(g0 = 0.2, sigma = 25)\n##D suggest.buffer(temptraps, \"halfnormal\", detpar, 5)\n##D \n##D suggest.buffer(secrdemo.0)\n##D \n##D suggest.buffer(ovenCH[[1]])\n##D \n##D RB <- bias.D(50:150, temptraps, \"halfnormal\", detpar, 5)\n##D plot(RB)\n##D \n##D detpar <- list(g0 = 0.2, sigma = 25, z=5)\n##D RB <- bias.D(50:150, temptraps, \"hazard rate\", detpar, 5)\n##D lines(RB)\n##D \n##D ## compare to esa plot\n##D esa.plot (temptraps, max.buffer = 150, spacing = 4, detectfn = 0,\n##D detectpar = detpar, noccasions = 5, type = \"density\")\n##D \n##D ## compare detection histories and fitted model as input\n##D suggest.buffer(captdata)\n##D suggest.buffer(secrdemo.0)\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"summary.capthist","snippet":"### Name: summary.capthist\n### Title: Summarise Detections\n### Aliases: summary.capthist print.summary.capthist counts\n### Keywords: models\n\n### ** Examples\n\ntemptrap <- make.grid(nx = 5, ny = 3)\nsummary(sim.capthist(temptrap))\nsummary(sim.capthist(temptrap))$counts[\"n\",]\nsummary(captdata, moves = TRUE)\n\n\n"} {"package":"secr","topic":"summary.mask","snippet":"### Name: summary.mask\n### Title: Summarise Habitat Mask\n### Aliases: summary.mask print.summary.mask\n### Keywords: models\n\n### ** Examples\n\ntempmask <- make.mask(make.grid())\n## left to right gradient\ncovariates (tempmask) <- data.frame(x = tempmask$x)\nsummary(tempmask)\n\n\n"} {"package":"secr","topic":"summary.popn","snippet":"### Name: summary.popn\n### Title: Summarise Simulated Population\n### Aliases: summary.popn print.summary.popn\n### Keywords: datagen\n\n### ** Examples\n\n\n\ngrid <- make.grid(8,8)\nturnover <- list(phi = 0.8, lambda = 1)\npop <- sim.popn(Nbuffer = 200, core = grid, buffer = 200, Ndist = 'fixed', \n nsessions = 5, details = turnover)\nsummary(pop, collapse = TRUE)\n\n\n\n"} {"package":"secr","topic":"summary.traps","snippet":"### Name: summary.traps\n### Title: Summarise Detector Array\n### Aliases: summary.traps print.summary.traps\n### Keywords: models\n\n### ** Examples\n\ndemo.traps <- make.grid()\nsummary(demo.traps) ## uses print method for summary.traps object\n\n\n"} {"package":"secr","topic":"timevaryingcov","snippet":"### Name: timevaryingcov\n### Title: Time-varying Covariates\n### Aliases: timevaryingcov timevaryingcov<-\n### Keywords: manip\n\n### ** Examples\n\n\n# make a trapping grid with simple covariates\ntemptrap <- make.grid(nx = 6, ny = 8, detector = \"multi\") \ncovariates (temptrap) <- data.frame(matrix(\n c(rep(1,48*3),rep(2,48*2)), ncol = 5))\nhead(covariates (temptrap))\n\n# identify columns 1-5 as daily covariates\ntimevaryingcov(temptrap) <- list(blockt = 1:5)\ntimevaryingcov(temptrap)\n\n## Not run: \n##D \n##D # default density = 5/ha, noccasions = 5\n##D CH <- sim.capthist(temptrap, detectpar = list(g0 = c(0.15, 0.15,\n##D 0.15, 0.3, 0.3), sigma = 25))\n##D \n##D fit.1 <- secr.fit(CH, trace = FALSE) \n##D fit.tvc2 <- secr.fit(CH, model = g0 ~ blockt, trace = FALSE) \n##D \n##D # because variation aligns with occasions, we get the same with:\n##D fit.t2 <- secr.fit(CH, model = g0 ~ tcov, timecov = c(1,1,1,2,2),\n##D trace = FALSE) \n##D \n##D predict(fit.t2, newdata = data.frame(tcov = 1:2))\n##D predict(fit.tvc2, newdata = data.frame(blockt = 1:2))\n##D \n##D # now model some more messy variation\n##D covariates (traps(CH))[1:10,] <- 3\n##D fit.tvc3 <- secr.fit(CH, model = g0 ~ blockt, trace = 
FALSE) \n##D \n##D AIC(fit.tvc2, fit.t2, fit.tvc3)\n##D # fit.tvc3 is the 'wrong' model\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"transformations","snippet":"### Name: transformations\n### Title: Transform Point Array\n### Aliases: transformations flip flip.traps flip.popn rotate rotate.traps\n### rotate.popn shift shift.traps shift.popn shift.mask\n### Keywords: manip\n\n### ** Examples\n\n\ntemp <- matrix(runif (20) * 2 - 1, nc = 2)\n\n## flip\ntemp2 <- flip(temp, lr = 1)\nplot(temp, xlim=c(-1.5,4), ylim = c(-1.5,1.5), pch = 16)\npoints (temp2, pch = 1)\narrows (temp[,1], temp[,2], temp2[,1], temp2[,2], length = 0.1)\nabline(v = 1, lty = 2)\n\n## rotate\ntemp2 <- rotate(temp, 25)\nplot(temp, xlim=c(-1.5,1.5), ylim = c(-1.5,1.5), pch = 16)\npoints (0,0, pch=2)\npoints (temp2, pch = 1)\narrows (temp[,1], temp[,2], temp2[,1], temp2[,2], length = 0.1)\t\n\n## shiftxy\ntemp2 <- shift(temp, c(0.1, 0.1))\nplot(temp, xlim=c(-1.5,1.5), ylim = c(-1.5,1.5), pch = 16)\npoints (0,0, pch=2)\npoints (temp2, pch = 1)\narrows (temp[,1], temp[,2], temp2[,1], temp2[,2], length = 0.1)\t\n\n## flip.traps\npar(mfrow = c(1,2), xpd = TRUE)\ntraps1 <- make.grid(nx = 8, ny = 6, ID = \"numxb\")\ntraps2 <- flip (traps1, lr = TRUE)\nplot(traps1, border = 5, label = TRUE, offset = 7, gridl = FALSE)\nplot(traps2, border = 5, label = TRUE, offset = 7, gridl = FALSE)\npar(mfrow = c(1,1), xpd = FALSE)\n\n## rotate.traps\nhollow1 <- make.grid(nx = 8, ny = 8, hollow = TRUE)\nnested <- rbind (hollow1, rotate(hollow1, 45, c(70, 70)))\nplot(nested, gridlines = FALSE)\n\n## shift.traps\nhollow1 <- make.grid(nx = 8, ny = 8, hollow = TRUE)\nhollow2 <- shift(make.grid(nx = 6, ny = 6, hollow = TRUE), c(20, 20))\nnested <- rbind (hollow1, hollow2)\nplot(nested, gridlines = FALSE, label = TRUE)\n\n\n\n"} {"package":"secr","topic":"trap.builder","snippet":"### Name: trap.builder\n### Title: Complex Detector Layouts\n### Aliases: trap.builder mash cluster.counts cluster.centres\n### Keywords: manip\n\n### ** Examples\n\n\n## solitary detectors placed randomly within a rectangle\ntempgrid <- trap.builder (n = 10, method = \"SRS\",\n region = cbind(x = c(0,1000,1000,0),\n y = c(0,0,1000,1000)), plt = TRUE)\n\n## one detector in each 100-m grid cell -\n## a form of stratified simple random sample\norigins <- expand.grid(x = seq(0, 900, 100),\n y = seq(0, 1100, 100))\nXY <- origins + runif(10 * 12 * 2) * 100\ntemp <- trap.builder (frame = XY, method = \"all\",\n detector = \"multi\")\n## same as temp <- read.traps(data = XY)\nplot(temp, border = 0) ## default grid is 100 m\n\n## Not run: \n##D \n##D ## simulate some data\n##D ## regular lattice of mini-arrays\n##D minigrid <- make.grid(nx = 3, ny = 3, spacing = 50,\n##D detector = \"proximity\")\n##D tempgrid <- trap.builder (cluster = minigrid , method =\n##D \"all\", frame = expand.grid(x = seq(1000, 5000, 2000),\n##D y = seq(1000, 5000, 2000)), plt = TRUE)\n##D tempcapt <- sim.capthist(tempgrid, popn = list(D = 10))\n##D cluster.counts(tempcapt)\n##D cluster.centres(tempgrid)\n##D \n##D ## \"mash\" the CH\n##D summary(mash(tempcapt))\n##D \n##D ## compare timings (estimates are near identical)\n##D tempmask1 <- make.mask(tempgrid, type = \"clusterrect\",\n##D buffer = 200, spacing = 10)\n##D fit1 <- secr.fit(tempcapt, mask = tempmask1, trace = FALSE) \n##D \n##D tempmask2 <- make.mask(minigrid, spacing = 10)\n##D fit2 <- secr.fit(mash(tempcapt), mask = tempmask2, trace = FALSE) \n##D ## density estimate is adjusted automatically\n##D ## for the number of mashed 
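A sketch isolating the rotation-about-a-point idiom used in the nested-grid example above; the centre c(30, 30) is hypothetical.

grid1 <- make.grid(nx = 4, ny = 4)
grid2 <- rotate(grid1, 45, c(30, 30))  # 45 degrees about the point (30, 30)
plot(grid1, border = 20)
plot(grid2, add = TRUE, detpar = list(col = "red"))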
clusters (9)\n##D \n##D predict(fit1)\n##D predict(fit2)\n##D fit1$proctime\n##D fit2$proctime\n##D \n##D ## SRS excluding detectors from a polygon\n##D \n##D region <- cbind(x = c(0,6000,6000,0,0), y = c(0,0,6000,6000,0))\n##D exclude <- cbind(x = c(3000,7000,7000,3000,3000), y = c(2000,2000,4000,4000,2000))\n##D newgrid <- trap.builder (n = 40, cluster = minigrid,\n##D method = \"SRS\", edgemethod = \"allinside\", region = region,\n##D exclude = exclude, exclmethod = \"alloutside\",\n##D plt = TRUE)\n##D \n##D ## two-phase design: preliminary sample across region,\n##D ## followed by selection of sites for intensive grids\n##D \n##D arena <- data.frame(x = c(0,2000,2000,0), y = c(0,0,2500,2500))\n##D t1 <- make.grid(nx = 1, ny = 1)\n##D t4 <- make.grid(nx = 4, ny = 4, spacing = 50)\n##D singletraps <- make.systematic (n = c(8,10), cluster = t1,\n##D region = arena)\n##D CH <- sim.capthist(singletraps, popn = list(D = 2))\n##D plot(CH, type = \"n.per.cluster\", title = \"Number per cluster\")\n##D temp <- trap.builder(10, frame = traps(CH), cluster = t4,\n##D ranks = cluster.counts(CH), method = \"rank\",\n##D edgemethod = \"allowoverlap\", plt = TRUE, add = TRUE)\n##D \n##D ## GRTS sample of mini-grids within a rectangle\n##D ## GRTS uses package 'spsurvey' >= 5.3.0\n##D \n##D minigrid <- make.grid(nx = 3, ny = 3, spacing = 50,\n##D detector = \"proximity\")\n##D region <- cbind(x = c(0,6000,6000,0,0), y = c(0,0,6000,6000,0))\n##D \n##D if (requireNamespace(\"spsurvey\", versionCheck = list(version = \">=5.3.0\"))) {\n##D \n##D tempgrid <- trap.builder (n = 20, cluster = minigrid, region = region, \n##D plt = TRUE, method = \"GRTS\")\n##D \n##D # specifying minimum distance between cluster origins\n##D tempgrid2 <- trap.builder (n = 20, cluster = minigrid, region = region, \n##D plt = TRUE, method = \"GRTS\", mindis = 500, maxtry = 10)\n##D # use spsurvey::warnprnt() to view warnings (e.g., maxtry inadequate)\n##D \n##D }\n##D \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"traps object","snippet":"### Name: traps\n### Title: Detector Array\n### Aliases: 'traps object' traps traps<-\n### Keywords: classes\n\n### ** Examples\n\n\ndemotraps <- make.grid(nx = 8, ny = 6, spacing = 30)\ndemotraps ## uses print method for traps\nsummary (demotraps)\n\nplot (demotraps, border = 50, label = TRUE, offset = 8, \n gridlines=FALSE) \n\n## generate an arbitrary covariate `randcov'\ncovariates (demotraps) <- data.frame(randcov = rnorm(48))\n\n## overplot detectors that have high covariate values\ntemptr <- subset(demotraps, covariates(demotraps)$randcov > 0.5)\nplot (temptr, add = TRUE, \n detpar = list (pch = 16, col = \"green\", cex = 2)) \n\n\n"} {"package":"secr","topic":"searcharea","snippet":"### Name: traps.info\n### Title: Detector Attributes\n### Aliases: searcharea polyID polyID<- transectID transectID<-\n### transectlength\n### Keywords: manip\n\n### ** Examples\n\n\n## default is a single polygon\ntemp <- make.grid(detector = \"polygon\", hollow = TRUE)\npolyID(temp)\nplot(temp)\n\n## split in two\ntemp <- make.grid(detector = \"polygon\", hollow = TRUE)\npolyID(temp) <- factor(rep(c(1,2),rep(10,2)))\nplot(temp)\n \n\n\n"} {"package":"secr","topic":"predictDlambda","snippet":"### Name: Trend\n### Title: Density Trend\n### Aliases: predictDlambda\n### Keywords: classes\n\n### ** Examples\n\n\n## No test: \n# a model with constant lambda\nmsk <- make.mask(traps(ovenCH[[1]]), buffer = 300, nx = 25)\nfit <- secr.fit(ovenCH, model = D~1, mask = msk, trace = FALSE, \n details = 
list(Dlambda = TRUE), ncores = 2)\npredictDlambda(fit)\n\n## End(No test)\n\n\n\n"} {"package":"secr","topic":"trim","snippet":"### Name: trim\n### Title: Drop Unwanted List Components\n### Aliases: trim trim.default trim.secr trim.secrlist\n### Keywords: manip\n\n### ** Examples\n\n\nnames(secrdemo.0)\nnames(trim(secrdemo.0))\n\nobject.size(secrdemo.0)\nobject.size(trim(secrdemo.0))\n\nobject.size(trim(secrlist(secrdemo.0, secrdemo.b)))\n\n\n\n"} {"package":"secr","topic":"turnover","snippet":"### Name: turnover\n### Title: Specifying a Dynamic Population\n### Aliases: turnover\n### Keywords: datagen\n\n### ** Examples\n\n\npar (mfrow = c(2,3), mar = c(1,1,1,1))\n\n## birth and death only\ngrid <- make.grid(nx = 7, ny = 4, detector = 'proximity', spacing = 10)\npop <- sim.popn (Nbuffer = 100, core = grid, nsessions = 6, \n details = list(lambda = 0.8, phi = 0.6))\nsapply(pop, nrow) ## how many individuals?\nplot(pop)\n\n## movement only\npop2 <- sim.popn (Nbuffer = 100, core = grid, nsessions = 6, \n details = list(lambda = 1, phi = 1, movemodel = 'normal', \n move.a = 10, edgemethod = \"wrap\"))\npop3 <- sim.popn (Nbuffer = 100, core = grid, nsessions = 6, \n details = list(lambda = 1, phi = 1, movemodel = 'normal', \n move.a = 10, edgemethod = \"clip\"))\npop4 <- sim.popn (Nbuffer = 100, core = grid, nsessions = 10, \n details = list(lambda = 1, phi = 1, movemodel = 'normal', \n move.a = 10, edgemethod = \"stop\"))\nsapply(pop2, nrow) ## how many individuals?\nplot(pop2)\n\n## show effect of edgemethod --\n## first session blue, last session red\ncols <- c('blue',rep('white',4),'red')\npar (mfrow=c(1,2))\nplot(pop2, collapse = TRUE, seqcol = cols)\nplot(pop3, collapse = TRUE, seqcol = cols)\n\n## zero-inflated movement\n## move.b is zero-inflation probability\npop5 <- sim.popn (Nbuffer = 1000, core = grid, nsessions = 6, \n details = list(lambda = 1, phi = 1, movemodel = 'RDEzi', \n move.a = 50, move.b = 0.5, edgemethod = \"none\"))\nmean(do.call(rbind,extractMoves(pop5))$d) # approx 50 * 0.5\n\n\n\n"} {"package":"secr","topic":"updateCH","snippet":"### Name: updateCH\n### Title: Update Old capthist Format\n### Aliases: updateCH\n### Keywords: manip\n\n### ** Examples\n\n\n# if we had the old ovenCH !\nsapply(ovenCH, dim)\nsapply(updateCH(ovenCH), dim)\n\n\n\n"} {"package":"secr","topic":"usage","snippet":"### Name: usage\n### Title: Detector Usage\n### Aliases: usage usage<- effort\n### Keywords: manip\n\n### ** Examples\n\ndemo.traps <- make.grid(nx = 6, ny = 8)\n## random usage over 5 occasions\nusage(demo.traps) <- matrix (sample(0:1, 48*5, replace = TRUE, \n p = c(0.5,0.5)), nc = 5)\nusage(demo.traps)\nsummary(demo.traps)\n\nusage(traps(ovenCH)) <- c(1,9,10,10,10,10)\n## restore lost names\nnames(ovenCH) <- 2005:2009\n\n\n\n"} {"package":"secr","topic":"usagePlot","snippet":"### Name: usagePlot\n### Title: Plot usage, detections or sightings.\n### Aliases: usagePlot sightingPlot\n### Keywords: hplot\n\n### ** Examples\n\n\nsimgrid <- make.grid(nx = 10, ny = 10, detector = \"proximity\")\nusage(simgrid) <- matrix(rep(1:10, 50), nrow = 100, ncol = 5)\nusagePlot(simgrid, border = 20, scale = 1.5, fill = FALSE,\n metres = FALSE)\n\n# It is hard to get the legend just right\n# here is one attempt\nlegend (x = -50, y = 185, legend = c(1,2,5,10), pch = 1, pt.cex =\n c(1,2,5,10)^0.5 * 1.5, x.intersp = 3, y.intersp = 1.8, adj = 1,\n bty = \"n\", title = \"Usage\")\n\nusagePlot(simgrid, occasion = NULL, border = 20, scale = 1.5, fill = FALSE,\n metres = FALSE)\n\n## Not run: \n##D # bubble 
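Editor's sketch of the expected trajectory under the first turnover example above: lambda is the per-session finite rate of increase, so E[N] declines geometrically from Nbuffer.

Nbuffer <- 100; lambda <- 0.8
round(Nbuffer * lambda^(0:5))  # 100 80 64 51 41 33
# compare with sapply(pop, nrow) from the example (stochastic realisation)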
plot in package 'sp'\n##D library(sp)\n##D simgrid$usage <- usage(simgrid)[,1] ## occasion 1\n##D class(simgrid) <- \"data.frame\"\n##D coordinates(simgrid) <- c(\"x\",\"y\")\n##D bubble(simgrid) \n## End(Not run)\n\n\n\n"} {"package":"secr","topic":"userdist","snippet":"### Name: userdist\n### Title: Non-Euclidean Distances\n### Aliases: userdist noneuc\n### Keywords: models\n\n### ** Examples\n\n\n## see secr-noneuclidean.pdf\n\n\n\n"} {"package":"secr","topic":"getMeanSD","snippet":"### Name: utility\n### Title: Utility Functions\n### Aliases: getMeanSD maskarea masklength edist nedist\n\n### ** Examples\n\n\ngetMeanSD(possummask)\n\n\n\n"} {"package":"secr","topic":"vcov.secr","snippet":"### Name: vcov.secr\n### Title: Variance - Covariance Matrix of SECR Parameters\n### Aliases: vcov.secr\n### Keywords: models\n\n### ** Examples\n\n## previously fitted secr model\nvcov(secrdemo.0)\n\n\n"} {"package":"secr","topic":"verify","snippet":"### Name: verify\n### Title: Check SECR Data\n### Aliases: verify verify.default verify.traps verify.capthist verify.mask\n### Keywords: manip\n\n### ** Examples\n\n\nverify(captdata)\n\n## create null (complete) usage matrix, and mess it up\ntemptraps <- make.grid()\nusage(temptraps) <- matrix(1, nr = nrow(temptraps), nc = 5)\nusage(temptraps)[,5] <- 0\nverify (temptraps)\n\n## create mask, and mess it up\ntempmask <- make.mask(temptraps)\nverify(tempmask)\ntempmask[1,1] <- NA\nverify(tempmask)\n\n\n\n"} {"package":"secr","topic":"write.captures","snippet":"### Name: write.captures\n### Title: Write Data to Text File\n### Aliases: write.captures write.traps write.mask\n### Keywords: IO\n\n### ** Examples\n\n\n write.captures (captdata)\n\n\n\n"} {"package":"secr","topic":"writeGPS","snippet":"### Name: writeGPS\n### Title: Upload to GPS\n### Aliases: writeGPS\n### Keywords: IO\n\n### ** Examples\n\n\n## Example using shapefile \"possumarea.shp\" in\n## \"extdata\" folder. As 'cluster' is not specified,\n## the grid comprises single multi-catch detectors.\n\n## Not run: \n##D \n##D ## test for availability of GPSBabel\n##D \n##D if (nzchar(Sys.which(\"gpsbabel\"))) {\n##D \n##D library(sf)\n##D shpfilename <- system.file(\"extdata/possumarea.shp\", package = \"secr\")\n##D possumarea <- st_read(shpfilename)\n##D \n##D possumgrid <- make.systematic(spacing = 100, region = possumarea, \n##D plt = TRUE)\n##D \n##D ## May upload directly to GPS...\n##D # writeGPS(possumgrid, proj = \"+proj=nzmg\")\n##D \n##D ## ...or save as Mapsource file\n##D writeGPS(possumgrid, o = \"gdb\", F = \"tempgrid.gdb\",\n##D proj = \"+proj=nzmg\")\n##D \n##D ## If `region' had been specified in another projection we\n##D ## would need to specify this as in Proj.4. 
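A small follow-on sketch for vcov.secr: base R's cov2cor() converts the beta variance-covariance matrix to a correlation matrix.

V <- vcov(secrdemo.0)
round(cov2cor(V), 3)  # correlations of the beta estimates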
Here is a\n##D ## hypothetical example for New Zealand Transverse Mercator\n##D ## with datum NZGD2000 (EPSG:2193)\n##D \n##D NZTM <- paste(\"+proj=tmerc +lat_0=0 +lon_0=173 +k=0.9996\",\n##D \"+x_0=1600000 +y_0=10000000 +ellps=GRS80\",\n##D \" +towgs84=0,0,0,0,0,0,0 +units=m +no_defs\")\n##D \n##D # writeGPS(possumgridNZTM, o = \"gdb\", F = \"tempNZTM.txt\", \n##D # proj = NZTM)\n##D \n##D ## Or to upload coordinates from UTM Zone 18 in eastern\n##D ## Maryland, USA...\n##D \n##D # writeGPS(MarylandUTMgrid, proj = \n##D # \"+proj=utm +zone=18 +ellps=WGS84\")\n##D \n##D }\n##D \n## End(Not run)\n\n\n\n"} {"package":"mirtCAT","topic":"computeCriteria","snippet":"### Name: computeCriteria\n### Title: Compute the values given the criteria and internal objects\n### Aliases: computeCriteria\n\n### ** Examples\n\n## Not run: \n##D # test defined in mirtCAT help file, first example\n##D CATdesign <- mirtCAT(df, mod, design_elements = TRUE)\n##D \n##D computeCriteria(CATdesign, criteria = 'MI')\n##D computeCriteria(CATdesign, criteria = 'MEI')\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"createShinyGUI","snippet":"### Name: createShinyGUI\n### Title: Function returning an object used by shiny\n### Aliases: createShinyGUI\n\n### ** Examples\n\n## Not run: \n##D \n##D mirtCAT_preamble(df=df)\n##D runApp(createShinyGUI(host_server = FALSE), port = 8000) # run locally\n##D \n##D person <- getPerson()\n##D summary(person)\n##D \n##D runApp(createShinyGUI(), port = 8000) # for remote server hosting\n##D \n## End(Not run) \n\n\n"} {"package":"mirtCAT","topic":"extract.mirtCAT","snippet":"### Name: extract.mirtCAT\n### Title: Extract elements from the internal person, test, and design\n### objects\n### Aliases: extract.mirtCAT\n\n### ** Examples\n\n\n## Not run: \n##D #example test\n##D set.seed(1234)\n##D nitems <- 25\n##D itemnames <- paste0('Item.', 1:nitems)\n##D a <- matrix(rlnorm(nitems, .2, .3))\n##D d <- matrix(rnorm(nitems))\n##D dat <- simdata(a, d, 500, itemtype = 'dich')\n##D colnames(dat) <- itemnames\n##D mod <- mirt(dat, 1, verbose = FALSE, TOL = .01)\n##D \n##D # simple math items\n##D questions <- answers <- character(nitems)\n##D choices <- matrix(NA, nitems, 5)\n##D spacing <- floor(d - min(d)) + 1 #easier items have more variation in the options\n##D \n##D for(i in 1:nitems){\n##D n1 <- sample(1:50, 1)\n##D n2 <- sample(51:100, 1)\n##D ans <- n1 + n2\n##D questions[i] <- paste0(n1, ' + ', n2, ' = ?')\n##D answers[i] <- as.character(ans)\n##D ch <- ans + sample(c(-5:-1, 1:5) * spacing[i,], 5)\n##D ch[sample(1:5, 1)] <- ans\n##D choices[i, ] <- as.character(ch)\n##D }\n##D \n##D df <- data.frame(Question=questions, Option=choices, \n##D Type = 'radio', stringsAsFactors = FALSE)\n##D df$Answer <- answers\n##D \n##D pat <- generate_pattern(mod, Theta = 0, df)\n##D \n##D #------------------------------------------------\n##D # administer items in sequence\n##D customNextItem <- function(person, design, test){\n##D # browser()\n##D items_left_2_choose_from <- extract.mirtCAT(person, 'items_in_bank')\n##D min(items_left_2_choose_from)\n##D }\n##D \n##D res <- mirtCAT(df, local_pattern=pat, \n##D design = list(customNextItem=customNextItem))\n##D summary(res)\n##D \n##D #------------------------------------------------\n##D # administer items in order, but stop after 10 items\n##D customNextItem <- function(person, design, test){\n##D items_left_2_choose_from <- extract.mirtCAT(person, 'items_in_bank')\n##D items_answered <- extract.mirtCAT(person, 'items_answered')\n##D total 
<- sum(!is.na(items_answered))\n##D ret <- if(total < 10) min(items_left_2_choose_from)\n##D else return(NA)\n##D ret\n##D }\n##D \n##D res <- mirtCAT(df, local_pattern=pat, \n##D design = list(customNextItem=customNextItem))\n##D summary(res)\n##D \n##D #------------------------------------------------\n##D # using findNextItem() and stopping after 10 items\n##D \n##D customNextItem <- function(person, design, test){\n##D items_answered <- extract.mirtCAT(person, 'items_answered')\n##D total <- sum(!is.na(items_answered))\n##D ret <- NA\n##D if(total < 10) \n##D ret <- findNextItem(person=person, test=test, design=design, criteria = 'MI')\n##D ret\n##D }\n##D \n##D res <- mirtCAT(df, mod, local_pattern=pat, start_item = 'MI',\n##D design = list(customNextItem=customNextItem))\n##D summary(res)\n##D \n##D # equivalent to the following\n##D res2 <- mirtCAT(df, mod, local_pattern=pat, start_item = 'MI', \n##D criteria = 'MI', design = list(max_items = 10))\n##D summary(res2)\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"findNextItem","snippet":"### Name: findNextItem\n### Title: Find next CAT item\n### Aliases: findNextItem\n\n### ** Examples\n\n## Not run: \n##D \n##D # test defined in mirtCAT help file, first example \n##D # equivalent to criteria = 'MI'\n##D customNextItem <- function(design, person, test){\n##D item <- findNextItem(person=person, design=design, test=test,\n##D criteria = 'MI')\n##D item\n##D }\n##D \n##D set.seed(1)\n##D nitems <- 100\n##D itemnames <- paste0('Item.', 1:nitems)\n##D a <- matrix(rlnorm(nitems, .2, .3))\n##D d <- matrix(rnorm(nitems))\n##D dat <- simdata(a, d, 500, itemtype = 'dich')\n##D colnames(dat) <- itemnames\n##D mod <- mirt(dat, 1, verbose = FALSE)\n##D \n##D # simple math items\n##D questions <- answers <- character(nitems)\n##D choices <- matrix(NA, nitems, 5)\n##D spacing <- floor(d - min(d)) + 1 #easier items have more variation in the options\n##D \n##D for(i in 1:nitems){\n##D n1 <- sample(1:50, 1)\n##D n2 <- sample(51:100, 1)\n##D ans <- n1 + n2\n##D questions[i] <- paste0(n1, ' + ', n2, ' = ?')\n##D answers[i] <- as.character(ans)\n##D ch <- ans + sample(c(-5:-1, 1:5) * spacing[i,], 5)\n##D ch[sample(1:5, 1)] <- ans\n##D choices[i, ] <- as.character(ch)\n##D }\n##D \n##D df <- data.frame(Question=questions, Option=choices, \n##D Type = 'radio', stringsAsFactors = FALSE)\n##D \n##D response <- generate_pattern(mod, 1)\n##D result <- mirtCAT(mo=mod, local_pattern = response, \n##D design = list(customNextItem=customNextItem))\n##D \n##D #-----------------------------------------------------------\n##D # direct manipulation of internal objects\n##D CATdesign <- mirtCAT(df=df, mo=mod, criteria = 'MI', design_elements = TRUE)\n##D \n##D # returns number 1 in this case, since that's the starting item\n##D findNextItem(CATdesign)\n##D \n##D # determine next item if item 1 and item 10 were answered correctly\n##D CATdesign <- updateDesign(CATdesign, new_item = 1, new_response = 1)\n##D extract.mirtCAT(CATdesign$person, 'thetas') # updated thetas\n##D CATdesign <- updateDesign(CATdesign, new_item = 10, new_response = 1)\n##D extract.mirtCAT(CATdesign$person, 'thetas') # updated thetas again\n##D findNextItem(CATdesign)\n##D findNextItem(CATdesign, all_index = TRUE) # all items rank in terms of most optimal\n##D \n##D #-------------------------------------------------------------\n##D ## Integer programming example (e.g., shadow testing)\n##D \n##D # find maximum information subject to constraints\n##D # sum(xi) <= 5 ### 5 or fewer 
items\n##D # x1 + x2 <= 1 ### items 1 and 2 can't be together\n##D # x4 == 0 ### item 4 not included\n##D # x5 + x6 == 1 ### item 5 or 6 must be included, but not both\n##D \n##D # constraint function\n##D constr_fun <- function(design, person, test){\n##D \n##D # left hand side constraints\n##D # - 1 row per constraint, and ncol must equal number of items\n##D mo <- extract.mirtCAT(test, 'mo')\n##D nitems <- extract.mirt(mo, 'nitems')\n##D lhs <- matrix(0, 4, nitems)\n##D lhs[1,] <- 1\n##D lhs[2,c(1,2)] <- 1\n##D lhs[3, 4] <- 1\n##D lhs[4, c(5,6)] <- 1\n##D \n##D # relationship direction\n##D dirs <- c(\"<=\", \"<=\", '==', '==')\n##D \n##D #right hand side\n##D rhs <- c(5, 1, 0, 1)\n##D \n##D #all together\n##D constraints <- data.frame(lhs, dirs, rhs)\n##D constraints\n##D }\n##D \n##D CATdesign <- mirtCAT(df=df, mo=mod, design_elements = TRUE,\n##D design = list(constr_fun=constr_fun))\n##D \n##D # MI criteria value associated with each respective item\n##D objective <- computeCriteria(CATdesign, criteria = 'MI')\n##D \n##D # most optimal item, given constraints\n##D findNextItem(CATdesign, objective=objective)\n##D \n##D # all the items which solve the problem\n##D findNextItem(CATdesign, objective=objective, all_index = TRUE)\n##D \n##D ## within a customNextItem() definition the above code would look like\n##D # customNextItem <- function(design, person, test){\n##D # objective <- computeCriteria(person=person, design=design, test=test,\n##D # criteria = 'MI')\n##D # item <- findNextItem(person=person, design=design, test=test,\n##D # objective=objective)\n##D # item\n##D # }\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"generate.mirt_object","snippet":"### Name: generate.mirt_object\n### Title: Generate a mirt object from population parameters\n### Aliases: generate.mirt_object\n\n### ** Examples\n\n## Not run: \n##D \n##D ### build a unidimensional test with all 3PL items\n##D \n##D nitems <- 50\n##D a1 <- rlnorm(nitems, .2,.2)\n##D d <- rnorm(nitems)\n##D g <- rbeta(nitems, 20, 80)\n##D \n##D pars <- data.frame(a1=a1, d=d, g=g)\n##D head(pars)\n##D \n##D obj <- generate.mirt_object(pars, '3PL')\n##D coef(obj, simplify = TRUE)\n##D plot(obj, type = 'trace')\n##D \n##D ### build a two-dimensional test \n##D ## all graded items with 5 response categories\n##D \n##D nitems <- 30\n##D as <- matrix(rlnorm(nitems*2, .2, .2), nitems)\n##D diffs <- t(apply(matrix(runif(nitems*4, .3, 1), nitems), 1, cumsum)) \n##D diffs <- -(diffs - rowMeans(diffs)) \n##D ds <- diffs + rnorm(nitems)\n##D pars2 <- data.frame(as, ds)\n##D colnames(pars2) <- c('a1', 'a2', paste0('d', 1:4))\n##D head(pars2)\n##D \n##D obj <- generate.mirt_object(pars2, 'graded')\n##D coef(obj, simplify = TRUE)\n##D \n##D ### unidimensional mixed-item test\n##D \n##D library(plyr)\n##D pars3 <- rbind.fill(pars, pars2) #notice the NA's where parameters do not exist\n##D obj <- generate.mirt_object(pars3, itemtype = c(rep('2PL', 50), rep('graded', 30)))\n##D coef(obj)\n##D itemplot(obj, 51)\n##D itemplot(obj, 1, drop.zeros=TRUE)\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"generate_pattern","snippet":"### Name: generate_pattern\n### Title: Generate CAT patterns\n### Aliases: generate_pattern\n\n### ** Examples\n\n## Not run: \n##D \n##D # return real response vector given choices and (optional) answers \n##D pat <- generate_pattern(mod, Theta = 0, df=df)\n##D # mirtCAT(df, mo=mod, local_pattern = pat)\n##D \n##D # generate single pattern observed in dataset used to define mod\n##D pat2 <- 
generate_pattern(mod, Theta = 0)\n##D # mirtCAT(mo=mod, local_pattern = pat2)\n##D \n##D # generate multiple patterns to be analyzed independently \n##D pat3 <- generate_pattern(mod, Theta = matrix(c(0, 2, -2), 3))\n##D # mirtCAT(mo=mod, local_pattern = pat3)\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"getPerson","snippet":"### Name: getPerson\n### Title: Retrieve person object after running createShinyGUI\n### Aliases: getPerson\n\n### ** Examples\n\n## Not run: \n##D \n##D mirtCAT_preamble(df=df)\n##D runApp(createShinyGUI(), port = 8000)\n##D \n##D person <- getPerson()\n##D summary(person)\n## End(Not run) \n\n\n"} {"package":"mirtCAT","topic":"mirtCAT","snippet":"### Name: mirtCAT\n### Title: Generate an adaptive or non-adaptive test HTML interface\n### Aliases: mirtCAT print.mirtCAT summary.mirtCAT plot.mirtCAT\n### Keywords: adaptive computerized testing\n\n### ** Examples\n\n## Not run: \n##D \n##D ### unidimensional scored example with generated items\n##D \n##D # create mo from estimated parameters\n##D set.seed(1234)\n##D nitems <- 50\n##D itemnames <- paste0('Item.', 1:nitems)\n##D a <- matrix(rlnorm(nitems, .2, .3))\n##D d <- matrix(rnorm(nitems))\n##D dat <- simdata(a, d, 1000, itemtype = 'dich')\n##D mod <- mirt(dat, 1)\n##D coef(mod, simplify=TRUE)\n##D \n##D # alternatively, define mo from population values (not run)\n##D pars <- data.frame(a1=a, d=d)\n##D mod2 <- generate.mirt_object(pars, itemtype='2PL')\n##D coef(mod2, simplify=TRUE)\n##D \n##D # simple math items\n##D questions <- answers <- character(nitems)\n##D choices <- matrix(NA, nitems, 5)\n##D spacing <- floor(d - min(d)) + 1 #easier items have more variation in the options\n##D \n##D for(i in 1:nitems){\n##D n1 <- sample(1:50, 1)\n##D n2 <- sample(51:100, 1)\n##D ans <- n1 + n2\n##D questions[i] <- paste0(n1, ' + ', n2, ' = ?')\n##D answers[i] <- as.character(ans)\n##D ch <- ans + sample(c(-5:-1, 1:5) * spacing[i,], 5)\n##D ch[sample(1:5, 1)] <- ans\n##D choices[i, ] <- as.character(ch)\n##D }\n##D \n##D df <- data.frame(Question=questions, Option=choices, \n##D Type = 'radio', stringsAsFactors = FALSE)\n##D head(df)\n##D \n##D (res <- mirtCAT(df)) #collect response only (no scoring or estimating thetas)\n##D summary(res)\n##D \n##D # include scoring by providing Answer key\n##D df$Answer <- answers\n##D (res_seq <- mirtCAT(df, mod)) #sequential scoring \n##D (res_random <- mirtCAT(df, mod, criteria = 'random')) #random\n##D (res_MI <- mirtCAT(df, mod, criteria = 'MI', start_item = 'MI')) #adaptive, MI starting item\n##D \n##D summary(res_seq)\n##D summary(res_random)\n##D summary(res_MI)\n##D \n##D #-----------------------------------------\n##D # HTML tags for better customization, coerced to characters for compatibility\n##D \n##D # help(tags, package='shiny')\n##D options <- matrix(c(\"Strongly Disagree\", \"Disagree\", \"Neutral\", \"Agree\", \"Strongly Agree\"),\n##D nrow = 3, ncol = 5, byrow = TRUE)\n##D shinyStems <- list(HTML('Building CATs with mirtCAT is difficult.'),\n##D div(HTML('mirtCAT requires a'), br(), HTML('substantial amount of coding.')),\n##D div(strong('I would use'), HTML('mirtCAT in my research.')))\n##D questions <- sapply(shinyStems, as.character)\n##D df <- data.frame(Question=questions,\n##D Option = options, \n##D Type = \"radio\",\n##D stringsAsFactors=FALSE)\n##D \n##D res <- mirtCAT(df)\n##D res\n##D \n##D #-----------------------------------------\n##D \n##D # run locally, random response pattern given Theta\n##D set.seed(1)\n##D pat <- generate_pattern(mod, 
Theta = 0, df=df)\n##D head(pat)\n##D \n##D # seq scoring with character pattern for the entire test (adjust min_items)\n##D res <- mirtCAT(df, mod, local_pattern=pat, design = list(min_items = 50)) \n##D summary(res)\n##D \n##D # same as above, but using special input vector that doesn't require df input\n##D set.seed(1)\n##D pat2 <- generate_pattern(mod, Theta = 0)\n##D head(pat2)\n##D print(mirtCAT(mo=mod, local_pattern=pat2))\n##D \n##D # run CAT, and save results to object called person (start at 10th item)\n##D person <- mirtCAT(df, mod, item_answers = answers, criteria = 'MI', \n##D start_item = 10, local_pattern = pat)\n##D print(person)\n##D summary(person)\n##D \n##D # plot the session\n##D plot(person) #standard errors\n##D plot(person, SE=1.96) #95 percent confidence intervals\n##D \n##D #-----------------------------------------\n##D \n##D ### save response object to temp directory in case session ends early\n##D wdf <- paste0(getwd(), '/temp_file.rds')\n##D res <- mirtCAT(df, mod, shinyGUI = list(temp_file = wdf))\n##D \n##D # resume test this way if test was stopped early (and temp files were saved)\n##D res <- mirtCAT(df, mod, shinyGUI = list(temp_file = wdf))\n##D print(res)\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"mirtCAT_preamble","snippet":"### Name: mirtCAT_preamble\n### Title: Preamble function called by mirtCAT\n### Aliases: mirtCAT_preamble\n\n### ** Examples\n\n## Not run: \n##D \n##D mirtCAT_preamble(df = df)\n##D \n## End(Not run)\n\n\n"} {"package":"mirtCAT","topic":"updateDesign","snippet":"### Name: updateDesign\n### Title: Update design elements\n### Aliases: updateDesign\n\n### ** Examples\n\n## Not run: \n##D \n##D set.seed(1)\n##D nitems <- 100\n##D itemnames <- paste0('Item.', 1:nitems)\n##D a <- matrix(rlnorm(nitems, .2, .3))\n##D d <- matrix(rnorm(nitems))\n##D dat <- simdata(a, d, 500, itemtype = 'dich')\n##D colnames(dat) <- itemnames\n##D mod <- mirt(dat, 1, verbose = FALSE)\n##D \n##D # test defined in mirtCAT help file, first example\n##D CATdesign <- mirtCAT(mo = mod, criteria = 'MI', design_elements = TRUE,\n##D start_item = 2)\n##D \n##D # returns 2 in this case, since that was the starting item\n##D findNextItem(CATdesign) \n##D \n##D # first iteration, no answered items\n##D CATdesign$person$items_answered\n##D \n##D # update when next item is item 2 and answered correctly\n##D CATdesign <- updateDesign(CATdesign, new_item = 2, new_response = 1)\n##D CATdesign$person$items_answered # item 2 answered first\n##D CATdesign$person$responses # in item 2 element response was = 1 \n##D CATdesign$person$thetas # current estimate\n##D findNextItem(CATdesign) \n##D \n##D # determine next item if item 70 were also answered correctly next\n##D CATdesign <- updateDesign(CATdesign, new_item = 70, new_response = 1)\n##D CATdesign$person$items_answered \n##D CATdesign$person$responses \n##D findNextItem(CATdesign) \n##D \n##D # continue on, now with item 95 added next (answered incorrectly)\n##D CATdesign <- updateDesign(CATdesign, new_item = 95, new_response = 0)\n##D CATdesign$person$thetas\n##D CATdesign$person$thetas_history\n##D CATdesign$person$thetas_SE_history\n##D findNextItem(CATdesign)\n##D \n## End(Not run)\n\n\n\n"} {"package":"MTPS","topic":"AUC","snippet":"### Name: AUC\n### Title: Area Under Curve\n### Aliases: AUC\n\n### ** Examples\n\nset.seed(1)\n# simulate predictors\nx1 <- rnorm(200)\nx2 <- rnorm(200)\n# simulate outcome\npr <- 1/(1+exp(-(3 * x1 + 2 * x2 + 1)))\ny <- rbinom(200, 1, pr)\ndf <- data.frame(y = y,x1 = x1, 
x2 = x2)\n# fit logistic regression model on the first 100 observations\nlg.model <- glm(y ~ x1 + x2, data = df[1 : 100, ], family=\"binomial\")\n# predict outcome for the last 100 observations\nprob <- predict(lg.model, df[101:200, c(\"x1\", \"x2\")], type = \"response\")\n# calculate AUC and plot the ROC Curve\nAUC(prob, y[101:200], ROC=TRUE)\n# calculate AUC and plot the ROC Curve with cutoff\nAUC(prob, y[101:200], cutoff=0.2, ROC=TRUE)\n\n\n"} {"package":"MTPS","topic":"HIV","snippet":"### Name: HIV\n### Title: HIV Drug Resistance Database\n### Aliases: HIV XX YY\n### Keywords: datasets\n\n### ** Examples\n\ndata(HIV)\n\n\n"} {"package":"MTPS","topic":"cv.MTPS","snippet":"### Name: cv.MTPS\n### Title: Evaluation using Cross-Validation\n### Aliases: cv.MTPS\n\n### ** Examples\n\ndata(\"HIV\")\ncv.MTPS(xmat=XX, ymat=YY, family=\"gaussian\", nfolds=2,\n method.step1=rpart1, method.step2=lm1)\n\n\n"} {"package":"MTPS","topic":"list.learners","snippet":"### Name: list.learners\n### Title: List Available Base Learners\n### Aliases: list.learners KNN1 glm1 glmnet.lasso glmnet.ridge glmnet1 lda1\n### lm1 qda1 rpart1 svm1\n\n### ** Examples\n\nlist.learners()\n\n\n"} {"package":"MTPS","topic":"modify.parameter","snippet":"### Name: modify.parameter\n### Title: Modify Default Parameters For Base Learner\n### Aliases: modify.parameter\n\n### ** Examples\n\nglmnet.lasso <- modify.parameter(glmnet1, alpha=1)\nglmnet.ridge <- modify.parameter(glmnet1, alpha=0)\n\n\n"} {"package":"MTPS","topic":"multiFit","snippet":"### Name: multiFit\n### Title: Fit models on multiple outcomes\n### Aliases: multiFit\n\n### ** Examples\n\ndata(\"HIV\")\nset.seed(1)\nxmat <- as.matrix(XX)\nymat <- as.matrix(YY)\nid <- createFolds(rowMeans(XX), k=5, list=FALSE)\ntraining.id <- id != 1\ny.train <- ymat[training.id, ]\ny.test <- ymat[!training.id, ]\nx.train <- xmat[training.id, ]\nx.test <- xmat[!training.id, ]\nfit <- multiFit(xmat = x.train, ymat = y.train,\n method = rpart1, family = \"gaussian\")\npredict(fit, x.test)\n\n# using different base learners for different outcomes\nfit.mixOut <- multiFit(xmat = x.train, ymat = y.train,\n method = c(rpart1, rpart1, glmnet.ridge,lm1,lm1),\n family = \"gaussian\")\npredict(fit.mixOut, x.test)\n\n\n"} {"package":"MTPS","topic":"predict.MTPS","snippet":"### Name: predict.MTPS\n### Title: Make predictions from a \"MTPS\" model\n### Aliases: predict.MTPS\n\n### ** Examples\n\ndata(\"HIV\")\nset.seed(1)\nxmat <- as.matrix(XX)\nymat <- as.matrix(YY)\nid <- createFolds(rowMeans(XX), k=5, list=FALSE)\ntraining.id <- id != 1\ny.train <- ymat[training.id, ]\ny.test <- ymat[!training.id, ]\nx.train <- xmat[training.id, ]\nx.test <- xmat[!training.id, ]\n# Cross-Validation Residual Stacking\nfit.rs <- MTPS(xmat = x.train, ymat = y.train,\n family = \"gaussian\",cv = FALSE, residual = TRUE,\n method.step1 = rpart1, method.step2 = lm1)\npred.rs <- predict(fit.rs, x.test)\n\n\n"} {"package":"MTPS","topic":"predict.multiFit","snippet":"### Name: predict.multiFit\n### Title: Make predictions for multiple outcomes\n### Aliases: predict.multiFit\n\n### ** Examples\n\ndata(\"HIV\")\nset.seed(1)\nxmat <- as.matrix(XX)\nymat <- as.matrix(YY)\nid <- createFolds(rowMeans(XX), k=5, list=FALSE)\ntraining.id <- id != 1\ny.train <- ymat[training.id, ]\ny.test <- ymat[!training.id, ]\nx.train <- xmat[training.id, ]\nx.test <- xmat[!training.id, ]\nfit <- multiFit(xmat = x.train, ymat = y.train,\n method = rpart1, family = \"gaussian\")\npredict(fit, x.test)\n\n\n"} 
{"package":"MTPS","topic":"MTPS","snippet":"### Name: MTPS\n### Title: Fit Models using Revised Stacking Algorithm\n### Aliases: MTPS\n\n### ** Examples\n\ndata(\"HIV\")\nset.seed(1)\nxmat <- as.matrix(XX)\nymat <- as.matrix(YY)\nid <- createFolds(rowMeans(XX), k=5, list=FALSE)\ntraining.id <- id != 1\ny.train <- ymat[training.id, ]\ny.test <- ymat[!training.id, ]\nx.train <- xmat[training.id, ]\nx.test <- xmat[!training.id, ]\n\n# Residual Stacking\nfit.rs <- MTPS(xmat = x.train, ymat = y.train,\n family = \"gaussian\",cv = FALSE, residual = TRUE,\n method.step1 = rpart1, method.step2 = lm1)\npredict(fit.rs, x.test)\n\n# using different base learners for different outcomes\n fit.mixOut <- MTPS(xmat=x.train, ymat=y.train,\n family=\"gaussian\",cv = FALSE, residual = TRUE,\n method.step1 = c(rpart1,glmnet.ridge,rpart1,lm1,lm1),\n method.step2 = c(rpart1,lm1,lm1,lm1, glmnet.ridge))\npredict(fit.mixOut, x.test)\n\n\n"} {"package":"ODS","topic":"Bfct","snippet":"### Name: Bfct\n### Title: power basis functions of a spline of given degree\n### Aliases: Bfct\n\n### ** Examples\n\nlibrary(ODS)\n\nx <- matrix(c(1,2,3,4,5),ncol=1)\ndegree <- 2\nknots <- c(1,3,4)\n\nBfct(x, degree, knots)\n\n\n"} {"package":"ODS","topic":"Estimate_PLMODS","snippet":"### Name: Estimate_PLMODS\n### Title: Partial linear model for ODS data\n### Aliases: Estimate_PLMODS\n\n### ** Examples\n\n## No test: \nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set ods_data\n\nnknots = 10\ndegree = 2\n\n# get the initial value of the parameters from standard linear regression based on SRS data #\ndataSRS = ods_data[1:200,]\nYS = dataSRS[,1]\nXS = dataSRS[,2]\nZS = dataSRS[,3:5]\n\nknots = quantileknots(XS, nknots, 0)\n# the power basis spline function\nMS = Bfct(as.matrix(XS), degree, knots)\nDS = cbind(MS, ZS)\ntheta00 = as.numeric(lm(YS ~ DS -1)$coefficients)\nsig0_sq00 = var(YS - DS %*% theta00)\npi00 = c(0.15, 0.15)\nv00 = c(0, 0)\neta00 = matrix(c(theta00, pi00, v00, sig0_sq00), ncol=1)\nmu_Y = mean(YS)\nsig_Y = sd(YS)\n\nY = matrix(ods_data[,1])\nX = matrix(ods_data[,2])\nZ = matrix(ods_data[,3:5], nrow=400)\n\n# In this ODS data, the supplemental samples are taken from (-Infty, mu_Y-a*sig_Y) #\n# and (mu_Y+a*sig_Y, +Infty), where a=1 #\nn_f = c(200, 100, 100)\nCpt = 1\n\n# GCV selection to find the optimal smoothing parameter #\nq_s1 = logspace(-6, 7, 10)\ngcv1 = rep(0, 10)\n\nfor (j in 1:10) {\n\n result = Estimate_PLMODS(X,Y,Z,n_f,eta00,q_s1[j],Cpt,mu_Y,sig_Y)\n etajj = matrix(c(result$alpha, result$gam, result$pi0, result$v0, result$sig0_sq0), ncol=1)\n gcv1[j] = gcv_ODS(X,Y,Z,n_f,etajj,q_s1[j],Cpt,mu_Y,sig_Y)\n}\n\nb = which(gcv1 == min(gcv1))\nq_s = q_s1[b]\nq_s\n\n# Estimation of the partial linear model in the setting of outcome-dependent sampling #\nresult = Estimate_PLMODS(X, Y, Z, n_f, eta00, q_s, Cpt, mu_Y, sig_Y)\nresult\n## End(No test)\n\n\n"} {"package":"ODS","topic":"gcv_ODS","snippet":"### Name: gcv_ODS\n### Title: Generalized cross-validation for ODS data\n### Aliases: gcv_ODS\n\n### ** Examples\n\n## No test: \nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set ods_data\n\nnknots = 10\ndegree = 2\n\n# get the initial value of the parameters from standard linear regression based on SRS data #\ndataSRS = ods_data[1:200,]\nYS = dataSRS[,1]\nXS = dataSRS[,2]\nZS = dataSRS[,3:5]\n\nknots = quantileknots(XS, nknots, 0)\n# the power basis spline function\nMS = 
Bfct(as.matrix(XS), degree, knots)\nDS = cbind(MS, ZS)\ntheta00 = as.numeric(lm(YS ~ DS -1)$coefficients)\nsig0_sq00 = var(YS - DS %*% theta00)\npi00 = c(0.15, 0.15)\nv00 = c(0, 0)\neta00 = matrix(c(theta00, pi00, v00, sig0_sq00), ncol=1)\nmu_Y = mean(YS)\nsig_Y = sd(YS)\n\nY = matrix(ods_data[,1])\nX = matrix(ods_data[,2])\nZ = matrix(ods_data[,3:5], nrow=400)\n\n# In this ODS data, the supplemental samples are taken from (-Infty, mu_Y-a*sig_Y) #\n# and (mu_Y+a*sig_Y, +Infty), where a=1 #\nn_f = c(200, 100, 100)\nCpt = 1\n\n# GCV selection to find the optimal smoothing parameter #\nq_s1 = logspace(-6, 7, 10)\ngcv1 = rep(0, 10)\n\nfor (j in 1:10) {\n\n result = Estimate_PLMODS(X,Y,Z,n_f,eta00,q_s1[j],Cpt,mu_Y,sig_Y)\n etajj = matrix(c(result$alpha, result$gam, result$pi0, result$v0, result$sig0_sq0), ncol=1)\n gcv1[j] = gcv_ODS(X,Y,Z,n_f,etajj,q_s1[j],Cpt,mu_Y,sig_Y)\n}\n\nb = which(gcv1 == min(gcv1))\nq_s = q_s1[b]\n\nq_s\n\n# Estimation of the partial linear model in the setting of outcome-dependent sampling #\nresult = Estimate_PLMODS(X, Y, Z, n_f, eta00, q_s, Cpt, mu_Y, sig_Y)\nresult\n## End(No test)\n\n\n"} {"package":"ODS","topic":"logspace","snippet":"### Name: logspace\n### Title: Generate logarithmically spaced vector\n### Aliases: logspace\n\n### ** Examples\n\nlogspace(-6,7,30)\n\n\n"} {"package":"ODS","topic":"odsmle","snippet":"### Name: odsmle\n### Title: MSELE estimator for analyzing the primary outcome in ODS design\n### Aliases: odsmle\n\n### ** Examples\n\nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set ods_data\n\nY <- ods_data[,1]\nX <- cbind(rep(1,length(Y)), ods_data[,2:5])\n\n# use the simple random sample to get an initial estimate of beta, sig #\n# perform an ordinary least squares #\nSRS <- ods_data[1:200,]\nOLS.srs <- lm(SRS[,1] ~ SRS[,2:5])\nOLS.srs.summary <- summary(OLS.srs)\n\nbeta <- coefficients(OLS.srs)\nsig <- OLS.srs.summary$sigma^2\npis <- c(0.1,0.8,0.1)\n\n# the cut points for this data is Y < 0.162, Y > 2.59.\na <- c(0.162,2.59)\nrs.size <- 200\nsize <- c(100,0,100)\nstrat <- c(1,2,3)\n\nodsmle(Y,X,beta,sig,pis,a,rs.size,size,strat)\n\n\n"} {"package":"ODS","topic":"quantileknots","snippet":"### Name: quantileknots\n### Title: Create knots at sample quantiles\n### Aliases: quantileknots\n\n### ** Examples\n\nlibrary(ODS)\n\nx <- c(1, 2, 3, 4, 5)\nquantileknots(x, 3, 0)\n\n\n"} {"package":"ODS","topic":"se.spmle","snippet":"### Name: se.spmle\n### Title: standard error for MSELE estimator\n### Aliases: se.spmle\n\n### ** Examples\n\nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set ods_data\n\nY <- ods_data[,1]\nX <- cbind(rep(1,length(Y)), ods_data[,2:5])\n\n# use the simple random sample to get an initial estimate of beta, sig #\n# perform an ordinary least squares #\nSRS <- ods_data[1:200,]\nOLS.srs <- lm(SRS[,1] ~ SRS[,2:5])\nOLS.srs.summary <- summary(OLS.srs)\n\nbeta <- coefficients(OLS.srs)\nsig <- OLS.srs.summary$sigma^2\npis <- c(0.1,0.8,0.1)\n\n# the cut points for this data is Y < 0.162, Y > 2.59.\na <- c(0.162,2.59)\nrs.size <- 200\nsize <- c(100,0,100)\nstrat <- c(1,2,3)\n\n# obtain the parameter estimates\nODS.model = odsmle(Y,X,beta,sig,pis,a,rs.size,size,strat)\n\n# calculate the standard error estimate\ny <- Y\nx <- X\nbeta <- ODS.model$beta\nsig <- ODS.model$sig\npis <- ODS.model$pis\na <- c(0.162,2.59)\nN.edf <- rs.size\nrhos <- size/pis\nstrat <- c(1,3)\nsize.nc <- length(y)\n\nse = 
se.spmle(y, x, beta, sig, pis, a, N.edf, rhos, strat, size.nc)\n\n# summarize the result\nODS.tvalue <- ODS.model$beta / se\nODS.pvalue <- 2 * pt( - abs(ODS.tvalue), sum(rs.size, size)-2)\n\nODS.results <- cbind(ODS.model$beta, se, ODS.tvalue, ODS.pvalue)\ndimnames(ODS.results)[[2]] <- c(\"Beta\",\"SEbeta\",\"tvalue\",\"Pr(>|t|)\")\nrow.names(ODS.results) <- c(\"(Intercept)\",\"X\",\"Z1\",\"Z2\",\"Z3\")\n\nODS.results\n\n\n"} {"package":"ODS","topic":"secondary_ODS","snippet":"### Name: secondary_ODS\n### Title: Secondary analysis in ODS design\n### Aliases: secondary_ODS\n\n### ** Examples\n\nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set ods_data_secondary\ndata <- ods_data_secondary\n\n# divide the original cohort data into SRS, lowerODS, upperODS and NVsample\nSRS <- data[data[,1]==1,2:ncol(data)]\nlowerODS <- data[data[,1]==2,2:ncol(data)]\nupperODS <- data[data[,1]==3,2:ncol(data)]\nNVsample <- data[data[,1]==0,2:ncol(data)]\n\n# obtain the cut off points for ODS design. For this data, the ODS design\n# uses mean plus and minus one standard deviation of Y1 as cut off points.\nmeanY1 <- mean(data[,2])\nsdY1 <- sd(data[,2])\ncutpoint <- c(meanY1-sdY1, meanY1+sdY1)\n\n# the data matrix SRS has Y1, Y2, X and Z. Hence the dimension of Z is ncol(SRS)-3.\nZ.dim <- ncol(SRS)-3\n\nsecondary_ODS(SRS, lowerODS, upperODS, NVsample, cutpoint, Z.dim)\n\n\n"} {"package":"ODS","topic":"secondary_casecohort","snippet":"### Name: secondary_casecohort\n### Title: Secondary analysis in case-cohort data\n### Aliases: secondary_casecohort\n\n### ** Examples\n\n## No test: \nlibrary(ODS)\n# take the example data from the ODS package\n# please see the documentation for details about the data set casecohort_data_secondary\ndata <- casecohort_data_secondary\n\n# obtain SRS, CCH and NVsample from the original cohort data based on subj_ind\nSRS <- data[data[,1]==1, 2:ncol(data)]\nCCH <- data[data[,1]==1 | data[,1]==2, 2:ncol(data)]\nNVsample <- data[data[,1]==0, 2:ncol(data)]\n\n# delete the fourth column (columns for X) from the non-validation sample\nNVsample <- NVsample[,-4]\n\nZ1.dim <- 4\nZ2.dim <- 3\nZ3.dim <- 3\nsecondary_casecohort(SRS, CCH, NVsample, Z1.dim, Z2.dim, Z3.dim)\n## End(No test)\n\n\n"} {"package":"ezcox","topic":"clean_model_dir","snippet":"### Name: clean_model_dir\n### Title: Clean ezcox Model File Directory\n### Aliases: clean_model_dir\n\n### ** Examples\n\n## No test: \nclean_model_dir()\n## End(No test)\n\n\n"} {"package":"ezcox","topic":"ezcox","snippet":"### Name: ezcox\n### Title: Run Cox Analysis in Batch Mode\n### Aliases: ezcox\n\n### ** Examples\n\nlibrary(survival)\n\n# Build univariable models\nt1 <- ezcox(lung, covariates = c(\"age\", \"sex\", \"ph.ecog\"))\nt1\n\n# Build multi-variable models\n# Control variable 'age'\nt2 <- ezcox(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\")\nt2\n\n# Return models\nt3 <- ezcox(lung,\n covariates = c(\"age\", \"sex\", \"ph.ecog\"),\n return_models = TRUE\n)\nt3\nt4 <- ezcox(lung,\n covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\",\n return_models = TRUE\n)\nt4\n\n\n"} {"package":"ezcox","topic":"ezcox_group","snippet":"### Name: ezcox_group\n### Title: Group Cox Analysis and Visualization\n### Aliases: ezcox_group\n\n### ** Examples\n\nlibrary(survival)\nezcox_group(lung, grp_var = \"sex\", covariate = \"ph.ecog\")\nezcox_group(lung, grp_var = \"sex\", covariate = \"ph.ecog\", controls = \"age\")\np <- ezcox_group(lung,\n grp_var = \"sex\", 
covariate = \"ph.ecog\",\n controls = \"age\", add_all = TRUE\n)\n\n\n"} {"package":"ezcox","topic":"ezcox_parallel","snippet":"### Name: ezcox_parallel\n### Title: Parallelly Run Cox Analysis in Batch Mode\n### Aliases: ezcox_parallel\n\n### ** Examples\n\nlibrary(survival)\nt <- ezcox_parallel(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\")\nt\n\n\n"} {"package":"ezcox","topic":"filter_ezcox","snippet":"### Name: filter_ezcox\n### Title: Filter ezcox\n### Aliases: filter_ezcox\n\n### ** Examples\n\nlibrary(survival)\nlung$ph.ecog <- factor(lung$ph.ecog)\nzz <- ezcox(lung, covariates = c(\"sex\", \"age\"), controls = \"ph.ecog\")\nzz\nfilter_ezcox(zz)\nfilter_ezcox(zz, c(\"0\", \"2\"))\nfilter_ezcox(zz, c(\"0\", \"2\"), type = \"contrast\")\nt <- filter_ezcox(zz, c(\"0\", \"2\"), type = \"ref\")\nt\n\n\n"} {"package":"ezcox","topic":"forester","snippet":"### Name: forester\n### Title: Create a forest plot for simple data\n### Aliases: forester\n\n### ** Examples\n\nlibrary(survival)\n\nt1 <- ezcox(lung, covariates = c(\n \"age\", \"sex\",\n \"ph.karno\", \"pat.karno\"\n))\np <- forester(t1, xlim = c(0, 1.5))\np\np2 <- forester(t1, xlim = c(0.5, 1.5))\np2\n\n\n"} {"package":"ezcox","topic":"get_models","snippet":"### Name: get_models\n### Title: Get Model List from ezcox Object\n### Aliases: get_models\n\n### ** Examples\n\nlibrary(survival)\nzz <- ezcox(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\", return_models = TRUE)\nmds <- get_models(zz)\nstr(mds, max.level = 1)\n\n\n"} {"package":"ezcox","topic":"show_forest","snippet":"### Name: show_forest\n### Title: Show Forest Plot\n### Aliases: show_forest\n\n### ** Examples\n\nlibrary(survival)\nshow_forest(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\")\nshow_forest(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\", merge_models = TRUE)\nshow_forest(lung,\n covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\", merge_models = TRUE,\n drop_controls = TRUE\n)\np <- show_forest(lung,\n covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\", merge_models = TRUE,\n vars_to_show = \"sex\"\n)\np\n\n\n"} {"package":"ezcox","topic":"show_models","snippet":"### Name: show_models\n### Title: Show Cox Models\n### Aliases: show_models\n\n### ** Examples\n\nlibrary(survival)\nzz <- ezcox(lung, covariates = c(\"sex\", \"ph.ecog\"), controls = \"age\", return_models = TRUE)\nmds <- get_models(zz)\nshow_models(mds)\nshow_models(mds, model_names = paste0(\"Model \", 1:2))\nshow_models(mds, covariates = c(\"sex\", \"ph.ecog\"))\nshow_models(mds, drop_controls = TRUE)\nshow_models(mds, merge_models = TRUE)\np <- show_models(mds, merge_models = TRUE, drop_controls = TRUE)\np\n\n\n"} {"package":"FastCUB","topic":"bestcub","snippet":"### Name: bestcub\n### Title: Best-subset variable selection for CUB models via fast EM\n### algorithm\n### Aliases: bestcub\n### Keywords: stats\n\n### ** Examples\n\n## No test: \nlibrary(FastCUB)\ndata(univer)\nordinal<-univer$global\nm<-7\nY<-univer[,c(2,3,4)]\nW<-univer[,8:11]\n## Search for the best CUB model with covariates only for feeling\nbest0q<-bestcub(ordinal,m,Y=NULL,W,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE)\n## Search for the best CUB model with covariates only for uncertainty\nbestp0<-bestcub(ordinal,m,Y,W=NULL,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE)\n## Search for the best CUB model with covariates for both parameters\nbestpq<-bestcub(ordinal,m,Y,W,toler=1e-4,maxiter=100,iterc=5,alpha=0.05,invgen=TRUE,\n 
mix=TRUE,tolmix=1e+3,fmix=1)\nfinal<-bestpq$bestmodel; summary(final)\n## End(No test)\n\n\n\n"} {"package":"FastCUB","topic":"bitcsi","snippet":"### Name: bitcsi\n### Title: Shifted Binomial probabilities of ordinal responses\n### Aliases: bitcsi\n### Keywords: distribution\n\n### ** Examples\n\ndata(univer)\nm<-7\ncsi<-0.7\nordinal<-univer$informat\npr<-bitcsi(m,ordinal,csi)\n\n\n"} {"package":"FastCUB","topic":"bitgama","snippet":"### Name: bitgama\n### Title: Shifted Binomial distribution with covariates\n### Aliases: bitgama\n### Keywords: distribution\n\n### ** Examples\n\nn<-100\nm<-7\nW<-sample(c(0,1),n,replace=TRUE)\ngama<-c(0.2,-0.2)\ncsivett<-logis(W,gama)\nordinal<-rbinom(n,m-1,csivett)+1\npr<-bitgama(m,ordinal,W,gama)\n\n\n"} {"package":"FastCUB","topic":"dissim","snippet":"### Name: dissim\n### Title: Normalized dissimilarity measure\n### Aliases: dissim\n### Keywords: univar\n\n### ** Examples\n\nproba<-c(0.01,0.03,0.08,0.07,0.27,0.37,0.17)\nprobb<-c(0.04,0.04,0.05,0.10,0.21,0.32,0.24)\ndissim(proba,probb)\n\n\n"} {"package":"FastCUB","topic":"fastCUB","snippet":"### Name: fastCUB\n### Title: Main function for fast estimation CUB models\n### Aliases: fastCUB\n### Keywords: package\n\n### ** Examples\n\n## No test: \nlibrary(FastCUB)\ndata(univer)\nordinal<-univer$global\nm<-7\neffe<-with(univer, Formula(global~0|gender+freqserv+age +changefa))\ncub0q<-fastCUB(effe,data=univer,m=7, maxiter=100,toler=1e-8,mix=TRUE,verbose=FALSE)\nsummary(cub0q)\n## Fast EM for CUB model with covariates only for uncertainty\neffe<-with(univer, Formula(global~gender+freqserv+age +changefa|0))\ncubp0<-fastCUB(effe,data=univer,m=7, maxiter=100,toler=1e-8,iterc=5,verbose=TRUE)\n## Fast EM for CUB model with covariates for both feeling and uncertainty\neffe<-with(univer, Formula(global~gender+freqserv+age +changefa|gender+freqserv+age +changefa))\ncubpq<-fastCUB(effe,data=univer,m=7, maxiter=100,toler=1e-8,iterc=5)\nsummary(cubpq)\nBIC(cubpq)\n## End(No test)\n\n\n\n"} {"package":"FastCUB","topic":"inibest","snippet":"### Name: inibest\n### Title: Preliminary estimators for CUB models without covariates\n### Aliases: inibest\n### Keywords: htest utilities\n\n### ** Examples\n\nm<-9\nfreq<-c(10,24,28,36,50,43,23,12,5)\nestim<-inibest(m,freq) \npai<-estim[1]\ncsi<-estim[2]\n\n\n"} {"package":"FastCUB","topic":"inibestgama","snippet":"### Name: inibestgama\n### Title: Preliminary parameter estimates of a CUB model with covariates\n### for feeling\n### Aliases: inibestgama\n### Keywords: htest utilities\n\n### ** Examples\n\ndata(univer)\nm<-7; ordinal<-univer$global; cov<-univer$gender\nini<-inibestgama(m,ordinal,W=cov)\n\n\n"} {"package":"FastCUB","topic":"logis","snippet":"### Name: logis\n### Title: The logistic transform\n### Aliases: logis\n### Keywords: utilities\n\n### ** Examples\n\nn<-50 \nY<-sample(c(1,2,3),n,replace=TRUE) \nparam<-c(0.2,0.7)\nlogis(Y,param)\n\n\n"} {"package":"FastCUB","topic":"probbit","snippet":"### Name: probbit\n### Title: Probability distribution of a shifted Binomial random variable\n### Aliases: probbit\n### Keywords: distribution\n\n### ** Examples\n\nm<-7\ncsi<-0.7\npr<-probbit(m,csi)\nplot(1:m,pr,type=\"h\",main=\"Shifted Binomial probability distribution\",xlab=\"Categories\")\npoints(1:m,pr,pch=19)\n\n\n"} {"package":"FastCUB","topic":"probcub00","snippet":"### Name: probcub00\n### Title: Probability distribution of a CUB model without covariates\n### Aliases: probcub00\n### Keywords: distribution\n\n### ** 
Examples\n\nm<-9\npai<-0.3\ncsi<-0.8\npr<-probcub00(m,pai,csi)\nplot(1:m,pr,type=\"h\",main=\"CUB probability distribution\",xlab=\"Ordinal categories\")\npoints(1:m,pr,pch=19)\n\n\n"} {"package":"FastCUB","topic":"probcub0q","snippet":"### Name: probcub0q\n### Title: Probability distribution of a CUB model with covariates for the\n### feeling component\n### Aliases: probcub0q\n### Keywords: distribution\n\n### ** Examples\n\ndata(relgoods)\nm<-10\nnaord<-which(is.na(relgoods$Physician))\nnacov<-which(is.na(relgoods$Gender))\nna<-union(naord,nacov)\nordinal<-relgoods$Physician[-na]\nW<-relgoods$Gender[-na]\npai<-0.44; gama<-c(-0.91,-0.7)\npr<-probcub0q(m,ordinal,W,pai,gama)\n\n\n"} {"package":"FastCUB","topic":"probcubp0","snippet":"### Name: probcubp0\n### Title: Probability distribution of a CUB model with covariates for the\n### uncertainty component\n### Aliases: probcubp0\n### Keywords: distribution\n\n### ** Examples\n\ndata(relgoods)\nm<-10\nnaord<-which(is.na(relgoods$Physician))\nnacov<-which(is.na(relgoods$Gender))\nna<-union(naord,nacov)\nordinal<-relgoods$Physician[-na]\nY<-relgoods$Gender[-na]\nbet<-c(-0.81,0.93); csi<-0.20\nprobi<-probcubp0(m,ordinal,Y,bet,csi)\n\n\n"} {"package":"FastCUB","topic":"probcubpq","snippet":"### Name: probcubpq\n### Title: Probability distribution of a CUB model with covariates for both\n### feeling and uncertainty\n### Aliases: probcubpq\n### Keywords: distribution\n\n### ** Examples\n\ndata(relgoods)\nm<-10\nnaord<-which(is.na(relgoods$Physician))\nnacov<-which(is.na(relgoods$Gender))\nna<-union(naord,nacov)\nordinal<-relgoods$Physician[-na]\nW<-Y<-relgoods$Gender[-na]\ngama<-c(-0.91,-0.7); bet<-c(-0.81,0.93)\nprobi<-probcubpq(m,ordinal,Y,W,bet,gama)\n\n\n"} {"package":"BayesLogit","topic":"rpg","snippet":"### Name: rpg\n### Title: Polya-Gamma Random Variates\n### Aliases: rpg rpg.devroye rpg.gamma rpg.sp rpg.gamma.R rpg.devroye.R\n### rpg.sp.R\n### Keywords: rpg Polya-Gamma polyagamma\n\n### ** Examples\n\n\nh = c(1, 2, 3);\nz = c(4, 5, 6);\n\n## Devroye-like method -- only use if h contains integers, preferably small integers.\nX = rpg.devroye(100, h, z);\n\nh = c(1.2, 2.3, 3.2);\nz = c(4, 5, 6);\n\n## Sum of gammas method -- this is slow.\nX = rpg.gamma(100, h, z);\n\nh = c(1, 4, 2.3);\nz = c(4, 5, 6);\n\n## Hybrid method -- automatically chooses best procedure.\nX = rpg(100, h, z);\n\n\n\n"} {"package":"sparsereg","topic":"difference","snippet":"### Name: difference\n### Title: Plotting difference in posterior estimates from a sparse\n### regression.\n### Aliases: difference\n\n### ** Examples\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-100\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,mu=rep(0,k),Sigma=Sigma)\n##D y.true<-3+X[,2]*2+X[,3]*(-3)\n##D y<-y.true+rnorm(n)\n##D \n##D \n##D \n##D ##Fit a linear model with five covariates.\n##D s1<-sparsereg(y,X[,1:5])\n##D difference(s1,var1=1,var2=2)\n## End(Not run)\n\n\n\n"} {"package":"sparsereg","topic":"plot.sparsereg","snippet":"### Name: plot.sparsereg\n### Title: Plotting output from a sparse regression.\n### Aliases: plot.sparsereg\n\n### ** Examples\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-100\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,mu=rep(0,k),Sigma=Sigma)\n##D y.true<-3+X[,2]*2+X[,3]*(-3)\n##D y<-y.true+rnorm(n)\n##D \n##D \n##D \n##D ##Fit a linear model with five covariates.\n##D s1<-sparsereg(y,X[,1:5])\n##D plot(s1)\n## End(Not run)\n\n\n"} {"package":"sparsereg","topic":"print.sparsereg","snippet":"### Name: 
print.sparsereg\n### Title: A summary of the estimated posterior mode of each parameter.\n### Aliases: print.sparsereg\n\n### ** Examples\n\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-100\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,mu=rep(0,k),Sigma=Sigma)\n##D y.true<-3+X[,2]*2+X[,3]*(-3)\n##D y<-y.true+rnorm(n)\n##D \n##D \n##D \n##D ##Fit a linear model with five covariates.\n##D s1<-sparsereg(y,X[,1:5])\n##D print(s1)\n## End(Not run)\n\n\n\n"} {"package":"sparsereg","topic":"sparsereg","snippet":"### Name: sparsereg\n### Title: Sparse regression for experimental and observational data.\n### Aliases: sparsereg\n\n### ** Examples\n\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-5\n##D treat<-sample(c(\"a\",\"b\",\"c\"),n,replace=TRUE,pr=c(.5,.25,.25))\n##D treat2<-sample(c(\"a\",\"b\",\"c\",\"d\"),n,replace=TRUE,pr=c(.25,.25,.25,.25))\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,m=rep(0,k),S=Sigma)\n##D y.true<-3+X[,2]*2+(treat==\"a\")*2 +(treat==\"b\")*(-2)+X[,2]*(treat==\"b\")*(-2)+\n##D X[,2]*(treat2==\"c\")*2\n##D y<-y.true+rnorm(n,sd=2)\n##D \n##D ##Fit a linear model.\n##D s1<-sparsereg(y, X, cbind(treat,treat2), scale.type=\"TX\")\n##D s1.EM<-sparsereg(y, X, cbind(treat,treat2), EM=TRUE, scale.type=\"TX\")\n##D \n##D ##Summarize results from MCMC fit\n##D summary(s1)\n##D plot(s1)\n##D violinplot(s1)\n##D \n##D ##Summarize results from MCMC fit\n##D summary(s1.EM)\n##D plot(s1.EM)\n##D \n##D ##Extension using a baseline category\n##D s1.base<-sparsereg(y, X, treat, scale.type=\"TX\", baseline.vec=\"a\")\n##D \n##D summary(s1.base)\n##D plot(s1.base)\n##D violinplot(s1.base)\n##D \n## End(Not run)\n\n\n\n"} {"package":"sparsereg","topic":"summary.sparsereg","snippet":"### Name: summary.sparsereg\n### Title: Summaries for a sparse regression.\n### Aliases: summary.sparsereg\n\n### ** Examples\n\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-100\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,mu=rep(0,k),Sigma=Sigma)\n##D y.true<-3+X[,2]*2+X[,3]*(-3)\n##D y<-y.true+rnorm(n)\n##D \n##D \n##D \n##D ##Fit a linear model with five covariates.\n##D s1<-sparsereg(y,X[,1:5])\n##D summary(s1)\n## End(Not run)\n\n\n\n"} {"package":"sparsereg","topic":"violinplot","snippet":"### Name: violinplot\n### Title: Function for plotting posterior distribution of effects of\n### interest.\n### Aliases: violinplot\n\n### ** Examples\n\n\n## Not run: \n##D set.seed(1)\n##D n<-500\n##D k<-100\n##D Sigma<-diag(k)\n##D Sigma[Sigma==0]<-.5\n##D X<-mvrnorm(n,mu=rep(0,k),Sigma=Sigma)\n##D y.true<-3+X[,2]*2+X[,3]*(-3)\n##D y<-y.true+rnorm(n)\n##D \n##D \n##D \n##D ##Fit a linear model with five covariates.\n##D s1<-sparsereg(y,X[,1:5])\n##D violinplot(s1,1:3)\n## End(Not run)\n\n\n\n"} {"package":"gains","topic":"gains","snippet":"### Name: gains\n### Title: Gains Table for a Vector of Predictions\n### Aliases: gains\n### Keywords: misc\n\n### ** Examples\n\ndata(ciaScores)\nwith(subset(ciaScores,train==0), \n gains(actual=CellPhonesPP, predicted=PredOLS, optimal=TRUE))\n\n\n"} {"package":"gains","topic":"plot.gains","snippet":"### Name: plot.gains\n### Title: Plotting Gains Table Objects\n### Aliases: plot.gains\n### Keywords: misc\n\n### ** Examples\n\ndata(ciaScores)\n## Not run: \n##D plot(with(subset(ciaScores,train==0), \n##D gains(actual=CellPhonesPP, predicted=PredOLS, optimal=TRUE)),\n##D main=\"Test Gains Table Plot\")\n## End(Not run)\n\n\n"} {"package":"gains","topic":"print.gains","snippet":"### Name: print.gains\n### Title: 
Printing Gains Table Objects\n### Aliases: print.gains\n### Keywords: misc\n\n### ** Examples\n\ndata(ciaScores)\nprint(with(subset(ciaScores,train==0), \n gains(actual=CellPhonesPP, predicted=PredOLS, optimal=TRUE)),digits=2)\n\n\n"} {"package":"hdbm","topic":"hdbm","snippet":"### Name: hdbm\n### Title: High Dimensional Bayesian Mediation\n### Aliases: hdbm\n\n### ** Examples\n\nlibrary(hdbm)\n\nY <- hdbm.data$y\nA <- hdbm.data$a\n\n# grab the mediators from the example data.frame\nM <- as.matrix(hdbm.data[, paste0(\"m\", 1:100)], nrow(hdbm.data))\n\n# We just include the intercept term in this example.\nC <- matrix(1, 1000, 1)\nbeta.m <- rep(0, 100)\nalpha.a <- rep(0, 100)\n\nset.seed(12345)\nhdbm.out <- hdbm(Y, A, M, C, C, beta.m, alpha.a,\n burnin = 1000, ndraws = 100)\n\n# Which mediators are active?\nactive <- which(colSums(hdbm.out$r1 * hdbm.out$r3) > 50)\ncolnames(M)[active]\n\n\n"} {"package":"growthcurver","topic":"SummarizeGrowth","snippet":"### Name: SummarizeGrowth\n### Title: Summarize Growth Curves\n### Aliases: SummarizeGrowth\n\n### ** Examples\n\n# We can check that the parameters that are found are the same\n# as we use to generate fake experimental data. To do so, let's first\n# generate the \"experimental\" data using the logistic equation,\n# e.g., absorbance readings from a single well in a plate reader over time.\n\nk_in <- 0.5 # the initial carrying capacity\nn0_in <- 1e-5 # the initial absorbance reading\nr_in <- 1.2 # the initial growth rate\nN <- 50 # the number of \"measurements\" collected during the growth\n # curve experiment\n\ndata_t <- 0:N * 24 / N # the times the measurements were made (in hours)\ndata_n <- NAtT(k = k_in, n0 = n0_in, r = r_in, t = data_t) # the measurements\n\n# Now summarize the \"experimental\" growth data that we just generated\ngc <- SummarizeGrowth(data_t, data_n)\n\n# Get the possible metrics for fitness proxies\ngc$vals$r # growth rate is a common choice for fitness\ngc$vals$t_gen # doubling time, or generation time, is also common\ngc$vals$k\ngc$vals$n0\ngc$vals$auc_l\ngc$vals$auc_e\ngc$vals$t_mid\n\n# Compare the data with the fit visually by plotting it\nplot(gc)\n\n\n\n"} {"package":"growthcurver","topic":"SummarizeGrowthByPlate","snippet":"### Name: SummarizeGrowthByPlate\n### Title: Summarize Growth Curves\n### Aliases: SummarizeGrowthByPlate\n\n### ** Examples\n\n#Get the summary metrics for the entire plate of sample data provided\n#with the Growthcurver package\n\n#First, load the example data provided with Growthcurver. Note that there is\n#a column named \"time\" -- this is necessary for Growthcurver to know which\n#column contains the time measurements. 
In this dataset, the repeated\n#measurements from a single well in a plate are given in a column of data.\n\nmyPlate <- growthdata\nnames(myPlate)\n\n#Next, do the analysis for all the columns.\nsummary_plate <- SummarizeGrowthByPlate(plate = myPlate)\n\n#The output is a data frame that contains the information on the best\n#fit for each column of data.\nhead(summary_plate) # Use head to display just the first few rows\n\n\n\n"} {"package":"sfc","topic":"sfc","snippet":"### Name: sfc\n### Title: Substance Flow Computation\n### Aliases: sfc\n\n### ** Examples\n\nlibrary(sfc)\n\n## model as txt\ndata <- system.file(\"extdata\", \"data_utf8.csv\", package = \"sfc\")\nmodel <- system.file(\"extdata\", \"model_utf8.txt\", package = \"sfc\")\nsfc(data, model, sample.size = 100, fileEncoding = \"UTF-8\")\n\n## model as csv\ndata <- system.file(\"extdata\", \"data_utf8.csv\", package = \"sfc\")\nmodel <- system.file(\"extdata\", \"model_utf8.csv\", package = \"sfc\")\nsfc(data, model, fileEncoding = \"UTF-8\")\n\n\n"} {"package":"SobolSequence","topic":"SobolSequence-package","snippet":"### Name: SobolSequence-package\n### Title: Sobol Sequence\n### Aliases: SobolSequence-package sobolsequence\n\n### ** Examples\n\nsrange <- sobolSequence.dimMinMax()\nmrange <- sobolSequence.dimF2MinMax(srange[1])\npoints <- sobolSequence.points(dimR=srange[1], dimF2=mrange[1], count=10000)\npoints <- sobolSequence.points(dimR=srange[1], dimF2=mrange[1], count=10000,\n digitalShift=TRUE)\n\n\n"} {"package":"SobolSequence","topic":"sobolsequence","snippet":"### Name: SobolSequence-package\n### Title: Sobol Sequence\n### Aliases: SobolSequence-package sobolsequence\n\n### ** Examples\n\nsrange <- sobolSequence.dimMinMax()\nmrange <- sobolSequence.dimF2MinMax(srange[1])\npoints <- sobolSequence.points(dimR=srange[1], dimF2=mrange[1], count=10000)\npoints <- sobolSequence.points(dimR=srange[1], dimF2=mrange[1], count=10000,\n digitalShift=TRUE)\n\n\n"} {"package":"mcMST","topic":"charVecToEdgelist","snippet":"### Name: charVecToEdgelist\n### Title: Convert characteristic vector to edge list.\n### Aliases: charVecToEdgelist\n\n### ** Examples\n\n# here we generate a random Pruefer-code representing\n# a random spanning tree of a graph with n = 10 nodes\npcode = sample(1:10, 8, replace = TRUE)\nedgelist = charVecToEdgelist(prueferToCharVec(pcode))\n\n\n"} {"package":"mcMST","topic":"edgeListToCharVec","snippet":"### Name: edgeListToCharVec\n### Title: Convert edge list to characteristic vector.\n### Aliases: edgeListToCharVec\n\n### ** Examples\n\n# first we generate a small edge list by hand\n# (assume the given graph has n = 4 nodes)\nedgelist = matrix(c(1, 2, 2, 4, 3, 4), ncol = 3)\nprint(edgelist)\n# next we transform the edge list into\n# a characteristic vector\ncvec = edgeListToCharVec(edgelist, n = 4)\nprint(cvec)\n\n\n"} {"package":"mcMST","topic":"enumerateTSP","snippet":"### Name: enumerateTSP\n### Title: Enumerate all solution candidates.\n### Aliases: enumerateTSP enumerateMST\n\n### ** Examples\n\nsols = enumerateTSP(4L)\nsols = enumerateMST(4L)\n\n\n"} {"package":"mcMST","topic":"genRandomMCGP","snippet":"### Name: genRandomMCGP\n### Title: Generate a bi-criteria graph with two uniformly randomly\n### distributed edge weights.\n### Aliases: genRandomMCGP\n\n### ** Examples\n\ng = genRandomMCGP(10L)\n## Not run: \n##D pl = grapherator::plot(g)\n## End(Not run)\n\n\n"} {"package":"mcMST","topic":"genRandomSpanningTree","snippet":"### Name: genRandomSpanningTree\n### Title: Generate a random spanning tree.\n### 
Aliases: genRandomSpanningTree\n\n### ** Examples\n\ngenRandomSpanningTree(10)\ngenRandomSpanningTree(10, type = \"edgelist\")\n\n\n"} {"package":"mcMST","topic":"genRandomSpanningTrees","snippet":"### Name: genRandomSpanningTrees\n### Title: Generate a set of random spanning trees.\n### Aliases: genRandomSpanningTrees\n\n### ** Examples\n\ngenRandomSpanningTrees(3, 10)\ngenRandomSpanningTrees(3, 10, simplify = FALSE)\n\ngenRandomSpanningTrees(3, 10, type = \"edgelist\")\n\n\n"} {"package":"mcMST","topic":"getCommonSubtrees","snippet":"### Name: getCommonSubtrees\n### Title: Get common subtrees of two trees.\n### Aliases: getCommonSubtrees\n\n### ** Examples\n\n# assume we have a graph with n = 10 nodes\nn.nodes = 10\n# we define two trees (matrices with colwise edges)\nstree1 = matrix(c(1, 2, 1, 3, 2, 4, 5, 6, 6, 7), byrow = FALSE, nrow = 2)\nstree2 = matrix(c(1, 3, 1, 2, 2, 4, 5, 8, 6, 7), byrow = FALSE, nrow = 2)\n# ... and compute all common subtrees\nsubtrees = getCommonSubtrees(stree1, stree2, n = 10)\n\n\n"} {"package":"mcMST","topic":"getExactFront","snippet":"### Name: getExactFront\n### Title: Enumerate all Pareto-optimal solutions.\n### Aliases: getExactFront\n\n### ** Examples\n\n# here we enumerate all Pareto-optimal solutions of a bi-objective mcMST problem\n# we use the Pruefer-code enumerator. Thus, we need to define an objective\n# function, which is able to handle this type of encoding\nobjfunMCMST = function(pcode, instance) {\n getWeight(instance, prueferToEdgeList(pcode))\n}\n\n# next we generate a random bi-objective graph\ng = genRandomMCGP(5L)\n\n# ... and finally compute the exact front of g\nres = getExactFront(g, obj.fun = objfunMCMST, enumerator.fun = enumerateMST, n.objectives = 2L)\n## Not run: \n##D plot(res$pareto.front)\n## End(Not run)\n\n\n"} {"package":"mcMST","topic":"getNumberOfSpanningTrees","snippet":"### Name: getNumberOfSpanningTrees\n### Title: Compute number of spanning trees of a graph\n### Aliases: getNumberOfSpanningTrees\n\n### ** Examples\n\n# generate complete graph\ng = genRandomMCGP(10)\n\n# this is equal to 10^8 (Cayley's theorem)\ngetNumberOfSpanningTrees(g)\n\n\n"} {"package":"mcMST","topic":"getRandomSpanningTree","snippet":"### Name: getRandomSpanningTree\n### Title: Generate random spanning tree.\n### Aliases: getRandomSpanningTree\n\n### ** Examples\n\ng = genRandomMCGP(10L)\nstree = getRandomSpanningTree(g)\n\n\n"} {"package":"mcMST","topic":"getWeight","snippet":"### Name: getWeight\n### Title: Get the overall costs/weight of a subgraph given its edgelist.\n### Aliases: getWeight\n\n### ** Examples\n\n# generate a random bi-objective graph\ng = genRandomMCGP(5)\n\n# generate a random Pruefer code, i.e., a random spanning tree of g\npcode = sample(1:5, 3, replace = TRUE)\n\ngetWeight(g, prueferToEdgeList(pcode))\ngetWeight(g, prueferToEdgeList(pcode), obj.types = \"bottleneck\")\n\n\n"} {"package":"mcMST","topic":"mcMSTEmoaBG","snippet":"### Name: mcMSTEmoaBG\n### Title: Subgraph EMOA for the multi-criteria MST problem.\n### Aliases: mcMSTEmoaBG\n\n### ** Examples\n\ninst = genRandomMCGP(10)\nres = mcMSTEmoaBG(inst, mu = 20L, max.iter = 100L)\nprint(res$pareto.front)\nprint(tail(getStatistics(res$log)))\n\n\n"} {"package":"mcMST","topic":"mcMSTPrim","snippet":"### Name: mcMSTPrim\n### Title: Multi-Objective Prim algorithm.\n### Aliases: mcMSTPrim\n\n### ** Examples\n\ng = genRandomMCGP(30)\nres = mcMSTPrim(g, n.lambdas = 50)\nprint(res$pareto.front)\n\n\n"} {"package":"mcMST","topic":"nodelistToEdgelist","snippet":"### Name: 
nodelistToEdgelist\n### Title: Convert sequence of nodes to edge list.\n### Aliases: nodelistToEdgelist\n\n### ** Examples\n\n# first generate a random permutation, e.g., representing\n# a roundtrip tour in a graph\nnodelist = sample(1:8)\n# now convert into an edge list\nnodelistToEdgelist(nodelist)\n\n\n"} {"package":"mcMST","topic":"permutationToCharVec","snippet":"### Name: permutationToCharVec\n### Title: Convert permutation to characteristic vector.\n### Aliases: permutationToCharVec\n\n### ** Examples\n\n# first generate a random permutation, e.g., representing\n# a roundtrip tour in a graph\nperm = sample(1:10)\nprint(perm)\n# now convert into a characteristic vector\npermutationToCharVec(perm, n = 10)\n\n\n"} {"package":"mcMST","topic":"permutationToEdgelist","snippet":"### Name: permutationToEdgelist\n### Title: Convert permutation to edge list.\n### Aliases: permutationToEdgelist\n\n### ** Examples\n\n# first generate a random permutation, e.g., representing\n# a roundtrip tour in a graph\nperm = sample(1:10)\nprint(perm)\n# now convert into an edge list\npermutationToEdgelist(perm)\n\n\n"} {"package":"mcMST","topic":"plotEdgeFrequency","snippet":"### Name: plotEdgeFrequency\n### Title: Visualization of edge frequency among solution set.\n### Aliases: plotEdgeFrequency\n\n### ** Examples\n\ng = genRandomMCGP(50L)\nres = mcMSTEmoaBG(mu = 10L, max.iter = 50, instance = g, scalarize = TRUE)\n## Not run: \n##D plotEdgeFrequency(list(g), list(res$pareto.set))\n## End(Not run)\n\n\n"} {"package":"mcMST","topic":"prueferToCharVec","snippet":"### Name: prueferToCharVec\n### Title: Convert Pruefer code to characteristic vector.\n### Aliases: prueferToCharVec\n\n### ** Examples\n\n# here we generate a random Pruefer-code representing\n# a random spanning tree of a graph with n = 10 nodes\npcode = sample(1:10, 8, replace = TRUE)\nprint(pcode)\nprint(prueferToCharVec(pcode))\n\n\n"} {"package":"mcMST","topic":"prueferToEdgeList","snippet":"### Name: prueferToEdgeList\n### Title: Convert Pruefer code to edge list.\n### Aliases: prueferToEdgeList\n\n### ** Examples\n\n# here we generate a random Pruefer-code representing\n# a random spanning tree of a graph with n = 10 nodes\npcode = sample(1:10, 8, replace = TRUE)\nprint(pcode)\nedgelist = prueferToEdgeList(pcode)\nprint(edgelist)\n\n\n"} {"package":"mcMST","topic":"sampleWeights","snippet":"### Name: sampleWeights\n### Title: Sample weights\n### Aliases: sampleWeights\n\n### ** Examples\n\nsampleWeights(2)\n\nweights = replicate(10, sampleWeights(3L))\ncolSums(weights)\n\n\n"} {"package":"mcMST","topic":"similarity_metrics","snippet":"### Name: similarity_metrics\n### Title: Metrics for spanning tree comparison.\n### Aliases: similarity_metrics getNumberOfCommonEdges\n### getSizeOfLargestCommonSubtree\n\n### ** Examples\n\n# Here we generate two random spanning trees of a complete\n# graph with 10 nodes\nset.seed(1)\nst1 = prueferToEdgeList(sample(1:10, size = 8, replace = TRUE))\nst2 = prueferToEdgeList(sample(1:10, size = 8, replace = TRUE))\n# Now check the number of common edges\nNCE = getNumberOfCommonEdges(st1, st2)\n# And the size of the largest common subtree\nSLS = getSizeOfLargestCommonSubtree(st1, st2)\n\n\n"} {"package":"OscillatorGenerator","topic":"ExpBurst","snippet":"### Name: ExpBurst\n### Title: Generation of a Burst Signal with Exponential Rise and Decline\n### Aliases: ExpBurst\n\n### ** Examples\n\n# test effect of changes in period\nm1 = ExpBurst(baseline = 200, peak = 1000, period = 10, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, 
sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = ExpBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in sec_duty_cycle\nm1 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.3, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.9, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 0.7, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1.3, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in peak_pos\nm1 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.6, duration = 500, resolution = 0.1)\nm3 = ExpBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.9, duration = 500, resolution = 
0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"OscillatorGenerator","topic":"ExpSpike","snippet":"### Name: ExpSpike\n### Title: Generation of a Spike Signal with Exponential Rise and Decline\n### Aliases: ExpSpike\n\n### ** Examples\n\n# test effect of changes in period\nm1 = ExpSpike(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = ExpSpike(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in peak_pos\nm1 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.6, trend = 1, duration = 500, resolution = 0.1)\nm3 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.9, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 0.7, duration = 500, resolution = 0.1)\nm2 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = ExpSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"OscillatorGenerator","topic":"LinBurst","snippet":"### Name: LinBurst\n### Title: Generation of a Burst Signal with Linear Rise and Decline\n### Aliases: LinBurst\n\n### ** Examples\n\n# test effect of changes in period\nm1 = LinBurst(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = LinBurst(baseline = 200, peak = 1000, period 
= 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = LinBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in sec_duty_cycle\nm1 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.3, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.9, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 0.7, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm3 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1.3, peak_pos = 0.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in peak_pos\nm1 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.3, duration = 500, resolution = 0.1)\nm2 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.6, duration = 500, resolution = 0.1)\nm3 = LinBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 850, trend = 1, peak_pos = 0.9, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = 
\"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"OscillatorGenerator","topic":"LinSpike","snippet":"### Name: LinSpike\n### Title: Generation of a Spike Signal with Linear Rise and Decline\n### Aliases: LinSpike\n\n### ** Examples\n\n# test effect of changes in period\nm1 = LinSpike(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = LinSpike(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in peak_pos\nm1 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm2 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.6, trend = 1, duration = 500, resolution = 0.1)\nm3 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.9, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 0.7, duration = 500, resolution = 0.1)\nm2 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1, duration = 500, resolution = 0.1)\nm3 = LinSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\npeak_pos = 0.3, trend = 1.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"OscillatorGenerator","topic":"Sinusoid","snippet":"### Name: Sinusoid\n### Title: Generation of a Sinusoidal Signal\n### Aliases: Sinusoid\n\n### ** Examples\n\n# test effect of changes in period\nm1 = Sinusoid(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm2 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = Sinusoid(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow 
= c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\ntrend = 1, duration = 500, resolution = 0.1)\nm2 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\ntrend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 0.7, duration = 500, resolution = 0.1)\nm2 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = Sinusoid(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n\n"} {"package":"OscillatorGenerator","topic":"SquareBurst","snippet":"### Name: SquareBurst\n### Title: Generation of a Square-wave Burst Signal\n### Aliases: SquareBurst\n\n### ** Examples\n\n# test effect of changes in period\nm1 = SquareBurst(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm3 = SquareBurst(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm3 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\nsec_duty_cycle = 0.5, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in sec_duty_cycle\nm1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.3, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm3 = SquareBurst(baseline = 200, peak = 1000, period = 100, 
duty_cycle = 0.6,\nsec_duty_cycle = 0.9, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 700, trend = 0.7, duration = 500, resolution = 0.1)\nm2 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 700, trend = 1, duration = 500, resolution = 0.1)\nm3 = SquareBurst(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\nsec_duty_cycle = 0.6, sec_peak = 700, trend = 1.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"OscillatorGenerator","topic":"SquareSpike","snippet":"### Name: SquareSpike\n### Title: Generation of a Square-wave Signal\n### Aliases: SquareSpike\n\n### ** Examples\n\n# test effect of changes in period\nm1 = SquareSpike(baseline = 200, peak = 1000, period = 50, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm2 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = SquareSpike(baseline = 200, peak = 1000, period = 200, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in duty_cycle\nm1 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.3,\ntrend = 1, duration = 500, resolution = 0.1)\nm2 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.9,\ntrend = 1, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n# test effect of changes in trend\nm1 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 0.7, duration = 500, resolution = 0.1)\nm2 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1, duration = 500, resolution = 0.1)\nm3 = SquareSpike(baseline = 200, peak = 1000, period = 100, duty_cycle = 0.6,\ntrend = 1.3, duration = 500, resolution = 0.1)\n\npar(mfrow = c(3,1))\nplot(m1, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m2, type = \"l\", xlab = \"time\", ylab = \"abundance\")\nplot(m3, type = \"l\", xlab = \"time\", ylab = \"abundance\")\n\n\n"} {"package":"NBShiny2","topic":"NBShiny2","snippet":"### Name: NBShiny2\n### Title: Launch 'NBShiny2' Interface\n### Aliases: NBShiny2\n### Keywords: NBShiny2\n\n### ** Examples\n\nif(interactive()){\nlibrary(rmarkdown)\nNBShiny2()\n}\n\n\n"} {"package":"BoSSA","topic":"circular_tree","snippet":"### Name: circular_tree\n### Title: Plot an inside-out circular tree\n### Aliases: circular_tree\n\n### ** 
Examples\n\n\nlibrary(ape)\n\ntest_tree <- rtree(20)\n\ncircular_tree(test_tree)\n\n\n\n"} {"package":"BoSSA","topic":"plot.pplace","snippet":"### Name: plot.pplace\n### Title: Plot a pplace or jplace object\n### Aliases: plot.pplace plot.jplace\n\n### ** Examples\n\n\ndata(pplace)\n\n### number type\nplot(pplace,type=\"number\",main=\"number\")\n\n### color type without and with legend\nplot(pplace,type=\"color\",main=\"color without legend\",legend=FALSE)\nplot(pplace,type=\"color\",main=\"color with legend\",legend=TRUE)\n\n### fattree type\nplot(pplace,type=\"fattree\",main=\"fattree\")\n\n### precise type\nplot(pplace,type=\"precise\",main=\"precise vanilla\")\nplot(pplace,type=\"precise\",simplify=TRUE,main=\"precise simplify\")\n\n# using the read number information encoded here in the name (if available)\nNpplace <- sample(1:100,nrow(pplace$multiclass),replace=TRUE)\n# in the following example, the dots are too large...\nplot(pplace,type=\"precise\",main=\"precise N\",legend=TRUE,N=Npplace,simplify=TRUE)\n\n# using the transfo option to modify dot sizes\n# note that placement sizes below 1 won't\n# behave properly with log10 as a transformation function.\n# In that case, rather use simplify (every placement\n# will then correspond to at least one sequence).\n# Beware that when using the transfo option,\n# the legend no longer corresponds to the actual placement\n# size but to the transformed placement size\n# (i.e. the transformation function applied to the dot size).\n# we will use the log10 function\nplot(pplace,type=\"precise\",main=\"precise log10\",\n\tlegend=TRUE,N=Npplace,transfo=log10)\n# or without simplify, you can use a custom function\n# as transfo that will produce positive sized dots\nplot(pplace,type=\"precise\",main=\"precise custom\"\n\t,legend=TRUE,N=Npplace,transfo=function(X){log10(X+1)})\n\n\n"} {"package":"BoSSA","topic":"pplace","snippet":"### Name: pplace\n### Title: A placement object as obtained with the read_sqlite function\n### Aliases: pplace\n\n### ** Examples\n\ndata(pplace)\nstr(pplace)\n\n\n"} {"package":"BoSSA","topic":"pplace_to_matrix","snippet":"### Name: pplace_to_matrix\n### Title: Pplace to contingency matrix\n### Aliases: pplace_to_matrix\n\n### ** Examples\n\n\ndata(pplace)\n\n### simple example\npplace_to_matrix(pplace,c(rep(\"sample1\",27),rep(\"sample2\",50),rep(\"sample3\",23)))\n\n### using the N option to specify the number of sequences each placement represents\nNpplace <- sample(1:20,100,replace=TRUE)\npplace_to_matrix(pplace,c(rep(\"sample1\",27),rep(\"sample2\",50),rep(\"sample3\",23)),N=Npplace)\n\n### with tax_name=TRUE\npplace_to_matrix(pplace,c(rep(\"sample1\",27),rep(\"sample2\",50),rep(\"sample3\",23)),tax_name=TRUE)\n\n\n"} {"package":"BoSSA","topic":"pplace_to_table","snippet":"### Name: pplace_to_table\n### Title: Merge the multiclass and the placement table of a pplace object\n### Aliases: pplace_to_table\n\n### ** Examples\n\n\ndata(pplace)\n\n### with every placement\npplace_to_table(pplace)\n\n### keeping only the best placement for each sequence\npplace_to_table(pplace,type=\"best\")\n\n\n"} {"package":"BoSSA","topic":"print.protdb","snippet":"### Name: print.protdb\n### Title: Compact display of protdb object\n### Aliases: print.protdb\n\n### ** Examples\n\npdb_file <- system.file(\"extdata\", \"1L2M.pdb\", package = \"BoSSA\")\npdb <- read_protdb(pdb_file)\nprint(pdb)
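\n# (added sketch, not from the original example) str() gives a quick look\n# at the top-level structure of the object returned by read_protdb()\nstr(pdb, max.level = 1)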
\n\n\n"} {"package":"BoSSA","topic":"print.pplace","snippet":"### Name: print.pplace\n### Title: Compact display of pplace and jplace objects\n### Aliases: print.pplace print.jplace\n\n### ** Examples\n\ndata(pplace)\nprint(pplace)\n\n\n"} {"package":"BoSSA","topic":"read_protdb","snippet":"### Name: read_protdb\n### Title: Read Protein Data Bank (PDB) file\n### Aliases: read_protdb\n\n### ** Examples\n\npdb_file <- system.file(\"extdata\", \"1L2M.pdb\", package = \"BoSSA\")\npdb <- read_protdb(pdb_file)\npdb\n\n\n"} {"package":"BoSSA","topic":"read_sqlite","snippet":"### Name: read_sqlite\n### Title: Read a pplacer/guppy sqlite file\n### Aliases: read_sqlite\n\n### ** Examples\n\n### the path to the sqlite and jplace files\nsqlite_file <- system.file(\"extdata\", \"example.sqlite\", package = \"BoSSA\")\njplace_file <- system.file(\"extdata\", \"example.jplace\", package = \"BoSSA\")\npplace <- read_sqlite(sqlite_file,jplace_file)\n\n\n"} {"package":"BoSSA","topic":"refpkg","snippet":"### Name: refpkg\n### Title: Summary data and plots for reference packages\n### Aliases: refpkg\n\n### ** Examples\n\n\nrefpkg_path <- paste(find.package(\"BoSSA\"),\"/extdata/example.refpkg\",sep=\"\")\n\n### summary\nrefpkg(refpkg_path)\n\n### taxonomy\ntaxonomy <- refpkg(refpkg_path,type=\"taxonomy\")\nhead(taxonomy)\n\n### info\nrefpkg(refpkg_path,type=\"info\")\n\n### tree\nrefpkg(refpkg_path,type=\"tree\",rank_tree=\"order\",cex.text=0.5)\n\n### pie\nrefpkg(refpkg_path,type=\"pie\",rank_pie=c(\"class\",\"order\",\"family\"),cex.text=0.6)\n\n### krona\n# this will produce a flat text file\n# this file can be used as input for the \"ImportText.pl\" krona script\n# see https://github.com/marbl/Krona/wiki/KronaTools for more details on krona\n## Not run: \n##D refpkg(refpkg_path,type=\"krona\",out_krona=\"for_krona.txt\")\n## End(Not run)\n\n\n"} {"package":"BoSSA","topic":"sub_pplace","snippet":"### Name: sub_pplace\n### Title: Subsets a pplace object\n### Aliases: sub_pplace\n\n### ** Examples\n\n\ndata(pplace)\n\n### subsetting using placement ids. Here placements 1 to 5\nsub1 <- sub_pplace(pplace,placement_id=1:5)\nsub1\n\n### subsetting using sequence ids\nid <- c(\"GWZHISEQ01:514:HMCLFBCXX:2:1108:1739:60356_90\",\n\"GWZHISEQ01:514:HMCLFBCXX:2:1114:13665:31277_80\")\nsub2 <- sub_pplace(pplace,ech_id=id)\nsub2\n\n### subsetting using a regular expression of sequence ids\nsub3 <- sub_pplace(pplace,ech_regexp=\"^HWI\")\nsub3\n\n\n"} {"package":"BoSSA","topic":"write_jplace","snippet":"### Name: write_jplace\n### Title: Write a jplace or pplace object to the disk\n### Aliases: write_jplace\n\n### ** Examples\n\n\ndata(pplace)\n## Not run: \n##D write_jplace(pplace,\"test.jplace\")\n## End(Not run)\n\n\n\n"} {"package":"readroper","topic":"read_rpr","snippet":"### Name: read_rpr\n### Title: Reads Roper Center datasets\n### Aliases: read_rpr\n\n### ** Examples\n\nfwf_sample <- readroper_example('testMultiCard.txt')\ncat(readr::read_lines(fwf_sample))\nfwf_sample2 <- readroper_example('testSingleCard.txt')\ncat(readr::read_lines(fwf_sample2))\n# 1. Fixed width file, first card, multi-card\nread_rpr(col_positions=c(1,2,4), widths=c(1,2,1),\ncol_names=c('V1','V2','V3'), filepath=fwf_sample, card_read=1, cards=2)\n# 2. Fixed width file, first card, single card\nread_rpr(col_positions=c(1,2,4), widths=c(1,2,1),\ncol_names=c('V1','V2','V3'), filepath=fwf_sample2)\n# 3. 
Fixed width file, second card, multi-card\nread_rpr(col_positions=c(1,2,4), widths=c(1,2,1),\ncol_names=c('V1','V2','V3'), filepath=fwf_sample, card_read=2, cards=2)\n\n\n\n"} {"package":"readroper","topic":"readroper_example","snippet":"### Name: readroper_example\n### Title: Get path to readroper example\n### Aliases: readroper_example\n\n### ** Examples\n\nreadroper_example()\nreadroper_example('testMultiCard.txt')\nreadroper_example('testSingleCard.txt')\n\n\n"} {"package":"HSAUR2","topic":"BCG","snippet":"### Name: BCG\n### Title: BCG Vaccine Data\n### Aliases: BCG\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"BCG\", package = \"HSAUR2\")\n\n ### sort studies w.r.t. sample size\n BCG <- BCG[order(rowSums(BCG[,2:5])),]\n\n ### to long format\n BCGlong <- with(BCG, data.frame(Freq = c(BCGTB, BCGVacc - BCGTB, \n NoVaccTB, NoVacc - NoVaccTB),\n infected = rep(rep(factor(c(\"yes\", \"no\")), \n rep(nrow(BCG), 2)), 2),\n vaccined = rep(factor(c(\"yes\", \"no\")), \n rep(nrow(BCG) * 2, 2)),\n study = rep(factor(Study, levels = as.character(Study)), \n 4)))\n\n ### doubledecker plot\n library(\"vcd\")\n doubledecker(xtabs(Freq ~ study + vaccined + infected, \n data = BCGlong))\n\n\n\n"} {"package":"HSAUR2","topic":"BtheB","snippet":"### Name: BtheB\n### Title: Beat the Blues Data\n### Aliases: BtheB\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"BtheB\", package = \"HSAUR2\")\n layout(matrix(1:2, nrow = 1)) \n ylim <- range(BtheB[,grep(\"bdi\", names(BtheB))], na.rm = TRUE)\n boxplot(subset(BtheB, treatment == \"TAU\")[,grep(\"bdi\", names(BtheB))],\n main = \"Treated as usual\", ylab = \"BDI\", \n xlab = \"Time (in months)\", names = c(0, 2, 3, 5, 8), ylim = ylim)\n boxplot(subset(BtheB, treatment == \"BtheB\")[,grep(\"bdi\", names(BtheB))], \n main = \"Beat the Blues\", ylab = \"BDI\", xlab = \"Time (in months)\",\n names = c(0, 2, 3, 5, 8), ylim = ylim)\n\n\n\n"} {"package":"HSAUR2","topic":"CHFLS","snippet":"### Name: CHFLS\n### Title: Chinese Health and Family Life Survey\n### Aliases: CHFLS\n### Keywords: datasets\n\n### ** Examples\n\n\n## Not run: \n##D \n##D ### for a description see http://popcenter.uchicago.edu/data/chfls.shtml\n##D library(\"TH.data\")\n##D load(file.path(path.package(package=\"TH.data\"), \"rda\", \"CHFLS.rda\"))\n##D \n##D tmp <- chfls1[, c(\"REGION6\", \"ZJ05\", \"ZJ06\", \"A35\", \"ZJ07\", \"ZJ16M\", \"INCRM\",\n##D \"JK01\", \"JK02\", \"JK20\", \"HY04\", \"HY07\", \"A02\", \"AGEGAPM\", \n##D \"A07M\", \"A14\", \"A21\", \"A22M\", \"A23\", \"AX16\", \"INCAM\", \"SEXNOW\", \"ZW04\")]\n##D \n##D names(tmp) <- c(\"Region\",\n##D \"Rgender\", ### gender of respondent\n##D \"Rage\", ### age of respondent\n##D \t\t\"RagestartA\",\t\t ### age of respondent at beginning of relationship \n##D ### with partner A\n##D \"Redu\", ### education of respondent\n##D \"RincomeM\", ### rounded monthly income of respondent\n##D \t\t\"RincomeComp\",\t\t ### inputed monthly income of respondent\n##D \"Rhealth\", ### health condition respondent\n##D \"Rheight\", ### respondent's height\n##D \"Rhappy\", ### respondent's happiness\n##D \"Rmartial\", ### respondent's marital status\n##D \"RhasA\", ### R has current A partner\n##D \"Agender\", ### gender of partner A\n##D \"RAagegap\", ### age gap\n##D \"RAstartage\", ### age at marriage\n##D \"Aheight\", ### height of partner A\n##D \"Aedu\", ### education of partner A\n##D \"AincomeM\", ### rounded partner A income\n##D \"AincomeEst\", ### estimated partner A income\n##D \"orgasm\", ### orgasm frequency\n##D 
\"AincomeComp\", ### imputed partner A income\n##D \"Rsexnow\", ### has sex last year\n##D \"Rhomosexual\") ### R is homosexual\n##D \n##D ### code missing values\n##D tmp$AincomeM[tmp$AincomeM < 0] <- NA\n##D tmp$RincomeM[tmp$RincomeM < 0] <- NA\n##D tmp$Aheight[tmp$Aheight < 0] <- NA\n##D \n##D olevels <- c(\"never\", \"rarely\", \"sometimes\", \"often\", \"always\")\n##D tmpA <- subset(tmp, Rgender == \"female\" & Rhomosexual != \"yes\" & orgasm %in% olevels)\n##D \n##D ### 1534 subjects\n##D dim(tmpA)\n##D \n##D CHFLS <- tmpA[, c(\"Region\", \"Rage\", \"Redu\", \"RincomeComp\", \"Rhealth\", \"Rheight\", \"Rhappy\",\n##D \"Aheight\", \"Aedu\", \"AincomeComp\")]\n##D names(CHFLS) <- c(\"R_region\", \"R_age\", \"R_edu\", \"R_income\", \"R_health\", \"R_height\", \n##D \"R_happy\", \"A_height\", \"A_edu\", \"A_income\")\n##D levels(CHFLS$R_region) <- c(\"Coastal South\", \"Coastal Easth\", \"Inlands\", \"North\", \n##D \"Northeast\", \"Central West\")\n##D \n##D CHFLS$R_edu <- ordered(as.character(CHFLS$R_edu), levels = c(\"no school\", \"primary\", \n##D \"low mid\", \"up mid\", \"j col\", \"univ/grad\"))\n##D levels(CHFLS$R_edu) <- c(\"Never attended school\", \"Elementary school\", \"Junior high school\", \n##D \"Senior high school\", \"Junior college\", \"University\")\n##D CHFLS$A_edu <- ordered(as.character(CHFLS$A_edu), levels = c(\"no school\", \"primary\", \n##D \"low mid\", \"up mid\", \"j col\", \"univ/grad\"))\n##D levels(CHFLS$A_edu) <- c(\"Never attended school\", \"Elementary school\", \"Junior high school\", \n##D \"Senior high school\", \"Junior college\", \"University\")\n##D \n##D CHFLS$R_health <- ordered(as.character(CHFLS$R_health), levels = c(\"poor\", \"not good\", \n##D \"fair\", \"good\", \"excellent\"))\n##D levels(CHFLS$R_health) <- c(\"Poor\", \"Not good\", \"Fair\", \"Good\", \"Excellent\")\n##D \n##D CHFLS$R_happy <- ordered(as.character(CHFLS$R_happy), levels = c(\"v unhappy\", \"not too\", \n##D \"relatively\", \"very\"))\n##D levels(CHFLS$R_happy) <- c(\"Very unhappy\", \"Not too happy\", \"Relatively happy\", \"Very happy\")\n## End(Not run)\n\n\n\n"} {"package":"HSAUR2","topic":"CYGOB1","snippet":"### Name: CYGOB1\n### Title: CYG OB1 Star Cluster Data\n### Aliases: CYGOB1\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"CYGOB1\", package = \"HSAUR2\")\n plot(logst ~ logli, data = CYGOB1)\n\n\n\n"} {"package":"HSAUR2","topic":"Forbes2000","snippet":"### Name: Forbes2000\n### Title: The Forbes 2000 Ranking of the World's Biggest Companies (Year\n### 2004)\n### Aliases: Forbes2000\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"Forbes2000\", package = \"HSAUR2\")\nsummary(Forbes2000)\n### number of countries\nlength(levels(Forbes2000$country))\n### number of industries\nlength(levels(Forbes2000$category))\n\n\n"} {"package":"HSAUR2","topic":"GHQ","snippet":"### Name: GHQ\n### Title: General Health Questionnaire\n### Aliases: GHQ\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"GHQ\", package = \"HSAUR2\")\n male <- subset(GHQ, gender == \"male\")\n female <- subset(GHQ, gender == \"female\")\n layout(matrix(1:2, ncol = 2))\n barplot(t(as.matrix(male[,c(\"cases\", \"non.cases\")])), main = \"Male\", xlab = \"GHC score\")\n barplot(t(as.matrix(male[,c(\"cases\", \"non.cases\")])), main = \"Female\", xlab = \"GHC score\")\n\n\n\n"} {"package":"HSAUR2","topic":"HSAURtable","snippet":"### Name: HSAURtable\n### Title: Produce LaTeX Tables\n### Aliases: HSAURtable toLatex.tabtab toLatex.dftab HSAURtable.table\n### HSAURtable.data.frame\n### 
\n\n\n"} {"package":"HSAUR2","topic":"GHQ","snippet":"### Name: GHQ\n### Title: General Health Questionnaire\n### Aliases: GHQ\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"GHQ\", package = \"HSAUR2\")\n male <- subset(GHQ, gender == \"male\")\n female <- subset(GHQ, gender == \"female\")\n layout(matrix(1:2, ncol = 2))\n barplot(t(as.matrix(male[,c(\"cases\", \"non.cases\")])), main = \"Male\", xlab = \"GHQ score\")\n barplot(t(as.matrix(female[,c(\"cases\", \"non.cases\")])), main = \"Female\", xlab = \"GHQ score\")\n\n\n\n"} {"package":"HSAUR2","topic":"HSAURtable","snippet":"### Name: HSAURtable\n### Title: Produce LaTeX Tables\n### Aliases: HSAURtable toLatex.tabtab toLatex.dftab HSAURtable.table\n### HSAURtable.data.frame\n### Keywords: misc\n\n### ** Examples\n\n\n data(\"rearrests\", package = \"HSAUR2\")\n toLatex(HSAURtable(rearrests), \n caption = \"Rearrests of juvenile felons.\", \n label = \"rearrests_tab\")\n\n\n\n"} {"package":"HSAUR2","topic":"Lanza","snippet":"### Name: Lanza\n### Title: Prevention of Gastrointestinal Damages\n### Aliases: Lanza\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"Lanza\", package = \"HSAUR2\")\n layout(matrix(1:4, nrow = 2))\n pl <- tapply(1:nrow(Lanza), Lanza$study, function(indx)\n mosaicplot(table(Lanza[indx,\"treatment\"], \n Lanza[indx,\"classification\"]),\n main = \"\", shade = TRUE))\n\n\n\n"} {"package":"HSAUR2","topic":"USairpollution","snippet":"### Name: USairpollution\n### Title: Air Pollution in US Cities\n### Aliases: USairpollution\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"USairpollution\", package = \"HSAUR2\")\n\n\n\n"} {"package":"HSAUR2","topic":"USmelanoma","snippet":"### Name: USmelanoma\n### Title: USA Malignant Melanoma Data\n### Aliases: USmelanoma\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"USmelanoma\", package = \"HSAUR2\")\n\n\n"} {"package":"HSAUR2","topic":"agefat","snippet":"### Name: agefat\n### Title: Total Body Composition Data\n### Aliases: agefat\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"agefat\", package = \"HSAUR2\")\n plot(fat ~ age, data = agefat)\n\n\n\n"} {"package":"HSAUR2","topic":"aspirin","snippet":"### Name: aspirin\n### Title: Aspirin Data\n### Aliases: aspirin\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"aspirin\", package = \"HSAUR2\")\n aspirin\n\n\n\n"} {"package":"HSAUR2","topic":"backpain","snippet":"### Name: backpain\n### Title: Driving and Back Pain Data\n### Aliases: backpain\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"backpain\", package = \"HSAUR2\")\n summary(backpain)\n\n\n\n"} {"package":"HSAUR2","topic":"birthdeathrates","snippet":"### Name: birthdeathrates\n### Title: Birth and Death Rates Data\n### Aliases: birthdeathrates\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"birthdeathrates\", package = \"HSAUR2\")\n plot(birthdeathrates)\n\n\n\n"} {"package":"HSAUR2","topic":"bladdercancer","snippet":"### Name: bladdercancer\n### Title: Bladder Cancer Data\n### Aliases: bladdercancer\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"bladdercancer\", package = \"HSAUR2\")\n mosaicplot(xtabs(~ number + tumorsize, data = bladdercancer))\n\n\n\n"} {"package":"HSAUR2","topic":"clouds","snippet":"### Name: clouds\n### Title: Cloud Seeding Data\n### Aliases: clouds\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"clouds\", package = \"HSAUR2\")\n layout(matrix(1:2, nrow = 2))\n boxplot(rainfall ~ seeding, data = clouds, ylab = \"Rainfall\")\n boxplot(rainfall ~ echomotion, data = clouds, ylab = \"Rainfall\") \n\n\n\n"} {"package":"HSAUR2","topic":"epilepsy","snippet":"### Name: epilepsy\n### Title: Epilepsy Data\n### Aliases: epilepsy\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"epilepsy\", package = \"HSAUR2\")\n library(lattice)\n dotplot(I(seizure.rate / base) ~ period | subject, data = epilepsy, \n subset = treatment == \"Progabide\")\n dotplot(I(seizure.rate / base) ~ period | subject, data = epilepsy, \n subset = treatment == \"placebo\")\n\n\n\n"} {"package":"HSAUR2","topic":"foster","snippet":"### Name: foster\n### Title: Foster Feeding Experiment\n### Aliases: foster\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"foster\", package = \"HSAUR2\")\n plot.design(foster)\n\n\n\n"} 
{"package":"HSAUR2","topic":"gardenflowers","snippet":"### Name: gardenflowers\n### Title: Garden Flowers\n### Aliases: gardenflowers\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"gardenflowers\", package = \"HSAUR2\")\n gardenflowers\n\n\n\n"} {"package":"HSAUR2","topic":"heptathlon","snippet":"### Name: heptathlon\n### Title: Olympic Heptathlon Seoul 1988\n### Aliases: heptathlon\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"heptathlon\", package = \"HSAUR2\")\n plot(heptathlon)\n\n\n\n"} {"package":"HSAUR2","topic":"household","snippet":"### Name: household\n### Title: Household Expenditure Data\n### Aliases: household\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"household\", package = \"HSAUR2\")\n\n\n\n"} {"package":"HSAUR2","topic":"mastectomy","snippet":"### Name: mastectomy\n### Title: Survival Times after Mastectomy of Breast Cancer Patients\n### Aliases: mastectomy\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"mastectomy\", package = \"HSAUR2\")\n table(mastectomy$metastasized)\n\n\n"} {"package":"HSAUR2","topic":"men1500m","snippet":"### Name: men1500m\n### Title: Winners of the Olympic Men's 1500m\n### Aliases: men1500m\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"men1500m\", package = \"HSAUR2\")\n op <- par(las = 2)\n plot(time ~ year, data = men1500m, axes = FALSE)\n yrs <- seq(from = 1896, to = 2004, by = 4)\n axis(1, at = yrs, labels = yrs)\n axis(2)\n box()\n par(op)\n\n\n\n"} {"package":"HSAUR2","topic":"meteo","snippet":"### Name: meteo\n### Title: Meteorological Measurements for 11 Years\n### Aliases: meteo\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"meteo\", package = \"HSAUR2\")\n meteo\n\n\n\n"} {"package":"HSAUR2","topic":"orallesions","snippet":"### Name: orallesions\n### Title: Oral Lesions in Rural India\n### Aliases: orallesions\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"orallesions\", package = \"HSAUR2\")\n mosaicplot(orallesions)\n\n\n\n"} {"package":"HSAUR2","topic":"phosphate","snippet":"### Name: phosphate\n### Title: Phosphate Level Data\n### Aliases: phosphate\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"phosphate\", package = \"HSAUR2\")\n plot(t0 ~ group, data = phosphate)\n\n\n\n"} {"package":"HSAUR2","topic":"pistonrings","snippet":"### Name: pistonrings\n### Title: Piston Rings Failures\n### Aliases: pistonrings\n### Keywords: datasets\n\n### ** Examples\n\n \n data(\"pistonrings\", package = \"HSAUR2\")\n mosaicplot(pistonrings)\n\n\n\n"} {"package":"HSAUR2","topic":"planets","snippet":"### Name: planets\n### Title: Exoplanets Data\n### Aliases: planets\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"planets\", package = \"HSAUR2\")\n require(\"scatterplot3d\")\n scatterplot3d(log(planets$mass), log(planets$period), log(planets$eccen), \n type = \"h\", highlight.3d = TRUE, angle = 55, \n scale.y = 0.7, pch = 16)\n\n\n\n"} {"package":"HSAUR2","topic":"plasma","snippet":"### Name: plasma\n### Title: Blood Screening Data\n### Aliases: plasma\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"plasma\", package = \"HSAUR2\")\n layout(matrix(1:2, ncol = 2))\n boxplot(fibrinogen ~ ESR, data = plasma, varwidth = TRUE)\n boxplot(globulin ~ ESR, data = plasma, varwidth = TRUE)\n\n\n\n"} {"package":"HSAUR2","topic":"polyps","snippet":"### Name: polyps\n### Title: Familial Andenomatous Polyposis\n### Aliases: polyps\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"polyps\", package = \"HSAUR2\")\n plot(number ~ age, data = polyps, pch = 
\n\n\n\n"} {"package":"HSAUR2","topic":"polyps","snippet":"### Name: polyps\n### Title: Familial Adenomatous Polyposis\n### Aliases: polyps\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"polyps\", package = \"HSAUR2\")\n plot(number ~ age, data = polyps, pch = as.numeric(polyps$treat))\n legend(40, 40, legend = levels(polyps$treat), pch = 1:2, bty = \"n\")\n\n\n\n"} {"package":"HSAUR2","topic":"polyps3","snippet":"### Name: polyps3\n### Title: Familial Adenomatous Polyposis\n### Aliases: polyps3\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"polyps3\", package = \"HSAUR2\")\n plot(number3m ~ age, data = polyps3, pch = as.numeric(polyps3$treatment))\n legend(\"topright\", legend = levels(polyps3$treatment), pch = 1:2, bty = \"n\")\n\n\n\n"} {"package":"HSAUR2","topic":"pottery","snippet":"### Name: pottery\n### Title: Romano-British Pottery Data\n### Aliases: pottery\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"pottery\", package = \"HSAUR2\")\n plot(pottery)\n\n\n\n"} {"package":"HSAUR2","topic":"rearrests","snippet":"### Name: rearrests\n### Title: Rearrests of Juvenile Felons\n### Aliases: rearrests\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"rearrests\", package = \"HSAUR2\")\n rearrests\n\n\n\n"} {"package":"HSAUR2","topic":"respiratory","snippet":"### Name: respiratory\n### Title: Respiratory Illness Data\n### Aliases: respiratory\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"respiratory\", package = \"HSAUR2\")\n mosaicplot(xtabs( ~ treatment + month + status, data = respiratory))\n\n\n\n"} {"package":"HSAUR2","topic":"roomwidth","snippet":"### Name: roomwidth\n### Title: Students' Estimates of Lecture Room Width\n### Aliases: roomwidth\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"roomwidth\", package = \"HSAUR2\")\n convert <- ifelse(roomwidth$unit == \"feet\", 1, 3.28)\n boxplot(I(width * convert) ~ unit, data = roomwidth)\n\n\n\n"} {"package":"HSAUR2","topic":"schizophrenia","snippet":"### Name: schizophrenia\n### Title: Age of Onset of Schizophrenia Data\n### Aliases: schizophrenia\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"schizophrenia\", package = \"HSAUR2\")\n boxplot(age ~ gender, data = schizophrenia)\n\n\n\n"} {"package":"HSAUR2","topic":"schizophrenia2","snippet":"### Name: schizophrenia2\n### Title: Schizophrenia Data\n### Aliases: schizophrenia2\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"schizophrenia2\", package = \"HSAUR2\")\n mosaicplot(xtabs( ~ onset + month + disorder, data = schizophrenia2))\n\n\n\n"} {"package":"HSAUR2","topic":"schooldays","snippet":"### Name: schooldays\n### Title: Days not Spent at School\n### Aliases: schooldays\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"schooldays\", package = \"HSAUR2\")\n plot.design(schooldays)\n\n\n\n"} {"package":"HSAUR2","topic":"skulls","snippet":"### Name: skulls\n### Title: Egyptian Skulls\n### Aliases: skulls\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"skulls\", package = \"HSAUR2\")\n means <- tapply(1:nrow(skulls), skulls$epoch, function(i)\n apply(skulls[i,colnames(skulls)[-1]], 2, mean))\n means <- matrix(unlist(means), nrow = length(means), byrow = TRUE)\n colnames(means) <- colnames(skulls)[-1]\n rownames(means) <- levels(skulls$epoch)\n pairs(means,\n panel = function(x, y) {\n text(x, y, levels(skulls$epoch))\n })\n\n\n\n"} {"package":"HSAUR2","topic":"smoking","snippet":"### Name: smoking\n### Title: Nicotine Gum and Smoking Cessation\n### Aliases: smoking\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"smoking\", package = \"HSAUR2\")\n boxplot(smoking$qt/smoking$tt,\n smoking$qc/smoking$tc,\n names = c(\"Treated\", \"Control\"), ylab = \"Percent Quitters\")
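\n # (added sketch, not part of the original example) pooled quit\n # proportions, treated vs. control, across all studies\n c(treated = sum(smoking$qt) / sum(smoking$tt),\n control = sum(smoking$qc) / sum(smoking$tc))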
\n\n\n\n"} {"package":"HSAUR2","topic":"students","snippet":"### Name: students\n### Title: Student Risk Taking\n### Aliases: students\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"students\", package = \"HSAUR2\")\n layout(matrix(1:2, ncol = 2))\n boxplot(low ~ treatment, data = students, ylab = \"low\")\n boxplot(high ~ treatment, data = students, ylab = \"high\")\n\n\n\n"} {"package":"HSAUR2","topic":"suicides","snippet":"### Name: suicides\n### Title: Crowd Baiting Behaviour and Suicides\n### Aliases: suicides\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"suicides\", package = \"HSAUR2\")\n mosaicplot(suicides)\n\n\n\n"} {"package":"HSAUR2","topic":"toenail","snippet":"### Name: toenail\n### Title: Toenail Infection Data\n### Aliases: toenail\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"toenail\", package = \"HSAUR2\")\n\n\n\n\n"} {"package":"HSAUR2","topic":"toothpaste","snippet":"### Name: toothpaste\n### Title: Toothpaste Data\n### Aliases: toothpaste\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"toothpaste\", package = \"HSAUR2\")\n toothpaste\n\n\n\n"} {"package":"HSAUR2","topic":"voting","snippet":"### Name: voting\n### Title: House of Representatives Voting Data\n### Aliases: voting\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"voting\", package = \"HSAUR2\")\n require(\"MASS\")\n voting_mds <- isoMDS(voting)\n plot(voting_mds$points[,1], voting_mds$points[,2],\n type = \"n\", xlab = \"Coordinate 1\", ylab = \"Coordinate 2\",\n xlim = range(voting_mds$points[,1])*1.2)\n text(voting_mds$points[,1], voting_mds$points[,2], \n labels = colnames(voting))\n voting_sh <- Shepard(voting[lower.tri(voting)], voting_mds$points)\n\n\n\n"} {"package":"HSAUR2","topic":"water","snippet":"### Name: water\n### Title: Mortality and Water Hardness\n### Aliases: water\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"water\", package = \"HSAUR2\")\n plot(mortality ~ hardness, data = water, \n col = as.numeric(water$location))\n\n\n\n"} {"package":"HSAUR2","topic":"watervoles","snippet":"### Name: watervoles\n### Title: Water Voles Data\n### Aliases: watervoles\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"watervoles\", package = \"HSAUR2\")\n watervoles\n\n\n\n"} {"package":"HSAUR2","topic":"waves","snippet":"### Name: waves\n### Title: Electricity from Wave Power at Sea\n### Aliases: waves\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"waves\", package = \"HSAUR2\")\n plot(method1 ~ method2, data = waves)\n\n\n\n"} {"package":"HSAUR2","topic":"weightgain","snippet":"### Name: weightgain\n### Title: Gain in Weight of Rats\n### Aliases: weightgain\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"weightgain\", package = \"HSAUR2\")\n interaction.plot(weightgain$type, weightgain$source, \n weightgain$weightgain)\n\n\n\n"} {"package":"HSAUR2","topic":"womensrole","snippet":"### Name: womensrole\n### Title: Women's Role in Society\n### Aliases: womensrole\n### Keywords: datasets\n\n### ** Examples\n\n\n data(\"womensrole\", package = \"HSAUR2\")\n summary(subset(womensrole, gender == \"Female\"))\n summary(subset(womensrole, gender == \"Male\"))
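\n # (added sketch, not part of the original example) agreement counts can\n # also be cross-tabulated by gender in one step\n xtabs(cbind(agree, disagree) ~ gender, data = womensrole)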
\"rlogo.jpeg\")\nwriteJPEG(img, jpeg)\n# browseURL(jpeg)\n\n\n"} {"package":"ALTopt","topic":"ALTopt-package","snippet":"### Name: ALTopt-package\n### Title: Optimal Experimental Designs for Accelerated Life Testing\n### Aliases: ALTopt-package\n\n### ** Examples\n\n## No test: \n# D optimal design of two stress factors with right censoring.\nDesign.D <- altopt.rc(\"D\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01))\n\ndesign.plot(Design.D$opt.design.rounded, x1, x2)\n\npv.contour.rc(Design.D$opt.design.rounded, x1, x2, 100, 2, 1,\nformula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\nuseCond = c(1.758, 3.159))\n\nFUS.D <- pv.fus.rc(Design.D$opt.design.rounded, 100, 2, 1,\nformula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\nuseLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n\n# U optimal design of two stress factors with right censoring.\nDesign.U <- altopt.rc(\"U\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n\ndesign.plot(Design.U$opt.design.rounded, x1, x2)\n\npv.contour.rc(Design.U$opt.design.rounded, x1, x2, 100, 2, 1,\nformula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\nuseCond = c(1.758, 3.159))\n\nFUS.U <- pv.fus.rc(Design.U$opt.design.rounded, 100, 2, 1,\nformula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\nuseLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n\n# Comparing D and U optimal design.\ncompare.fus(FUS.D, FUS.U)\n## End(No test)\n\n\n"} {"package":"ALTopt","topic":"alteval.ic","snippet":"### Name: alteval.ic\n### Title: Design evaluation with interval censoring.\n### Aliases: alteval.ic\n\n### ** Examples\n\n# Evaluation of factorial design for interval censoring.\nx1 <- c(0, 1, 0, 1)\nx2 <- c(0, 0, 1, 1)\nallocation <- c(25, 25, 25, 25)\nfacDes <- data.frame(x1, x2, allocation)\n\nalteval.ic(facDes, \"D\", 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01))\n\nalteval.ic(facDes, \"U\", 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n\nalteval.ic(facDes, \"I\", 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n\n\n"} {"package":"ALTopt","topic":"alteval.rc","snippet":"### Name: alteval.rc\n### Title: Design evaluation with right censoring.\n### Aliases: alteval.rc\n\n### ** Examples\n\n# Evaluation of factorial design for right censoring.\nx1 <- c(0, 1, 0, 1)\nx2 <- c(0, 0, 1, 1)\nallocation <- c(25, 25, 25, 25)\nfacDes <- data.frame(x1, x2, allocation)\n\nalteval.rc(facDes, \"D\", 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01))\n\nalteval.rc(facDes, \"U\", 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n\nalteval.rc(facDes, \"I\", 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\ncoef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n\n\n"} {"package":"ALTopt","topic":"altopt.ic","snippet":"### Name: altopt.ic\n### Title: Optimal design with interval censoring.\n### Aliases: altopt.ic\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design for interval censoring.\n##D altopt.ic(\"D\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D # Generating U optimal design for interval censoring.\n##D altopt.ic(\"D\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, 
\n\n\n"} {"package":"ALTopt","topic":"altopt.ic","snippet":"### Name: altopt.ic\n### Title: Optimal design with interval censoring.\n### Aliases: altopt.ic\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design for interval censoring.\n##D altopt.ic(\"D\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D # Generating U optimal design for interval censoring.\n##D altopt.ic(\"U\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D # Generating I optimal design for interval censoring.\n##D altopt.ic(\"I\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859),\n##D useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"altopt.rc","snippet":"### Name: altopt.rc\n### Title: Optimal design with right censoring.\n### Aliases: altopt.rc\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design for right censoring.\n##D altopt.rc(\"D\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D # Generating U optimal design for right censoring.\n##D altopt.rc(\"U\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D # Generating I optimal design for right censoring.\n##D altopt.rc(\"I\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859),\n##D useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"compare.fus","snippet":"### Name: compare.fus\n### Title: Comparing designs using FUS\n### Aliases: compare.fus\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design and FUS plot.\n##D Dopt <- altopt.rc(\"D\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D FUS.D <- pv.fus.rc(Dopt$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D # Generating U optimal design and FUS plot.\n##D Uopt <- altopt.rc(\"U\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D FUS.U <- pv.fus.rc(Uopt$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D # Comparing D and U optimal designs.\n##D compare.fus(FUS.D, FUS.U)\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"compare.vdus","snippet":"### Name: compare.vdus\n### Title: Comparing designs using VDUS\n### Aliases: compare.vdus\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design and VDUS plot.\n##D Dopt <- altopt.rc(\"D\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D VDUS.D <- pv.vdus.rc(Dopt$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D # Generating U optimal design and VDUS plot.\n##D Uopt <- altopt.rc(\"U\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D VDUS.U <- pv.vdus.rc(Uopt$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D # Comparing D and U optimal designs.\n##D compare.vdus(VDUS.D, VDUS.U)\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"convert.stress.level","snippet":"### Name: convert.stress.level\n### Title: Coding and decoding stress level\n### Aliases: convert.stress.level\n\n### ** Examples\n\n## Not run: \n##D # Generating D optimal design in coded units.\n##D Design <- altopt.rc(optType = \"D\", N = 100, tc = 100, nf = 2, alpha = 1,\n##D formula = ~x1 + x2 + x1:x2, 
coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D # Transform the coded units to the actual stress variables' levels.\n##D convert.stress.level(lowStLv = c(34.834, 4.094), highStLv = c(30.288, 4.5),\n##D stand = Design$opt.design.rounded)\n##D \n##D # Transform the actual stress levels to coded units.\n##D use <- c(38.281, 3.219)\n##D convert.stress.level(lowStLv = c(34.834, 4.094), highStLv = c(30.288, 4.5),\n##D actual = use)\n##D \n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"design.plot","snippet":"### Name: design.plot\n### Title: Design plot.\n### Aliases: design.plot\n\n### ** Examples\n\n## Not run: \n##D # Design plot of D optimal design with right censoring.\n##D Design1 <- altopt.rc(\"D\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01))\n##D \n##D design.plot(Design1$opt.design.rounded, x1, x2)\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.contour.ic","snippet":"### Name: pv.contour.ic\n### Title: Contour plot of prediction variance for a design with interval\n### censoring.\n### Aliases: pv.contour.ic\n\n### ** Examples\n\n## Not run: \n##D # Contour plot of prediction variance of U optimal design with interval censoring.\n##D Design <- altopt.ic(\"U\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D pv.contour.ic(Design$opt.design.rounded, x1, x2, 30, 5, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.contour.rc","snippet":"### Name: pv.contour.rc\n### Title: Contour plot of prediction variance for a design with right\n### censoring.\n### Aliases: pv.contour.rc\n\n### ** Examples\n\n## Not run: \n##D # Contour plot of prediction variance of U optimal design with right censoring.\n##D Design <- altopt.rc(\"U\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n##D \n##D pv.contour.rc(Design$opt.design.rounded, x1, x2, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01), useCond = c(1.758, 3.159))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.fus.ic","snippet":"### Name: pv.fus.ic\n### Title: FUS (Fraction of Use Space) plot for interval censoring.\n### Aliases: pv.fus.ic\n\n### ** Examples\n\n## Not run: \n##D # FUS plot of I optimal design with interval censoring.\n##D Design <- altopt.ic(\"I\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D pv.fus.ic(Design$opt.design.rounded, 30, 5, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.fus.rc","snippet":"### Name: pv.fus.rc\n### Title: FUS (Fraction of Use Space) plot for right censoring.\n### Aliases: pv.fus.rc\n\n### ** Examples\n\n## Not run: \n##D # FUS plot of I optimal design with right censoring.\n##D Design <- altopt.rc(\"I\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D pv.fus.rc(Design$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.vdus.ic","snippet":"### Name: 
pv.vdus.ic\n### Title: VDUS (Variance Dispersion of Use Space) plot for interval\n### censoring.\n### Aliases: pv.vdus.ic\n\n### ** Examples\n\n## Not run: \n##D # VDUS plot of I optimal design with interval censoring.\n##D Design <- altopt.ic(\"I\", 100, 30, 5, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D pv.vdus.ic(Design$opt.design.rounded, 30, 5, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"ALTopt","topic":"pv.vdus.rc","snippet":"### Name: pv.vdus.rc\n### Title: VDUS (Variance Dispersion of Use Space) plot for right\n### censoring.\n### Aliases: pv.vdus.rc\n\n### ** Examples\n\n## Not run: \n##D # VDUS plot of I optimal design with right censoring.\n##D Design <- altopt.rc(\"I\", 100, 100, 2, 1, formula = ~ x1 + x2 + x1:x2,\n##D coef = c(0, -4.086, -1.476, 0.01), useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n##D \n##D pv.vdus.rc(Design$opt.design.rounded, 100, 2, 1,\n##D formula = ~ x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01),\n##D useLower = c(1.458, 2.859), useUpper = c(2.058, 3.459))\n## End(Not run)\n\n\n"} {"package":"CBCgrps","topic":"CBCgrps","snippet":"### Name: CBCgrps2.8-package\n### Title: Compare Baseline Characteristics Between Groups\n### Aliases: CBCgrps\n### Keywords: bivariate analysis; statistical description\n\n### ** Examples\n\ndata(df)\t\na<-twogrps(df,\"mort\")\n\n\n"} {"package":"CBCgrps","topic":"df","snippet":"### Name: df\n### Title: simulated dataset as a working example\n### Aliases: df\n### Keywords: datasets\n\n### ** Examples\n\ndata(df)\n## maybe str(df) ; plot(df) ...\n\n\n"} {"package":"CBCgrps","topic":"dt","snippet":"### Name: dt\n### Title: simulated dataset as a working example\n### Aliases: dt\n### Keywords: datasets\n\n### ** Examples\n\ndata(dt)\n## maybe str(dt) ; plot(dt) ...\n\n\n"} {"package":"CBCgrps","topic":"multigrps","snippet":"### Name: multigrps\n### Title: Compare Baseline Characteristics between three or more groups\n### Aliases: multigrps\n### Keywords: baseline compare\n\n### ** Examples\n\ndata(df)\t\nb<-multigrps(df,\"comorbid\")\nprint(b,quote=TRUE)\n\n\n"} {"package":"CBCgrps","topic":"twogrps","snippet":"### Name: twogrps\n### Title: Compare Baseline Characteristics between two groups\n### Aliases: twogrps\n### Keywords: Compare baseline\n\n### ** Examples\n\ndata(df)\t\na<-twogrps(df,\"mort\")\nprint(a,quote = TRUE)\n# define skewed variables manually\nprint(twogrps(df,\"mort\",skewvar=c(\"crp\",\"wbc\")),\n quote = TRUE)\n\n\n"} {"package":"funmediation","topic":"simulate_funmediation_example","snippet":"### Name: simulate_funmediation_example\n### Title: simulate_funmediation_example function\n### Aliases: simulate_funmediation_example\n\n### ** Examples\n\nset.seed(123)\n# Simplest way to call the function:\nsimulation_all_defaults <- simulate_funmediation_example()\nsummary(simulation_all_defaults)\nhead(simulation_all_defaults)\n# Changing the sample size to be larger:\nsimulation_larger <- simulate_funmediation_example(nsub=10000)\nsummary(simulation_larger)\n# Changing the effect of the mediator to be null:\nsimulation_null <- simulate_funmediation_example(beta_M=function(t) {return(0*t)})\nsummary(simulation_null)\n# Simulating an exposure variable with three levels (two dichotomous dummy codes)\nsimulation_three_group <- simulate_funmediation_example(nlevels=3,\n alpha_X = 
list(function(t) {return(.1*t)},\n function(t) {return(-(t/2)^.5)}),\n beta_X = c(-.2,.2))\nprint(summary(simulation_three_group));\n\n\n\n"} {"package":"GSED","topic":"boundaries_sim","snippet":"### Name: boundaries_sim\n### Title: Lower and upper boundaries for GSED\n### Aliases: boundaries_sim\n\n### ** Examples\n\n#For testing purposes only; a larger number of simulations is required (see comments below)\nboundaries_sim(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), ratio_Delta_star_d1=c(1), \nordering=FALSE, seed=42, n_trials=3, alpha_spending=c(0,0.0125,0.025), \none_minus_alpha_spending=c(0,0.4875,0.975))\n\n#boundaries_sim(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), ratio_Delta_star_d1=c(1), \n#ordering=FALSE, seed=42, n_trials=10000000, alpha_spending=c(0,0.0125,0.025), \n#one_minus_alpha_spending=c(0,0.4875,0.975))\n\n\n"} {"package":"GSED","topic":"magnusson_turnbull","snippet":"### Name: magnusson_turnbull\n### Title: Application of GSED on data\n### Aliases: magnusson_turnbull\n\n### ** Examples\n\nmagnusson_turnbull(stage_cur=0, keep=NA, N_subsets=3, Y=c(-10.71,12.84,19.06), \nI=c(480,144,176), l=c(0.7962,2.5204), u=c(2.7625,2.5204), ordering=FALSE)\n\nmagnusson_turnbull(stage_cur=2, keep=c(2,3), N_subsets=3, Y=135.57, \nI=1120, l=c(0.7962,2.5204), u=c(2.7625,2.5204), ordering=FALSE)\n\n\n"} {"package":"GSED","topic":"max_FI","snippet":"### Name: max_FI\n### Title: Maximum Fisher information\n### Aliases: max_FI\n\n### ** Examples\n\ntheta_assumption = list(matrix(c(0.4,0.6,0.4,0.6,0.4,0.6),nrow=2,ncol=3))\n\n#For testing purposes only; a larger number of simulations is required (see comments below)\nmax_FI(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), ratio_Delta_star_d1=c(1), l=c(0.7962, 2.5204), \nu=c(2.7625, 2.5204), type_outcome=\"binary\", param_theta=theta_assumption, pow=0.9, \nordering=FALSE, increasing_theta=FALSE, seed=140691, n_trials=3, rule=1)\n \n#max_FI(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), ratio_Delta_star_d1=c(1), l=c(0.7962, 2.5204), \n#u=c(2.7625, 2.5204), type_outcome=\"binary\", param_theta=theta_assumption, pow=0.9, \n#ordering=FALSE, increasing_theta=FALSE, seed=140691, n_trials=10000000, rule=1)\n\n#max_FI(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), ratio_Delta_star_d1=c(1), l=c(0.7962, 2.5204), \n#u=c(2.7625, 2.5204), type_outcome=\"binary\", param_theta=theta_assumption, pow=0.9,\n#ordering=FALSE, increasing_theta=FALSE, seed=140691, n_trials=10000000, rule=2)\n\n\n"} {"package":"GSED","topic":"sim_magnusson_turnbull","snippet":"### Name: sim_magnusson_turnbull\n### Title: Simulations of trials with GSED\n### Aliases: sim_magnusson_turnbull\n\n### ** Examples\n\n#For testing purposes only; a larger number of simulations is required (see comments below)\nsim_magnusson_turnbull(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), l=c(0.7962, 2.5204), \nu=c(2.7625, 2.5204), ratio_Delta_star_d1=c(1), type_outcome=\"binary\", param_outcome=\nlist(matrix(c(0.4,0.4,0.4,0.6,0.6,0.6),nrow=2,ncol=3,byrow=TRUE)), n_max=1496, \nordering=FALSE, nsim=2, seed=42)\n\n#sim_magnusson_turnbull(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), l=c(0.7962, 2.5204), \n#u=c(2.7625, 2.5204), ratio_Delta_star_d1=c(1), type_outcome=\"binary\", param_outcome=\n#list(matrix(c(0.4,0.4,0.4,0.6,0.6,0.6),nrow=2,ncol=3,byrow=TRUE)), n_max=1496, \n#ordering=FALSE, nsim=1000, seed=42)\n\n#sim_magnusson_turnbull(K_stages=2, N_subsets=3, f=c(0.6,0.2,0.2), l=c(0.7962, 2.5204), \n#u=c(2.7625, 2.5204), ratio_Delta_star_d1=c(1), type_outcome=\"binary\", 
param_outcome=\n#list(matrix(c(0.5,0.5,0.5,0.5,0.5,0.5),nrow=2,ncol=3,byrow=TRUE)), n_max=1496, \n#ordering=FALSE, nsim=1000, seed=42)\n\n#sim_magnusson_turnbull(K_stages=2, N_subsets=4, f=c(0.25,0.25,0.25,0.25), l=c(0.98,2.35), \n#u=c(2.59,2.35), ratio_Delta_star_d1=c(1), type_outcome=\"survival\", incl_rate=1/28, \n#mean_cur_c=7/log(2), HR=c(0.8,0.8,0.8,0.8), nb_required=1030, ordering=TRUE, \n#increasing_theta=FALSE, nsim=1000, seed=42)\n\n\n"} {"package":"GSED","topic":"stage_1_evaluation","snippet":"### Name: stage_1_evaluation\n### Title: Stage 1-evaluation step of GSED\n### Aliases: stage_1_evaluation\n\n### ** Examples\n\nstage_1_evaluation(keep=c(2,3), Z_1j=c(-0.49,1.07,1.44), f=c(0.6,0.2,0.2), u=c(2.7625,2.5204))\n\n\n"} {"package":"GSED","topic":"stage_1_selection","snippet":"### Name: stage_1_selection\n### Title: Stage 1-selection step of GSED\n### Aliases: stage_1_selection\n\n### ** Examples\n\nstage_1_selection(N_subsets=3, Z_1j=c(-0.49,1.07,1.44), l=c(0.7962,2.5204), ordering=FALSE)\n\n\n"} {"package":"LCFdata","topic":"eeg","snippet":"### Name: eeg\n### Title: ERP amplitudes at electrodes Fz, Cz, Pz, and Oz from 0 to 300\n### milliseconds.\n### Aliases: eeg\n### Keywords: datasets\n\n### ** Examples\n\ndata(eeg)\nsummary(eeg)\n\n\n"} {"package":"LCFdata","topic":"erpFz","snippet":"### Name: erpFz\n### Title: ERP amplitudes at electrode Fz restricted to the 100 to 175\n### millisecond time window.\n### Aliases: erpFz\n### Keywords: datasets\n\n### ** Examples\n\ndata(erpFz)\nsummary(erpFz)\n\n\n"} {"package":"LCFdata","topic":"z","snippet":"### Name: z\n### Title: Plotting data generated from a linear mixed-effects model from\n### Tremblay & Newman (In Preparation).\n### Aliases: z\n### Keywords: datasets\n\n### ** Examples\n\ndata(z)\nstr(z)\n\n\n"} {"package":"fpp","topic":"a10","snippet":"### Name: a10\n### Title: Monthly anti-diabetic drug sales in Australia from 1992 to 2008.\n### Aliases: a10\n### Keywords: datasets\n\n### ** Examples\n\nplot(a10)\nseasonplot(a10)\n\n\n"} {"package":"fpp","topic":"ausair","snippet":"### Name: ausair\n### Title: Air Transport Passengers Australia\n### Aliases: ausair\n### Keywords: datasets\n\n### ** Examples\n\nplot(ausair)\n\n\n"} {"package":"fpp","topic":"ausbeer","snippet":"### Name: ausbeer\n### Title: Quarterly Australian Beer production\n### Aliases: ausbeer\n### Keywords: datasets\n\n### ** Examples\n\ndata(ausbeer)\nseasonplot(ausbeer)\n\n\n"} {"package":"fpp","topic":"austa","snippet":"### Name: austa\n### Title: International visitors to Australia\n### Aliases: austa\n### Keywords: datasets\n\n### ** Examples\n\nplot(austa)\n\n\n"} {"package":"fpp","topic":"austourists","snippet":"### Name: austourists\n### Title: International Tourists to Australia: Total visitor nights.\n### Aliases: austourists\n### Keywords: datasets\n\n### ** Examples\n\nplot(austourists)\n\n\n"} {"package":"fpp","topic":"cafe","snippet":"### Name: cafe\n### Title: Quarterly expenditure on eating out in Australia\n### Aliases: cafe\n### Keywords: datasets\n\n### ** Examples\n\nplot(cafe)\n\n\n"} {"package":"fpp","topic":"credit","snippet":"### Name: credit\n### Title: Credit ratings on personal loans from an Australian bank.\n### Aliases: credit\n### Keywords: datasets\n\n### ** Examples\n\ndata(credit)\n\n\n"} {"package":"fpp","topic":"debitcards","snippet":"### Name: debitcards\n### Title: Retail debit card usage in Iceland.\n### Aliases: debitcards\n### Keywords: datasets\n\n### ** Examples\n\nplot(debitcards)\n\n\n"} 
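{"package":"fpp","topic":"debitcards","snippet":"### Note: editor-added usage sketch, not part of the original fpp documentation\n\n### ** Examples\n\n# A minimal, hedged sketch of how these fpp time series are typically used:\n# fit an automatic exponential smoothing (ETS) model and plot its forecasts.\n# Assumes the 'forecast' package, which fpp loads, is attached.\nlibrary(fpp)\nfit <- ets(debitcards) # automatic ETS model selection\nplot(forecast(fit, h = 24)) # 24-month-ahead forecasts with prediction intervals\n\n\n"}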
{"package":"fpp","topic":"departures","snippet":"### Name: departures\n### Title: Total monthly departures from Australia\n### Aliases: departures\n### Keywords: datasets\n\n### ** Examples\n\nplot(departures)\n\n\n"} {"package":"fpp","topic":"elecequip","snippet":"### Name: elecequip\n### Title: Electrical equipment manufactured in the Euro area.\n### Aliases: elecequip\n### Keywords: datasets\n\n### ** Examples\n\nplot(elecequip)\n\n\n"} {"package":"fpp","topic":"elecsales","snippet":"### Name: elecsales\n### Title: Electricity sales to residential customers in South Australia.\n### Aliases: elecsales\n### Keywords: datasets\n\n### ** Examples\n\nplot(elecsales)\n\n\n"} {"package":"fpp","topic":"euretail","snippet":"### Name: euretail\n### Title: Quarterly retail trade: Euro area.\n### Aliases: euretail\n### Keywords: datasets\n\n### ** Examples\n\nplot(euretail)\n\n\n"} {"package":"fpp","topic":"fuel","snippet":"### Name: fuel\n### Title: Fuel economy data on 2009 vehicles in the US.\n### Aliases: fuel\n### Keywords: datasets\n\n### ** Examples\n\ndata(fuel)\n\n\n"} {"package":"fpp","topic":"guinearice","snippet":"### Name: guinearice\n### Title: Rice production (Guinea)\n### Aliases: guinearice\n### Keywords: datasets\n\n### ** Examples\n\nplot(guinearice)\n\n\n"} {"package":"fpp","topic":"h02","snippet":"### Name: h02\n### Title: Monthly cortecosteroid drug sales in Australia from 1992 to\n### 2008.\n### Aliases: h02\n### Keywords: datasets\n\n### ** Examples\n\nplot(h02)\nseasonplot(h02)\n\n\n"} {"package":"fpp","topic":"insurance","snippet":"### Name: insurance\n### Title: Insurance quotations and advertising expenditure.\n### Aliases: insurance\n### Keywords: datasets\n\n### ** Examples\n\nplot(insurance)\n\n\n"} {"package":"fpp","topic":"livestock","snippet":"### Name: livestock\n### Title: Livestock (sheep) in Asia, 1961-2007.\n### Aliases: livestock\n### Keywords: datasets\n\n### ** Examples\n\nplot(livestock)\n\n\n"} {"package":"fpp","topic":"melsyd","snippet":"### Name: melsyd\n### Title: Total weekly air passenger numbers on Ansett airline flights\n### between Melbourne and Sydney, 1987-1992.\n### Aliases: melsyd\n### Keywords: datasets\n\n### ** Examples\n\nplot(melsyd)\n\n\n"} {"package":"fpp","topic":"oil","snippet":"### Name: oil\n### Title: Annual oil production in Saudi Arabia\n### Aliases: oil\n### Keywords: datasets\n\n### ** Examples\n\nplot(oil)\n\n\n"} {"package":"fpp","topic":"sunspotarea","snippet":"### Name: sunspotarea\n### Title: Annual average sunspot area (1875-2011)\n### Aliases: sunspotarea\n### Keywords: datasets\n\n### ** Examples\n\nplot(sunspotarea)\n\n\n"} {"package":"fpp","topic":"usconsumption","snippet":"### Name: usconsumption\n### Title: Growth rates of personal consumption and personal income in the\n### USA.\n### Aliases: usconsumption\n### Keywords: datasets\n\n### ** Examples\n\nplot(usconsumption)\n\n\n"} {"package":"fpp","topic":"usmelec","snippet":"### Name: usmelec\n### Title: Electricity monthly total net generation. 
January 1973-October\n### 2010.\n### Aliases: usmelec\n### Keywords: datasets\n\n### ** Examples\n\nplot(usmelec)\n\n\n"} {"package":"fpp","topic":"vn","snippet":"### Name: vn\n### Title: Quarterly visitor nights for various regions of Australia.\n### Aliases: vn\n### Keywords: datasets\n\n### ** Examples\n\nplot(vn)\n\n\n"} {"package":"fpp","topic":"wmurders","snippet":"### Name: wmurders\n### Title: Monthly female murder rate (per 100,000 standard population) in\n### the USA.\n### Aliases: wmurders\n### Keywords: datasets\n\n### ** Examples\n\nplot(wmurders)\n\n\n"} {"package":"disordR","topic":"Arith","snippet":"### Name: Arith\n### Title: Arithmetic operations\n### Aliases: Arith disord_inverse disord_negative disord_positive\n### disord_mod_disord disord_mod_numeric numeric_mod_disord\n### disord_plus_disord disord_plus_numeric disord_power_disord\n### disord_power_numeric numeric_power_disord disord_prod_disord\n### disord_prod_numeric disord_arith_disord disord_arith_numeric\n### disord_arith_unary disord_show disindex_show disord_unary\n### numeric_arith_disord\n\n### ** Examples\n\n\na <- rdis()\na\na + 2*a\na > 5\na[a > 5] <- a[a > 5] + 100\na\n\n\n"} {"package":"disordR","topic":"Compare-methods","snippet":"### Name: Compare-methods\n### Title: Methods for comparison of 'disord' objects\n### Aliases: Compare-methods Compare,disord,ANY-method\n### Compare,disord,disord-method Compare,ANY,disord-method\n### any_compare_disord disord_compare_any disord_compare_disord\n### Keywords: methods math\n\n### ** Examples\n\nrdis() > 4\nrdis() > 1000\n\n\n"} {"package":"disordR","topic":"consistent","snippet":"### Name: consistent\n### Title: Check for consistency\n### Aliases: consistent is.consistent check_matching_hash %~%\n### Keywords: symbolmath\n\n### ** Examples\n\n\n# rdis() + rdis() # this would make check_matching_hash() report an error, if executed\n\n\n\n"} {"package":"disordR","topic":"disindex-class","snippet":"### Name: disindex-class\n### Title: Experimental class '\"disindex\"'\n### Aliases: disindex-class disindex values\n### Keywords: classes\n\n### ** Examples\n\n\n\n(x <- disord(c(1,2,1,2,2,7)))\n\nx==2\nw <- which(x==2)\nw\n\nx[w] <- 100\nx\n\n\n\n\n\n\n\n"} {"package":"disordR","topic":"disord-class","snippet":"### Name: disord-class\n### Title: Class '\"disord\"'\n### Aliases: disord-class\n### Keywords: classes\n\n### ** Examples\nshowClass(\"disord\")\n\n"} {"package":"disordR","topic":"disord","snippet":"### Name: disord\n### Title: Functionality for 'disord' objects\n### Aliases: disord hash hashcal as_disord is.disord accessors disord\n### elements disord<-\n### Keywords: symbolmath\n\n### ** Examples\n\n\n(a <- rdis())\n(b <- rdis())\n\na + 2*a + 2^a # fine\n# a + b # this would give an error if executed\n\na[a<0.5] <- 0 # round down; replacement works as expected\n\nelements(a)\n\n\n\n"} {"package":"disordR","topic":"drop","snippet":"### Name: drop\n### Title: Drop redundant information\n### Aliases: drop drop,disord-method allsame\n\n### ** Examples\n\ndisord(c(3,3,3,3,3)) # default is drop=TRUE\ndisord(c(3,3,3,3,3),drop=FALSE) # retains disord class\n\ndrop(disord(c(3,3,3,3),drop=FALSE)) \n\n## In extraction, argument drop discards disorderliness when possible:\na <- rdis()\na\na[] <- 6 # a becomes a vector\na\n\n\n\n"} {"package":"disordR","topic":"extract","snippet":"### Name: extract\n### Title: Extraction and replacement methods for class '\"disord\"'\n### Aliases: extract index-class [ [[ [.disord [,disord-method\n### [,disord,index,ANY-method 
[,disord,index,missing-method\n### [,disord,ANY,ANY-method [,disord,index,index-method\n### [,disord,missing,index-method [,disord,disord,missing-method\n### [,disord,disord,missing,ANY-method [,disord,missing,missing-method\n### [,disord,index,ANY,ANY-method [,disord,index,missing,ANY-method\n### [,ANY,disord,ANY-method [,disord,missing,missing,ANY-method [<-\n### [<-.disord [<-,disord-method [<-,disord,ANY,ANY-method\n### [<-,disord,index,ANY,ANY-method [<-,disord,index,missing,ANY-method\n### [<-,disord,index,missing,numeric-method\n### [<-,disord,index,missing,disord-method\n### [<-,disord,missing,missing,numeric-method\n### [<-,disord,missing,missing,disord-method\n### [<-,disord,index,index-method [<-,disord,index,missing-method\n### [<-,disord,missing,index-method [<-,disord,missing,missing-method\n### [<-,disord,disord,missing,ANY-method\n### [<-,disord,disord,missing,disord-method\n### [<-,disord,disord,missing-method\n### [<-,disord,missing,missing,ANY-method [[,disord,index-method\n### [[<-,disord,index-method Arith,ANY,disord-method\n### Arith,disord,ANY-method Arith,disord,disord-method\n### Arith,disord,missing-method [,disord,disindex,missing,ANY-method\n### [,disord,disindex,ANY,ANY-method [,ANY,disindex,ANY,ANY-method\n### [,disord,disindex,missing,ANY-method\n### [<-,disord,disindex,ANY,ANY-method\n### [<-,disord,disindex,missing,ANY-method [[,disord,disindex-method\n### [[,ANY,disindex-method [[<-,disord,disindex,missing,ANY-method\n### [[<-,ANY,disindex,ANY,ANY-method [[<-,ANY,disindex,ANY-method\n### [[<-,disord,disindex,missing-method [[<-,disord,disindex,ANY-method\n### [[<-,disord,index,ANY-method\n\n### ** Examples\n\na <- disord(sample(9))\na\na + 6*a^2\na[a>5] # \"give me all elements of a that exceed 5\"\n\na[] # a disord object, same elements as 'a', but with a different hash\n\na[a<5] <- a[a<5] + 100 # \"replace all elements of 'a' less than 5 with their value plus 100\"\na\n\n## Following expressions would return an error if executed:\nif(FALSE){\n a[1]\n a[1] <- 44\n a[1:2] <- a[3:4]\n}\n\nb <- disord(sample(9))\n## Following expressions would also return an error if executed:\nif(FALSE){\n a+b # (not really an example of extraction)\n a[b>5]\n a[b>5] <- 100\n a[b>5] <- a[b>5] + 44\n}\n\n\n\n"} {"package":"disordR","topic":"Logic","snippet":"### Name: Logic\n### Title: Logical operations\n### Aliases: Logic disord_logic disord_logic_disord disord_logic_any\n### any_logic_disord disord_logic_unary disord_logic_missing\n### disord_logical_negate\n\n### ** Examples\n\n\na <- disord(1:7)\nl <- a>3\nsum(l)\nany(l)\nall(l | !l)\n\n\n\n"} {"package":"disordR","topic":"misc","snippet":"### Name: misc\n### Title: Miscellaneous functions\n### Aliases: misc length length.disord length.disindex length<-\n### length<-.disord length,disord-method length,disindex-method\n### length<-,disord-method rev rev,disord-method rev.disord sort\n### sort,disord-method sort.disord sapply sapply,disord-method\n### sapply.disord lapply lapply,disord-method lapply.disord is.na\n### is.na.disord is.na<- is.na<-.disord is.na,disord-method\n### is.na<-,disord-method match match,disord-method\n### match,disord,ANY-method match,ANY,disord-method\n### match,ANY,disord-method match,disord,disord-method %in%\n### %in%,disord-method %in%,disord,ANY-method %in%,ANY,disord-method\n### %in%,ANY,disord-method %in%,disord,disord-method !,disord-method\n### as.logical,disord-method as.numeric,disord-method\n### as.double,disord-method as.list,disord-method\n### as.character,disord-method 
as.complex,disord-method\n### unlist,disord-method which,disord-method which,disindex-method unlist\n### diff\n\n### ** Examples\n\na <- disord(c(a=1,b=2,c=7))\na\nnames(a)\nlength(a)\nsqrt(a)\n\n\n# powers() and vars() in the mvp package return lists; see the vignette\n# for more discussion.\n\nl <- disord(list(3,6:9,1:10)) \nsapply(l,length)\n\nunlist(l)\n\n## Quick illustration of rev():\n\nrevstring <- function(s){paste(rev(unlist(strsplit(s, NULL))),collapse=\"\")}\nx <- rdis()\nrevstring(hash(x)) == hash(rev(x))\n\n\n\n"} {"package":"disordR","topic":"rdis","snippet":"### Name: rdis\n### Title: Random disord objects\n### Aliases: rdis rdisord rdisordR\n\n### ** Examples\n\nrdis()\nrdis(99)\nrdis(letters)\n\n\n"} {"package":"disordR","topic":"summary.disordR","snippet":"### Name: summary.disordR\n### Title: Summaries of disord objects\n### Aliases: summary.disordR summary.disord print.summary.disord\n### summary,disord-method summary,disindex-method\n\n### ** Examples\n\nsummary(rdis(1000))\n\n\n"} {"package":"ROI.plugin.quadprog","topic":"ROI.plugin.quadprog_Example_1","snippet":"### Name: Example-1\n### Title: Quadratic Problem 1\n### Aliases: ROI.plugin.quadprog_Example_1\n\n### ** Examples\n\n\nrequire(\"ROI\")\nA <- cbind(c(-4, -3, 0), \n c( 2, 1, 0), \n c( 0, -2, 1))\nx <- OP(Q_objective(diag(3), L = c(0, -5, 0)),\n L_constraint(L = t(A),\n dir = rep(\">=\", 3),\n rhs = c(-8, 2, 0)))\n\nopt <- ROI_solve(x, solver=\"quadprog\")\nopt\n## Optimal solution found.\n## The objective value is: -2.380952e+00\nsolution(opt)\n## [1] 0.4761905 1.0476190 2.0952381\n\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Agrupadas","snippet":"### Name: Agrupadas\n### Title: Grouped or tabulated data set\n### Aliases: Agrupadas\n### Keywords: datasets\n\n### ** Examples\n\ndata(Agrupadas)\ncalcularResumenDatosTabulados(l_inf=Agrupadas$Linf, l_sup=Agrupadas$Lsup,\n ni=Agrupadas$ni, statistics =c(\"mean\", \"sd\", \"IQR\", \"quantiles\"), quantiles\n = c(0,0.25,0.5,0.75,1), tablaFrecuencia=FALSE)\n\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"ComplexIN","snippet":"### Name: ComplexIN\n### Title: Complex index numbers\n### Aliases: ComplexIN\n\n### ** Examples\n\n\ndf <- data.frame(Index=round(runif(12,80,105),2))\nComplexIN(df, means = c(\"arithmetic\", \"geometric\", \"harmonic\"))\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Cprop.test","snippet":"### Name: Cprop.test\n### Title: Test for proportions of one or two samples\n### Aliases: Cprop.test\n\n### ** Examples\n\n## Proportion for a sample\nCprop.test(1,6) # 1 success in 6 attempts\n\n#### With a data set: proportion of cars not manufactured in US\ndata(cars93) #data set provided with the package\nexitos<-sum(cars93$USA == \"nonUS\")\ntotal<-length(cars93$USA)\nCprop.test(ex=exitos, nx=total)\n\n## Difference of proportions\nCprop.test(1,6,3,15)\n # Sample 1: 1 success in 6 attempts\n # Sample 2: 3 success in 15 attempts\n\n#### With a data set: difference of proportions of cars not manufactured in US \n#### between manual and automatic\nexitosx<-sum(cars93$USA == \"nonUS\" & cars93$Manual == \"Yes\" )\ntotalx<-sum(cars93$Manual == \"Yes\")\nexitosy<-sum(cars93$USA == \"nonUS\" & cars93$Manual == \"No\" 
)\ntotaly<-sum(cars93$Manual == \"No\")\nCprop.test(ex=exitosx, nx=totalx,ey=exitosy, ny=totaly)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"DMKV.test","snippet":"### Name: DMKV.test\n### Title: Z-test for the difference of means of two independent Normal\n### variables with known population variances.\n### Aliases: DMKV.test\n\n### ** Examples\n\ndata(cars93) # Data set provided with the package\n# Maximum price difference (MaxPrice) in means between cars manufactured in the\n# US and those manufactured outside, assuming that the variances are known and \n# equal to 64 and 169, respectively\nvar1<-subset(cars93, USA==\"nonUS\", select=MaxPrice)\nvar2<-subset(cars93, USA==\"US\", select=MaxPrice)\nDMKV.test(var1, var2, sdx=13, sdy=8, difmu=0,\nalternative=\"greater\", conf.level=0.95)\n\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Deflat","snippet":"### Name: Deflat\n### Title: Deflation of an economic series\n### Aliases: Deflat\n\n### ** Examples\n\ndata(Depositos, package = \"RcmdrPlugin.TeachStat\")\nDeflat(Depositos, \"year\", \"quantity\", \"G_IPC_2016\", \"2018\")\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Depositos","snippet":"### Name: Depositos\n### Title: Deposits with credit institutions in Ourense\n### Aliases: Depositos\n### Keywords: datasets\n\n### ** Examples\n\ndata(Depositos)\n\n.Sindex <- Sindex(Depositos, \"year\", \"quantity\", \"2010\")*100\nprint(.Sindex)\n\nDeflat(Depositos, \"year\", \"quantity\", \"E_IPC_2016\", \"2011\")\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"MKV.test","snippet":"### Name: MKV.test\n### Title: Z-test for the mean of a Normal variable with known population\n### variance.\n### Aliases: MKV.test\n\n### ** Examples\n\ndata(cars93) # Dataset provided with the package\n# Mean maximum price (MaxPrice) less than 20 thousand $ assuming that the \n# variance is known and equal to 11\nMKV.test(cars93$MaxPrice, sd=11, alternative=\"less\", mu=20, conf.level=0.95)\n\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Prices","snippet":"### Name: Prices\n### Title: Data for computing price indices.\n### Aliases: Prices\n### Keywords: datasets\n\n### ** Examples\n\ndata(Prices)\npriceIndexNum (Prices, prodID =\"prodID\", pervar =\"year\", pvar=\"price\", \n qvar =\"quantity\", base=\"2001\", indexMethod =c(\"laspeyres\", \"paasche\", \"fisher\"))\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"Sindex","snippet":"### Name: Sindex\n### Title: Simple index numbers\n### Aliases: Sindex\n\n### ** Examples\n\ndata(Depositos, package = \"RcmdrPlugin.TeachStat\")\nSindex(Depositos, \"year\", \"quantity\", \"2006\")\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"VKM.test","snippet":"### Name: VKM.test\n### Title: Chi-square test for the variance of a Normal variable with known\n### population mean.\n### Aliases: VKM.test\n\n### ** Examples\n\ndata(cars93) # Dataset provided with the package\n# Variance of the maximum price (MaxPrice) assuming that the population mean\n# price is known and equal to 22\nVKM.test(cars93$MaxPrice, alternative=\"two.sided\", sigma=11, mu=22, conf.level=0.95)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"VUM.test","snippet":"### Name: VUM.test\n### Title: Chi-square test for the variance of a Normal variable with\n### unknown population mean.\n### Aliases: VUM.test\n\n### ** Examples\n\ndata(cars93) # Dataset provided with the package\n# Variance of the maximum price (MaxPrice) assuming that the population mean \n# price is unknown\nVUM.test(cars93$MaxPrice, alternative=\"two.sided\", 
sigma=11, conf.level=0.95)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"W.numSummary","snippet":"### Name: W.numSummary\n### Title: Summary statistics for weighted variables\n### Aliases: W.numSummary\n\n### ** Examples\n\ndata(cars93)\n\n# no weighted\nW.numSummary(data=cars93[,c(\"CityMPG\")], statistics =c(\"mean\", \"sd\", \"IQR\", \"quantiles\"),\n quantiles = c(0,0.25,0.5,0.75,1), weights=NULL, groups=NULL)\n# weighted\nW.numSummary(data=cars93[,c(\"CityMPG\")], statistics =c(\"mean\", \"sd\", \"IQR\", \"quantiles\"),\n quantiles = c(0,0.25,0.5,0.75,1), weights=cars93$FuelCapacity, groups=NULL)\n# no weighted\nW.numSummary(data=cars93[,c(\"CityMPG\")], statistics =c(\"mean\", \"sd\", \"IQR\", \"quantiles\"),\n quantiles = c(0,0.25,0.5,0.75,1), weights=NULL, groups=cars93$Manual)\n# weighted\nbb <- W.numSummary(data=cars93[,c(\"CityMPG\")], statistics =c(\"mean\", \"sd\", \"IQR\", \"quantiles\"),\n quantiles = c(0,0.25,0.5,0.75,1), weights=cars93$FuelCapacity, groups=cars93$Manual)\n\nbb\nstr(bb)\nclass(bb)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"aovreml","snippet":"### Name: aovreml\n### Title: ANOVA with random effects using the (REstricted) Maximum\n### Likelihood method.\n### Aliases: aovreml\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (formula, data = NULL, Lconfint = FALSE, REML = TRUE, \n ...) \n{\n vars <- all.vars(formula)\n formulaaov <- as.formula(paste(vars[1], \"~\", vars[2]))\n ANOV <- aov(formulaaov, data, ...)\n .ANOV <- summary(ANOV)\n cat(\"-------------------------------\")\n cat(\"\\n\", gettext(\"ANOVA table\", domain = \"R-RcmdrPlugin.TeachStat\"), \n \":\\n\", sep = \"\")\n print(.ANOV)\n cat(\"\\n-------------------------------\\n\")\n .sol <- lme4::lmer(formula, data = data, REML = REML, ...)\n .varcor <- lme4::VarCorr(.sol)\n .sighat2 <- unname(attr(.varcor, \"sc\"))^2\n .sighatalph2 <- unname(attr(.varcor[[vars[2]]], \"stddev\"))^2\n .prop <- .sighatalph2/(.sighatalph2 + .sighat2)\n estim <- c(.sighat2, .sighatalph2, .prop)\n names(estim) <- c(\"var (Error)\", \"var (Effect)\", \"% var (Effect)\")\n cat(\"\\n\", gettext(\"Components of Variance\", domain = \"R-RcmdrPlugin.TeachStat\"), \n \" (\", lme4::methTitle(.sol@devcomp$dims), \"):\\n\", sep = \"\")\n print(estim)\n if (Lconfint) {\n cat(\"\\n\", gettext(\"Confidence intervals\", domain = \"R-RcmdrPlugin.TeachStat\"), \n \":\\n\", sep = \"\")\n print(confint(.sol, oldNames = FALSE))\n }\n return(invisible(list(model = .sol, estimation = estim)))\n }\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"aovremm","snippet":"### Name: aovremm\n### Title: ANOVA with random effects using the Moments method.\n### Aliases: aovremm\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (formula, data = NULL, ...) 
\n{\n ANOV <- aov(formula, data, ...)\n .ANOV <- summary(ANOV)\n cat(\"-------------------------------\")\n cat(\"\\n\", gettext(\"ANOVA table\", domain = \"R-RcmdrPlugin.TeachStat\"), \n \":\\n\", sep = \"\")\n print(.ANOV)\n cat(\"\\n-------------------------------\\n\\n\")\n .sighat2 <- .ANOV[[1]]$`Mean Sq`[2]\n .vars <- all.vars(formula)\n .groups <- data[[.vars[2]]][!is.na(data[[.vars[1]]])]\n .n <- length(.groups)\n .ni <- table(.groups)\n .c <- (.n^2 - sum(.ni^2))/(.n * (length(.ni) - 1))\n .sighatalph2 <- (.ANOV[[1]]$`Mean Sq`[1] - .sighat2)/.c\n if (.sighatalph2 < 0) \n warning(\"Estimation of any variance component is not positive. The variance \n component model is inadequate.\")\n .prop <- .sighatalph2/(.sighatalph2 + .sighat2)\n estim <- c(.sighat2, .sighatalph2, .prop)\n names(estim) <- c(\"var (Error)\", \"var (Effect)\", \"% var (Effect)\")\n cat(\"\\n\", gettext(\"Components of Variance\", domain = \"R-RcmdrPlugin.TeachStat\"), \n \":\\n\", sep = \"\")\n print(estim)\n return(invisible(list(model = ANOV, estimation = estim)))\n }\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"calcularResumenDatosTabulados","snippet":"### Name: calcularResumenDatosTabulados\n### Title: Summary statistics for tabulated data\n### Aliases: calcularResumenDatosTabulados\n\n### ** Examples\n\n\ndata(cars93)\ncortes <- seq(from=1500, to=4250, by=250)\naa <- cut( cars93$Weight, breaks=cortes, dig.lab=4)\nni <- table(aa)\nl_inf <- cortes[-length(cortes)]\nl_sup <- cortes[-1]\nagrup <- data.frame(l_inf,l_sup,ni)\nhead(agrup)\n\ncalcularResumenDatosTabulados(agrup$l_inf, agrup$l_sup, agrup$Freq)\ncalcularResumenDatosTabulados(agrup$l_inf, agrup$l_sup, agrup$Freq, tabla=TRUE)\n\nbb <- calcularResumenDatosTabulados(agrup$l_inf, agrup$l_sup, agrup$Freq,\n statistics=c(\"mean\",\"mode\") )\nbb\nstr(bb)\nclass(bb$.summary)\nclass(bb$.table)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"calcularResumenVariablesContinuas","snippet":"### Name: calcularResumenVariablesContinuas\n### Title: Summary statistics for continuous variables\n### Aliases: calcularResumenVariablesContinuas\n\n### ** Examples\n\n## Not run: \n##D data(cars93)\n##D calcularResumenVariablesContinuas(data=cars93[\"FuelCapacity\"],group=NULL)\n##D calcularResumenVariablesContinuas(data=cars93[\"FuelCapacity\"],group=cars93$Airbags)\n##D bb <- calcularResumenVariablesContinuas(data=cars93[\"FuelCapacity\"],group=cars93$Airbags,\n##D tablaFrecuencia=TRUE)\n##D str(bb)\n##D bb\n##D bb$.summary\n##D class(bb$.summary)\n##D \n##D calcularResumenVariablesContinuas(data=cars93[\"MidPrice\"], tablaFrecuencia=TRUE)\n##D calcularResumenVariablesContinuas(data=cars93[\"MidPrice\"], tablaFrecuencia=TRUE, cortes=5)\n##D calcularResumenVariablesContinuas(data=cars93[\"MidPrice\"], tablaFrecuencia=TRUE,\n##D cortes=c(7,14,21,28,63))\n##D calcularResumenVariablesContinuas(data=cars93[\"MidPrice\"], tablaFrecuencia=TRUE,\n##D cortes=\"Scott\") \n##D calcularResumenVariablesContinuas(data=cars93[\"MidPrice\"], groups=cars93$Airbags, \n##D tablaFrecuencia=TRUE, cortes=5)\n## End(Not run)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"calcularResumenVariablesDiscretas","snippet":"### Name: calcularResumenVariablesDiscretas\n### Title: Summary statistics for discrete variables\n### Aliases: calcularResumenVariablesDiscretas\n\n### ** Examples\n\n## Not run: \n##D data(cars93)\n##D calcularResumenVariablesDiscretas(data=cars93[\"Cylinders\"],group=NULL)\n##D calcularResumenVariablesDiscretas(data=cars93[\"Cylinders\"],group=cars93$Airbags)\n##D 
bb <- calcularResumenVariablesDiscretas(data=cars93[\"Cylinders\"],group=cars93$Airbags,\n##D tablaFrecuencia=TRUE)\n##D str(bb)\n##D bb\n##D bb$.summary\n##D class(bb$.summary)\n##D \n##D calcularResumenVariablesDiscretas(data=cars93[\"Horsepower\"], tablaFrecuencia=TRUE)\n##D calcularResumenVariablesDiscretas(data=cars93[\"Horsepower\"], tablaFrecuencia=TRUE, cortes=5)\n##D calcularResumenVariablesDiscretas(data=cars93[\"Horsepower\"], tablaFrecuencia=TRUE,\n##D cortes=c(50,100,200,250,300))\n##D calcularResumenVariablesDiscretas(data=cars93[\"Horsepower\"], tablaFrecuencia=TRUE,\n##D cortes=\"Sturges\") \n##D calcularResumenVariablesDiscretas(data=cars93[\"Horsepower\"], groups=cars93$Airbags, \n##D tablaFrecuencia=TRUE, cortes=5)\n## End(Not run)\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"calcular_frecuencia","snippet":"### Name: calcular_frecuencia\n### Title: Frequency distributions for qualitative variables\n### Aliases: calcular_frecuencia tabla.frec.cualitativa\n\n### ** Examples\n\ndata(cars93)\naa <- calcular_frecuencia(df.nominal=cars93[\"Type\"], ordenado.frec=TRUE, df.ordinal=NULL, \n cuantil.p=0.5, iprint = TRUE)\ncalcular_frecuencia(df.nominal=NULL, ordenado.frec=TRUE, df.ordinal=cars93[\"Airbags\"], \n cuantil.p=0.25, iprint = TRUE)\nbb <- calcular_frecuencia(df.nominal=cars93[\"Type\"], ordenado.frec=TRUE, \n df.ordinal=cars93[\"Airbags\"], cuantil.p=0.25, iprint = FALSE)\nstr(bb)\nbb\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"characRV","snippet":"### Name: characRV\n### Title: Characteristics of Random Variables.\n### Aliases: characRV\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (D, charact = c(\"expectation\", \"median\", \"sd\", \"IQR\", \n \"skewness\", \"kurtosis\", \"moment\", \"cmoment\"), moment = 1, \n cmoment = 2) \n{\n if (missing(charact)) \n charact <- c(\"expectation\", \"sd\")\n charact <- match.arg(charact, c(\"expectation\", \"median\", \n \"sd\", \"IQR\", \"skewness\", \"kurtosis\", \"moment\", \"cmoment\"), \n several.ok = TRUE)\n moment <- if (\"moment\" %in% charact) \n moment\n else NULL\n cmoment <- if (\"cmoment\" %in% charact) \n cmoment\n else NULL\n mom <- if (!is.null(moment)) \n paste(\"alpha_\", moment, sep = \"\")\n else NULL\n cmom <- if (!is.null(cmoment)) \n paste(\"mu_\", cmoment, sep = \"\")\n else NULL\n chars <- c(c(\"expectation\", \"median\", \"sd\", \"IQR\", \"skewness\", \n \"kurtosis\")[c(\"expectation\", \"median\", \"sd\", \"IQR\", \"skewness\", \n \"kurtosis\") %in% charact], mom, cmom)\n nchars <- length(chars)\n table <- matrix(0, 1, nchars)\n rownames(table) <- gsub(\"[[:space:]]\", \"\", deparse(substitute(D)))\n colnames(table) <- chars\n if (\"expectation\" %in% chars) \n table[, \"expectation\"] <- distrEx::E(D)\n if (\"median\" %in% chars) \n table[, \"median\"] <- distrEx::median(D)\n if (\"sd\" %in% chars) \n table[, \"sd\"] <- distrEx::sd(D)\n if (\"IQR\" %in% chars) \n table[, \"IQR\"] <- distrEx::IQR(D)\n if (\"skewness\" %in% chars) \n table[, \"skewness\"] <- distrEx::skewness(D)\n if (\"kurtosis\" %in% chars) \n table[, \"kurtosis\"] <- distrEx::kurtosis(D)\n if (\"moment\" %in% charact) \n table[, mom] <- distrEx::E(D, fun = function(x) {\n x^moment\n })\n if (\"cmoment\" %in% charact) \n table[, cmom] <- distrEx::E(D, fun = function(x) {\n (x - distrEx::E(D))^cmoment\n })\n print(table)\n return(invisible(table))\n }\n\n\n"} 
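{"package":"RcmdrPlugin.TeachStat","topic":"characRV","snippet":"### Note: editor-added usage sketch, not part of the original package documentation\n\n### ** Examples\n\n# A minimal sketch of calling characRV() on a distribution object, inferred\n# from the function body shown above; assumes the 'distr' and 'distrEx'\n# packages (used internally by characRV) are installed, and that loading\n# RcmdrPlugin.TeachStat (an Rcmdr plug-in) is possible in your session.\nlibrary(RcmdrPlugin.TeachStat)\nlibrary(distr)\nD <- Norm(mean = 0, sd = 1)\ncharacRV(D, charact = c(\"expectation\", \"sd\", \"skewness\", \"kurtosis\"))\n# raw third moment (alpha_3) and central fourth moment (mu_4)\ncharacRV(D, charact = c(\"moment\", \"cmoment\"), moment = 3, cmoment = 4)\n\n\n"}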
{"package":"RcmdrPlugin.TeachStat","topic":"listTypesVariables","snippet":"### Name: listTypesVariables\n### Title: List of variables and types of a Data Frame\n### Aliases: listTypesVariables\n\n### ** Examples\n\nrequire(datasets)\nlistTypesVariables(\"iris\")\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"plotRegions","snippet":"### Name: plotRegions\n### Title: Plot regions in probability mass or density functions.\n### Aliases: plotRegions\n\n### ** Examples\n\n##---- Should be DIRECTLY executable !! ----\n##-- ==> Define data, use random,\n##--\tor do help(data=index) for the standard data sets.\n\n## The function is currently defined as\nfunction (D, add = FALSE, regions = NULL, col = \"gray\", legend = TRUE, \n legend.pos = \"topright\", to.draw.arg = 1, verticals = FALSE, \n ngrid = 1000, cex.points = par(\"cex\"), mfColRow = FALSE, \n lwd = par(\"lwd\"), ...) \n{\n dots <- match.call(call = sys.call(0), expand.dots = FALSE)$...\n if (!is.null(dots[[\"panel.first\"]])) {\n pF <- .panel.mingle(dots, \"panel.first\")\n }\n else if (to.draw.arg == 1) {\n pF <- quote(abline(h = 0, col = \"gray\"))\n }\n else if (to.draw.arg == 2) {\n pF <- quote(abline(h = 0:1, col = \"gray\"))\n }\n else {\n pF <- NULL\n }\n dots$panel.first <- pF\n if (!add) {\n do.call(plot, c(list(D, to.draw.arg = to.draw.arg, cex.points = cex.points, \n mfColRow = mfColRow, verticals = verticals), dots))\n }\n discrete <- is(D, \"DiscreteDistribution\")\n if (discrete) {\n x <- support(D)\n if (hasArg(\"xlim\")) {\n if (length(xlim) != 2) \n stop(\"Wrong length of Argument xlim\")\n x <- x[(x >= xlim[1]) & (x <= xlim[2])]\n }\n if (!is.null(regions)) {\n col <- rep(col, length = length(regions))\n for (i in 1:length(regions)) {\n region <- regions[[i]]\n which.xs <- (x > region[1] & x <= region[2])\n xs <- x[which.xs]\n ps <- d(D)(x)[which.xs]\n lines(xs, ps, type = \"h\", col = col[i], lwd = 3 * \n lwd, ...)\n points(xs, ps, pch = 16, col = col[i], cex = 2 * \n cex.points, ...)\n }\n if (legend) {\n if (length(unique(col)) > 1) {\n legend(legend.pos, title = if (length(regions) > \n 1) \n \"Regions\"\n else \"Region\", legend = sapply(regions, function(region) {\n paste(round(region[1], 2), \"to\", round(region[2], \n 2))\n }), col = col, pch = 15, pt.cex = 2.5, inset = 0.02)\n }\n else {\n legend(legend.pos, title = if (length(regions) > \n 1) \n \"Regions\"\n else \"Region\", legend = sapply(regions, function(region) {\n paste(round(region[1], 2), \"to\", round(region[2], \n 2))\n }), inset = 0.02)\n }\n }\n }\n }\n else {\n lower0 <- getLow(D, eps = getdistrOption(\"TruncQuantile\") * \n 2)\n upper0 <- getUp(D, eps = getdistrOption(\"TruncQuantile\") * \n 2)\n me <- (distr::q.l(D))(1/2)\n s <- (distr::q.l(D))(3/4) - (distr::q.l(D))(1/4)\n lower1 <- me - 6 * s\n upper1 <- me + 6 * s\n lower <- max(lower0, lower1)\n upper <- min(upper0, upper1)\n dist <- upper - lower\n if (hasArg(\"xlim\")) {\n if (length(xlim) != 2) \n stop(\"Wrong length of Argument xlim\")\n x <- seq(xlim[1], xlim[2], length = ngrid)\n }\n else x <- seq(from = lower - 0.1 * dist, to = upper + \n 0.1 * dist, length = ngrid)\n if (!is.null(regions)) {\n col <- rep(col, length = length(regions))\n for (i in 1:length(regions)) {\n region <- regions[[i]]\n which.xs <- (x >= region[1] & x <= region[2])\n xs <- x[which.xs]\n ps <- d(D)(x)[which.xs]\n xs <- c(xs[1], xs, xs[length(xs)])\n ps <- c(0, ps, 0)\n polygon(xs, ps, col = col[i])\n }\n if (legend) {\n if (length(unique(col)) > 1) {\n legend(legend.pos, title = if (length(regions) > \n 
1) \n \"Regions\"\n else \"Region\", legend = sapply(regions, function(region) {\n paste(round(region[1], 2), \"to\", round(region[2], \n 2))\n }), col = col, pch = 15, pt.cex = 2.5, inset = 0.02)\n }\n else {\n legend(legend.pos, title = if (length(regions) > \n 1) \n \"Regions\"\n else \"Region\", legend = sapply(regions, function(region) {\n paste(round(region[1], 2), \"to\", round(region[2], \n 2))\n }), inset = 0.02)\n }\n }\n }\n }\n return(invisible(NULL))\n }\n\n\n"} {"package":"RcmdrPlugin.TeachStat","topic":"priceIndexNum","snippet":"### Name: priceIndexNum\n### Title: Price index numbers\n### Aliases: priceIndexNum\n\n### ** Examples\n\nlibrary(IndexNumR)\ndata(Prices, package = \"RcmdrPlugin.TeachStat\")\n\npriceIndexNum(Prices, prodID = \"prodID\", pervar = \"year\", pvar = \"price\", \n qvar = \"quantity\", base = \"2003\", \n indexMethod = c(\"laspeyres\", \"paasche\", \"fisher\"))\n\n\n"} {"package":"queryparser","topic":"column_references","snippet":"### Name: column_references\n### Title: Return the column references in a parsed SQL query\n### Aliases: column_references\n\n### ** Examples\n\nmy_query <- \"SELECT f.flight,\n manufacturer, p.model\n FROM flights f\n JOIN planes p USING (tailnum);\"\n\ncolumn_references(parse_query(my_query), from = FALSE)\n\n\n"} {"package":"queryparser","topic":"extract_alias","snippet":"### Name: extract_alias\n### Title: Extract the column alias from a SQL expression\n### Aliases: extract_alias\n\n### ** Examples\n\nexpr <- \"round(AVG(arr_delay)) AS avg_delay\"\nextract_alias(expr)\n\n\n"} {"package":"queryparser","topic":"parse_expression","snippet":"### Name: parse_expression\n### Title: Parse a SQL expression\n### Aliases: parse_expression\n\n### ** Examples\n\nexpr <- \"round(AVG(arr_delay))\"\nparse_expression(expr)\n\n\n"} {"package":"queryparser","topic":"parse_query","snippet":"### Name: parse_query\n### Title: Parse a SQL query\n### Aliases: parse_query\n\n### ** Examples\n\nmy_query <- \"SELECT origin, dest,\n COUNT(flight) AS num_flts,\n round(AVG(distance)) AS dist,\n round(AVG(arr_delay)) AS avg_delay\n FROM flights\n WHERE distance BETWEEN 200 AND 300\n AND air_time IS NOT NULL\n GROUP BY origin, dest\n HAVING num_flts > 3000\n ORDER BY num_flts DESC, avg_delay DESC\n LIMIT 100;\"\n\nparse_query(my_query)\n\nparse_query(my_query, tidyverse = TRUE)\n\n\n"} {"package":"queryparser","topic":"split_query","snippet":"### Name: split_query\n### Title: Split a SQL query\n### Aliases: split_query\n\n### ** Examples\n\nmy_query <- \"SELECT origin, dest,\n COUNT(flight) AS num_flts,\n round(AVG(distance)) AS dist,\n round(AVG(arr_delay)) AS avg_delay\n FROM flights\n WHERE distance BETWEEN 200 AND 300\n AND air_time IS NOT NULL\n GROUP BY origin, dest\n HAVING num_flts > 3000\n ORDER BY num_flts DESC, avg_delay DESC\n LIMIT 100;\"\n\nsplit_query(my_query)\n\n\n"} {"package":"queryparser","topic":"unqualify_query","snippet":"### Name: unqualify_query\n### Title: Remove prefixes from column references in a parsed SQL query\n### Aliases: unqualify_query\n\n### ** Examples\n\nmy_query <- \"SELECT f.flight,\n manufacturer, p.model\n FROM flights f\n JOIN planes p USING (tailnum);\"\n\nunqualify_query(\n parse_query(my_query),\n prefixes = c(\"p\", \"f\")\n)\n\n\n"} {"package":"interflex","topic":"interflex","snippet":"### Name: interflex\n### Title: Flexible Interactive Models\n### Aliases: interflex\n\n### ** Examples\n\nlibrary(interflex)\ndata(interflex)\ns1.binning <-interflex(estimator = 'binning', Y = \"Y\", D = \"D\", X = \"X\", 
\ndata = s1)\n\ns1.linear <-interflex(estimator = 'linear', Y = \"Y\", D = \"D\", X = \"X\", \ndata = s1)\n\n\n\n\n"} {"package":"interflex","topic":"rcpparma_hello_world","snippet":"### Name: RcppArmadillo-Functions\n### Title: Set of functions in example RcppArmadillo package\n### Aliases: rcpparma_hello_world rcpparma_innerproduct\n### rcpparma_outerproduct rcpparma_bothproducts\n\n### ** Examples\n\n x <- sqrt(1:4)\n rcpparma_innerproduct(x)\n rcpparma_outerproduct(x)\n\n\n"} {"package":"pcutils","topic":"add_alpha","snippet":"### Name: add_alpha\n### Title: Add alpha for a Rcolor\n### Aliases: add_alpha\n\n### ** Examples\n\nadd_alpha(\"red\",0.3)\n\n\n"} {"package":"pcutils","topic":"add_theme","snippet":"### Name: add_theme\n### Title: Add a global gg_theme and colors for plots\n### Aliases: add_theme\n\n### ** Examples\n\nadd_theme()\n\n\n"} {"package":"pcutils","topic":"change_fac_lev","snippet":"### Name: change_fac_lev\n### Title: Change factor levels\n### Aliases: change_fac_lev\n\n### ** Examples\n\nchange_fac_lev(letters[1:5],levels = c(\"c\",\"a\"))\n\n\n"} {"package":"pcutils","topic":"count2","snippet":"### Name: count2\n### Title: Like uniq -c in shell to count a vector\n### Aliases: count2\n\n### ** Examples\n\ncount2(data.frame(group = c(\"A\", \"A\", \"B\", \"C\", \"C\", \"A\"), value = c(2, 2, 2, 1, 3, 1)))\n\n\n"} {"package":"pcutils","topic":"dabiao","snippet":"### Name: dabiao\n### Title: Print some message with =\n### Aliases: dabiao\n\n### ** Examples\n\ndabiao(\"Start running!\")\n\n\n\n"} {"package":"pcutils","topic":"explode","snippet":"### Name: explode\n### Title: Explode a data.frame if there are split character in one column\n### Aliases: explode\n\n### ** Examples\n\n## No test: \ndf <- data.frame(a = 1:2, b = c(\"a,b\", \"c\"), c = 3:4)\nexplode(df, \"b\", \",\")\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"get_cols","snippet":"### Name: get_cols\n### Title: Get n colors\n### Aliases: get_cols\n\n### ** Examples\n\nget_cols(10, \"col2\") -> my_cols\nscales::show_col(my_cols)\n## No test: \nscales::show_col(get_cols(15, RColorBrewer::brewer.pal(5, \"Set2\")))\nscales::show_col(get_cols(15, ggsci::pal_aaas()(5)))\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"gghuan","snippet":"### Name: gghuan\n### Title: Plot a doughnut chart\n### Aliases: gghuan\n\n### ** Examples\n\na <- data.frame(type = letters[1:6], num = c(1, 3, 3, 4, 5, 10))\ngghuan(a) + ggplot2::scale_fill_manual(values = get_cols(6, \"col3\"))\nb <- data.frame(type = letters[1:12], num = c(1, 3, 3, 4, 15, 10, 35, 2:6))\ngghuan(b) + ggplot2::theme(legend.position = \"right\")\n\n\n"} {"package":"pcutils","topic":"gghuan2","snippet":"### Name: gghuan2\n### Title: gghuan2 for multi-doughnut chart\n### Aliases: gghuan2\n\n### ** Examples\n\ndata.frame(a = c(\"a\", \"a\", \"b\", \"b\", \"c\"), aa = rep(\"a\", 5),\n b = c(\"a\", LETTERS[2:5]), c = 1:5) %>% gghuan2()\n\n\n"} {"package":"pcutils","topic":"grepl.data.frame","snippet":"### Name: grepl.data.frame\n### Title: Grepl applied on a data.frame\n### Aliases: grepl.data.frame\n\n### ** Examples\n\nmatrix(letters[1:6], 2, 3) |> as.data.frame() -> a\ngrepl.data.frame(\"c\", a)\ngrepl.data.frame(\"\\\\w\", a)\n\n\n"} {"package":"pcutils","topic":"group_box","snippet":"### Name: group_box\n### Title: Plot a boxplot\n### Aliases: group_box\n\n### ** Examples\n\na <- data.frame(a = 1:18, b = runif(18, 0, 5))\ngroup_box(a, group = rep(c(\"a\", \"b\", \"c\"), each = 6))\n\n\n"} {"package":"pcutils","topic":"hebing","snippet":"### Name: 
hebing\n### Title: Group your data\n### Aliases: hebing\n\n### ** Examples\n\ndata(otutab)\nhebing(otutab,metadata$Group)\n\n\n"} {"package":"pcutils","topic":"is.ggplot.color","snippet":"### Name: is.ggplot.color\n### Title: Judge if a characteristic is Rcolor\n### Aliases: is.ggplot.color\n\n### ** Examples\n\nis.ggplot.color(\"red\")\nis.ggplot.color(\"notcolor\")\nis.ggplot.color(NA)\nis.ggplot.color(\"#000\")\n\n\n"} {"package":"pcutils","topic":"mmscale","snippet":"### Name: mmscale\n### Title: Min_Max scale\n### Aliases: mmscale\n\n### ** Examples\n\nx <- runif(10)\nmmscale(x, 5, 10)\n\n\n"} {"package":"pcutils","topic":"multitest","snippet":"### Name: multitest\n### Title: Multi-groups test\n### Aliases: multitest\n\n### ** Examples\n\nmultitest(runif(30), rep(c(\"a\", \"b\", \"c\"), each = 10), print = FALSE, return = \"wilcox\") -> aa\n\n\n"} {"package":"pcutils","topic":"my_circo","snippet":"### Name: my_circo\n### Title: My circo plot\n### Aliases: my_circo\n\n### ** Examples\n\n## No test: \ndata.frame(a=c(\"a\",\"a\",\"b\",\"b\",\"c\"),b=c(\"a\",LETTERS[2:5]),c=1:5)%>%my_circo(mode=\"circlize\")\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"my_lm","snippet":"### Name: my_lm\n### Title: Fit a linear model and plot\n### Aliases: my_lm\n\n### ** Examples\n\n## No test: \nmy_lm(runif(50), var = 1:50)\nmy_lm(c(1:50) + runif(50, 0, 5), var = 1:50)\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"remove.outliers","snippet":"### Name: remove.outliers\n### Title: Remove outliers\n### Aliases: remove.outliers\n\n### ** Examples\n\nremove.outliers(c(1, 10:15))\n\n\n"} {"package":"pcutils","topic":"rgb2code","snippet":"### Name: rgb2code\n### Title: Transform a rgb vector to a Rcolor code\n### Aliases: rgb2code\n\n### ** Examples\n\nrgb2code(c(12, 23, 34))\nrgb2code(\"#69C404\", rev = TRUE)\n\n\n"} {"package":"pcutils","topic":"sanxian","snippet":"### Name: sanxian\n### Title: Three-line table\n### Aliases: sanxian\n\n### ** Examples\n\n## No test: \ndata(otutab)\nsanxian(otutab)\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"stackplot","snippet":"### Name: stackplot\n### Title: Plot a stack plot\n### Aliases: stackplot\n\n### ** Examples\n\ndata(otutab)\nstackplot(otutab, metadata, group = \"Group\")\n## No test: \nstackplot(otutab, metadata, group = \"Group\", group_order = TRUE, flow = TRUE, relative = FALSE)\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"strsplit2","snippet":"### Name: strsplit2\n### Title: Split Composite Names\n### Aliases: strsplit2\n\n### ** Examples\n\nstrsplit2(c(\"a;b\", \"c;d\"), \";\")\n\n\n"} {"package":"pcutils","topic":"tax_pie","snippet":"### Name: tax_pie\n### Title: Pie plot\n### Aliases: tax_pie\n\n### ** Examples\n\n## No test: \ndata(otutab)\ntax_pie(otutab,topN = 7)\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"tax_wordcloud","snippet":"### Name: tax_wordcloud\n### Title: Word cloud plot\n### Aliases: tax_wordcloud\n\n### ** Examples\n\n## No test: \ndata(otutab)\ntax_wordcloud(taxonomy$Genus)\n## End(No test)\n\n\n"} {"package":"pcutils","topic":"twotest","snippet":"### Name: twotest\n### Title: Two-group test\n### Aliases: twotest\n\n### ** Examples\n\ntwotest(runif(20), rep(c(\"a\", \"b\"), each = 10))\n\n\n"} {"package":"pcutils","topic":"update_param","snippet":"### Name: update_param\n### Title: Update the parameters\n### Aliases: update_param\n\n### ** Examples\n\nupdate_param(list(a=1,b=2),list(b=5,c=5))\n\n\n\n"} {"package":"pcutils","topic":"venn","snippet":"### Name: venn\n### Title: Plot a general 
venn (upset, flower)\n### Aliases: venn venn.list venn.data.frame\n\n### ** Examples\n\n## No test: \naa <- list(a = 1:3, b = 3:7, c = 2:4)\nvenn(aa, mode = \"venn\")\nvenn(aa, mode = \"venn2\", type = \"ChowRuskey\")\nvenn(aa, mode = \"upset\")\ndata(otutab)\nvenn(otutab, mode = \"flower\")\n## End(No test)\n\n\n"} {"package":"waterfalls","topic":"waterfall","snippet":"### Name: waterfall\n### Title: Create waterfall charts\n### Aliases: waterfall\n\n### ** Examples\n\nwaterfall(values = round(rnorm(5), 1), labels = letters[1:5], calc_total = TRUE)\nwaterfall(.data = data.frame(category = letters[1:5],\n value = c(100, -20, 10, 20, 110)), \n fill_colours = colorRampPalette(c(\"#1b7cd6\", \"#d5e6f2\"))(5),\n fill_by_sign = FALSE)\n\n\n"} {"package":"Rdsdp","topic":"dsdp","snippet":"### Name: Rdsdp::dsdp\n### Title: Solve semidefinite programm with DSDP\n### Aliases: dsdp\n\n### ** Examples\n\n\tK=NULL\n\tK$s=c(2,3)\n\tK$l=2\n\n\tC=matrix(c(0,0,2,1,1,2,c(3,0,1,\n 0,2,0,\n 1,0,3)),1,15,byrow=TRUE)\n\tA=matrix(c(0,1,0,0,0,0,c(3,0,1,\n 0,4,0,\n 1,0,5),\n \t1,0,3,1,1,3,rep(0,9)), 2,15,byrow=TRUE)\n\tb <- c(1,2)\n\t\n OPTIONS=NULL\n OPTIONS$gaptol=0.000001\n OPTIONS$logsummary=0\n OPTIONS$outputstats=1\n\t\n result = dsdp(A,b,C,K,OPTIONS)\n\n\n"} {"package":"SpatialVS","topic":"SpatialVS","snippet":"### Name: SpatialVS\n### Title: Function for spatial variable selection\n### Aliases: SpatialVS\n### Keywords: function\n\n### ** Examples\n\n#use small.test.dat as the input to fit the spatial Poisson regression model.\n#a grid of alpha.vec and lambda.vec is typically used.\n#Here one point of alpha.vec and lambda.vec is used for fast illustration.\n\ntest.fit<-SpatialVS(dat.obj=small.test.dat, alpha.vec=0.5,\nlambda.vec=5, method=\"PQL\", intercept=TRUE, verbose=FALSE)\n\n\n"} {"package":"SpatialVS","topic":"SpatialVS.summary","snippet":"### Name: SpatialVS.summary\n### Title: Function for spatial variable selection's summary\n### Aliases: SpatialVS.summary\n\n### ** Examples\n\n\ntest.fit<-SpatialVS(dat.obj=small.test.dat, alpha.vec=0.5, lambda.vec=5,\nmethod=\"PQL\", intercept=TRUE, verbose=FALSE)\nSpatialVS.summary(test.fit)\n\n\n"} {"package":"SpatialVS","topic":"control.default","snippet":"### Name: control.default\n### Title: Global variable of spatial variable selection, contains\n### optimization tuning parameters.\n### Aliases: control.default\n### Keywords: default setting\n\n### ** Examples\n\ncontrol.default=list(maxIter=200,iwls=10^(-4),tol1=10^(-3),tol2=10^(-3))\n\n\n"} {"package":"SpatialVS","topic":"lyme.svs.eco0.dat","snippet":"### Name: lyme.svs.eco0.dat\n### Title: The Lyme disease dataset with Eco id=0\n### Aliases: lyme.svs.eco0.dat distmat.compute\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"lyme.svs.eco0\")\nlyme.svs.eco0.dat$dist=distmat.compute(location=lyme.svs.eco0.dat$location, dist.min=0.4712249)\n\n\n\n"} {"package":"SpatialVS","topic":"lyme.svs.eco1.dat","snippet":"### Name: lyme.svs.eco1.dat\n### Title: The Lyme disease dataset with Eco id=1\n### Aliases: lyme.svs.eco1.dat\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"lyme.svs.eco1\")\nlyme.svs.eco1.dat$dist=distmat.compute(location=lyme.svs.eco1.dat$location, dist.min=0.2821849)\n\n\n\n"} {"package":"SpatialVS","topic":"small.test.dat","snippet":"### Name: small.test.dat\n### Title: A small dataset for fast testing of functions\n### Aliases: small.test.dat\n### Keywords: dataset\n\n### ** Examples\n\ndata(\"small.test\")\n\n#Here is a toy example for creating a data object that can be used 
for\n#generating dat.obj for SpatialVS function\nn=20\n#simulate counts data\ny=rpois(n=n, lambda=1)\n#simulate covariate matrix\nx1=rnorm(n)\nx2=rnorm(n)\nX=cbind(1, x1, x2)\n#compute distance matrix from some simulated locations\nloc_x=runif(n)\nloc_y=runif(n)\ndist=matrix(0,n, n)\nfor(i in 1:n)\n{\n for(j in 1:n)\n {\n dist[i,j]=sqrt((loc_x[i]-loc_x[j])^2+(loc_y[i]-loc_y[j])^2)\n }\n}\n\n#assume offset is all zero\noffset=rep(0, n)\n\n#assemble the data object for SpatialVS\n\ndat.obj=list(y=y, X=X, dist=dist, offset=offset)\n\n\n\n\n"} {"package":"oem","topic":"big.oem","snippet":"### Name: big.oem\n### Title: Orthogonalizing EM for big.matrix objects\n### Aliases: big.oem\n\n### ** Examples\n\n## Not run: \n##D set.seed(123)\n##D nrows <- 50000\n##D ncols <- 100\n##D bkFile <- \"bigmat.bk\"\n##D descFile <- \"bigmatk.desc\"\n##D bigmat <- filebacked.big.matrix(nrow=nrows, ncol=ncols, type=\"double\",\n##D backingfile=bkFile, backingpath=\".\",\n##D descriptorfile=descFile,\n##D dimnames=c(NULL,NULL))\n##D \n##D # Each column value will be the column number multiplied by\n##D # samples from a standard normal distribution.\n##D set.seed(123)\n##D for (i in 1:ncols) bigmat[,i] = rnorm(nrows)*i\n##D \n##D y <- rnorm(nrows) + bigmat[,1] - bigmat[,2]\n##D \n##D fit <- big.oem(x = bigmat, y = y, \n##D penalty = c(\"lasso\", \"elastic.net\", \n##D \"ols\", \n##D \"mcp\", \"scad\", \n##D \"mcp.net\", \"scad.net\",\n##D \"grp.lasso\", \"grp.lasso.net\",\n##D \"grp.mcp\", \"grp.scad\",\n##D \"sparse.grp.lasso\"), \n##D groups = rep(1:20, each = 5))\n##D \n##D fit2 <- oem(x = bigmat[,], y = y, \n##D penalty = c(\"lasso\", \"grp.lasso\"), \n##D groups = rep(1:20, each = 5)) \n##D \n##D max(abs(fit$beta[[1]] - fit2$beta[[1]])) \n##D \n##D layout(matrix(1:2, ncol = 2))\n##D plot(fit)\n##D plot(fit, which.model = 2)\n## End(Not run)\n\n\n\n"} {"package":"oem","topic":"cv.oem","snippet":"### Name: cv.oem\n### Title: Cross validation for Orthogonalizing EM\n### Aliases: cv.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\n\ntrue.beta <- c(runif(15, -0.25, 0.25), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), 
n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nfit <- oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\", \"sparse.grp.lasso\"), \n groups = rep(1:10, each = 5))\n\nlayout(matrix(1:3, ncol = 3))\nplot(fit)\nplot(fit, which.model = 2)\nplot(fit, which.model = \"sparse.grp.lasso\")\n\n# the oem package has support for\n# sparse design matrices\n\nlibrary(Matrix)\n\nxs <- rsparsematrix(n.obs * 25, n.vars * 2, density = 0.01)\nys <- rnorm(n.obs * 25, sd = 3) + as.vector(xs %*% c(true.beta, rep(0, n.vars)) )\nx.dense <- as.matrix(xs)\n\nsystem.time(fit <- oem(x = x.dense, y = ys, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:20, each = 5), intercept = FALSE,\n standardize = FALSE))\n\nsystem.time(fits <- oem(x = xs, y = ys, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:20, each = 5), intercept = FALSE, \n standardize = FALSE, lambda = fit$lambda))\n \nmax(abs(fit$beta[[1]] - fits$beta[[1]]))\nmax(abs(fit$beta[[2]] - fits$beta[[2]]))\n\n# logistic\ny <- rbinom(n.obs, 1, prob = 1 / (1 + exp(-x %*% true.beta)))\n\nsystem.time(res <- oem(x, y, intercept = FALSE, \n penalty = c(\"lasso\", \"sparse.grp.lasso\", \"mcp\"), \n family = \"binomial\", \n groups = rep(1:10, each = 5),\n nlambda = 10,\n irls.tol = 1e-3, tol = 1e-8))\n\nlayout(matrix(1:3, ncol = 3))\nplot(res)\nplot(res, which.model = 2)\nplot(res, which.model = \"mcp\")\n\n\n# sparse design matrix\nxs <- rsparsematrix(n.obs * 2, n.vars, density = 0.01)\nx.dense <- as.matrix(xs)\nys <- rbinom(n.obs * 2, 1, prob = 1 / (1 + exp(-x.dense %*% true.beta)))\n\nsystem.time(res.gr <- oem(x.dense, ys, intercept = FALSE, \n penalty = \"grp.lasso\", \n family = \"binomial\", \n nlambda = 10,\n groups = rep(1:5, each = 10), \n irls.tol = 1e-3, tol = 1e-8))\n \nsystem.time(res.gr.s <- oem(xs, ys, intercept = FALSE, \n penalty = \"grp.lasso\", \n family = \"binomial\", \n nlambda = 10,\n groups = rep(1:5, each = 10), \n irls.tol = 1e-3, tol = 1e-8))\n \nmax(abs(res.gr$beta[[1]] - res.gr.s$beta[[1]]))\n\n\n\n"} {"package":"oem","topic":"oem.xtx","snippet":"### Name: oem.xtx\n### Title: Orthogonalizing EM with precomputed XtX\n### Aliases: oem.xtx\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\n\ntrue.beta <- c(runif(15, -0.25, 0.25), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nfit <- oem(x = x, y = y, \n penalty = c(\"lasso\", \"elastic.net\", \n \"ols\", \n \"mcp\", \"scad\", \n \"mcp.net\", \"scad.net\",\n \"grp.lasso\", \"grp.lasso.net\",\n \"grp.mcp\", \"grp.scad\",\n \"sparse.grp.lasso\"), \n standardize = FALSE, intercept = FALSE,\n groups = rep(1:20, each = 5))\n \nxtx <- crossprod(x) / n.obs\nxty <- crossprod(x, y) / n.obs\n\nfit.xtx <- oem.xtx(xtx = xtx, xty = xty, \n penalty = c(\"lasso\", \"elastic.net\", \n \"ols\", \n \"mcp\", \"scad\", \n \"mcp.net\", \"scad.net\",\n \"grp.lasso\", \"grp.lasso.net\",\n \"grp.mcp\", \"grp.scad\",\n \"sparse.grp.lasso\"), \n groups = rep(1:20, each = 5)) \n \nmax(abs(fit$beta[[1]][-1,] - fit.xtx$beta[[1]]))\nmax(abs(fit$beta[[2]][-1,] - fit.xtx$beta[[2]])) \n\nlayout(matrix(1:2, ncol = 2))\nplot(fit.xtx)\nplot(fit.xtx, which.model = 2)\n\n\n\n"} {"package":"oem","topic":"plot.oem","snippet":"### Name: plot.oem\n### Title: Plot method for Orthogonalizing EM fitted objects\n### Aliases: plot.oem plot.cv.oem plot.xval.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- 
matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nfit <- oem(x = x, y = y, penalty = c(\"lasso\", \"grp.lasso\"), groups = rep(1:10, each = 10))\n\nlayout(matrix(1:2, ncol = 2))\nplot(fit, which.model = 1)\nplot(fit, which.model = 2)\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nfit <- cv.oem(x = x, y = y, penalty = c(\"lasso\", \"grp.lasso\"), groups = rep(1:10, each = 10))\n\nlayout(matrix(1:2, ncol = 2))\nplot(fit, which.model = 1)\nplot(fit, which.model = \"grp.lasso\")\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nfit <- xval.oem(x = x, y = y, penalty = c(\"lasso\", \"grp.lasso\"), groups = rep(1:10, each = 10))\n\nlayout(matrix(1:4, ncol = 2))\nplot(fit, which.model = 1)\nplot(fit, which.model = 2)\n\nplot(fit, which.model = 1, type = \"coef\")\nplot(fit, which.model = 2, type = \"coef\")\n\n\n\n"} {"package":"oem","topic":"predict.cv.oem","snippet":"### Name: predict.cv.oem\n### Title: Prediction function for fitted cross validation oem objects\n### Aliases: predict.cv.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\nx.test <- matrix(rnorm(n.obs.test * n.vars), n.obs.test, n.vars)\ny.test <- rnorm(n.obs.test, sd = 3) + x.test %*% true.beta\n\nfit <- cv.oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:10, each = 10), \n nlambda = 10)\n\npreds.best <- predict(fit, newx = x.test, type = \"response\", which.model = \"best.model\")\n\napply(preds.best, 2, function(x) mean((y.test - x) ^ 2))\n\npreds.gl <- predict(fit, newx = x.test, type = \"response\", which.model = \"grp.lasso\")\n\napply(preds.gl, 2, function(x) mean((y.test - x) ^ 2))\n\npreds.l <- predict(fit, newx = x.test, type = \"response\", which.model = 1)\n\napply(preds.l, 2, function(x) mean((y.test - x) ^ 2))\n\n\n"} {"package":"oem","topic":"predict.oem","snippet":"### Name: predict.oem\n### Title: Prediction method for Orthogonalizing EM fitted objects\n### Aliases: predict.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\nx.test <- matrix(rnorm(n.obs.test * n.vars), n.obs.test, n.vars)\ny.test <- rnorm(n.obs.test, sd = 3) + x.test %*% true.beta\n\nfit <- oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:10, each = 10), \n nlambda = 10)\n\npreds.lasso <- predict(fit, newx = x.test, type = \"response\", which.model = 1)\npreds.grp.lasso <- predict(fit, newx = x.test, type = \"response\", which.model = 2)\n\napply(preds.lasso, 2, function(x) mean((y.test - x) ^ 2))\napply(preds.grp.lasso, 2, function(x) mean((y.test - x) ^ 2))\n\n\n\n"} {"package":"oem","topic":"predict.xval.oem","snippet":"### Name: predict.xval.oem\n### Title: Prediction function for fitted cross validation oem objects\n### Aliases: predict.xval.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 
100\nn.obs.test <- 1e3\n\ntrue.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\nx.test <- matrix(rnorm(n.obs.test * n.vars), n.obs.test, n.vars)\ny.test <- rnorm(n.obs.test, sd = 3) + x.test %*% true.beta\n\nfit <- xval.oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:10, each = 10), \n nlambda = 10)\n\npreds.best <- predict(fit, newx = x.test, type = \"response\", which.model = \"best.model\")\n\napply(preds.best, 2, function(x) mean((y.test - x) ^ 2))\n\npreds.gl <- predict(fit, newx = x.test, type = \"response\", which.model = \"grp.lasso\")\n\napply(preds.gl, 2, function(x) mean((y.test - x) ^ 2))\n\npreds.l <- predict(fit, newx = x.test, type = \"response\", which.model = 1)\n\napply(preds.l, 2, function(x) mean((y.test - x) ^ 2))\n\n\n"} {"package":"oem","topic":"xval.oem","snippet":"### Name: xval.oem\n### Title: Fast cross validation for Orthogonalizing EM\n### Aliases: xval.oem\n\n### ** Examples\n\nset.seed(123)\nn.obs <- 1e4\nn.vars <- 100\n\ntrue.beta <- c(runif(15, -0.25, 0.25), rep(0, n.vars - 15))\n\nx <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)\ny <- rnorm(n.obs, sd = 3) + x %*% true.beta\n\nsystem.time(fit <- oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:20, each = 5)))\n \nsystem.time(xfit <- xval.oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\"), \n groups = rep(1:20, each = 5)))\n \nsystem.time(xfit2 <- xval.oem(x = x, y = y, \n penalty = c(\"lasso\", \"grp.lasso\",\n \"mcp\", \"scad\", \n \"mcp.net\", \"scad.net\",\n \"grp.lasso\", \"grp.lasso.net\",\n \"grp.mcp\", \"grp.scad\",\n \"sparse.grp.lasso\"), \n groups = rep(1:20, each = 5)))\n\n\n\n"} {"package":"BayesianMediationA","topic":"bma.bx.cy","snippet":"### Name: bma.bx.cy\n### Title: Bayesian Mediation Analysis\n### Aliases: bma.bx.cy\n\n### ** Examples\n\ndata(\"weight_behavior\")\n#n.iter and n.burnin are set to be very small and should be adjusted\n#binary predictor\ntest.b.c<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,c(14,12,13)],\n y=weight_behavior[,1],n.iter=5,n.burnin = 1)\nsummary(test.b.c)\n## No test: \n#categorical predictor\ntest.ca.c<- bma.bx.cy(pred=weight_behavior[,4], m=weight_behavior[,12:14],\n y=weight_behavior[,1],n.iter=5,n.burnin = 1)\nsummary(test.ca.c)\n\n#use covariate for y\ntest.b.c.2<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,12:14],\n y=weight_behavior[,1],cova=weight_behavior[,2],n.iter=10,n.burnin = 1)\nsummary(test.b.c.2)\n\n#use covariate for mediators\ntest.b.c.3<- bma.bx.cy(pred=weight_behavior[,3], m=weight_behavior[,c(9,12:14)],\n y=weight_behavior[,1],mcov=weight_behavior[,c(2,5)],\n mclist = list(1,2),n.iter=5,n.burnin = 1)\nsummary(test.b.c.3)\n\n#use continuous predictor\ntest.c.c<- bma.bx.cy(pred=weight_behavior[,2], m=weight_behavior[,12:14],\n y=weight_behavior[,1],n.iter=5,n.burnin = 1)\nsummary(test.c.c,method=3)\n\n#use transferred continuous predictor for y\ntest.c.c.2<- bma.bx.cy(pred=weight_behavior[,2], m=weight_behavior[,12:14],\n y=weight_behavior[,1],fpy=list(1,c(\"x\",\"x^2\")),n.iter=5,n.burnin = 1)\nsummary(test.c.c.2,method=1)\n\n#multiple predictors\ntest.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:14],\n y=weight_behavior[,1],n.iter=10,n.burnin = 1)\nsummary(test.m.c,method=3)\n\n##binary outcome\ntest.m.b<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:14],\n 
y=weight_behavior[,15],cova=weight_behavior[,5],n.iter=5,n.burnin = 1)\nsummary(test.m.b,method=2)\n\n##time-to-event outcome\n#use a simulation\nset.seed(1)\nN=100\n\nalpha=0.5\nx=rnorm(N,0,1)\nx=ifelse(x>0,1,0)\ne1=rnorm(N,0,1)\nM=alpha*x+e1\nlambda=0.01\nrho=1\nbeta=1.2\nc=-1\nrateC=0.001\nv=runif(n=N)\nTlat =(- log(v) / (lambda * exp(c*x+M*beta)))^(1 / rho)\nC=rexp(n=N, rate=rateC)\ntime=pmin(Tlat, C)\nstatus <- as.numeric(Tlat <= C)\n\ntest.m.t.1<- bma.bx.cy(pred=x, m=M,y=Surv(time,status),inits=function(){\n list(r=1,lambda=0.01)},n.iter=10,n.burnin = 1)\ntemp1=summary(test.m.t.1)\nprint(temp1,method=1,RE=FALSE)\n\n##categorical outcome\ntest.m.c<- bma.bx.cy(pred=weight_behavior[,2:4], m=weight_behavior[,12:13],\n y=weight_behavior[,14],cova=weight_behavior[,5],n.iter=5,n.burnin = 1)\nsummary(test.m.c,method=3)\n## End(No test)\n\n\n"} {"package":"BayesianMediationA","topic":"print.summary.bma","snippet":"### Name: print.summary.bma\n### Title: Print the results from the summary function.\n### Aliases: print.summary.bma\n\n### ** Examples\n\n# see example at bma.bx.cy.\n\n\n"} {"package":"BayesianMediationA","topic":"summary.bma.bx.cy","snippet":"### Name: summary.bma.bx.cy\n### Title: Summary function for the bma.bx.cy object\n### Aliases: summary.bma.bx.cy\n\n### ** Examples\n\n# See examples at bma.bx.cy.\n\n\n"} {"package":"BayesianMediationA","topic":"weight_behavior","snippet":"### Name: weight_behavior\n### Title: Weight_Behavior Data Set\n### Aliases: weight_behavior\n### Keywords: Datasets\n\n### ** Examples\n\ndata(weight_behavior)\nnames(weight_behavior)\n\n\n"} {"package":"condir","topic":"csCompare","snippet":"### Name: csCompare\n### Title: Statistically compare CRs towards two CSs\n### Aliases: csCompare\n\n### ** Examples\n\nset.seed(1000)\ncsCompare(cs1 = rnorm(n = 100, mean = 10), cs2 = rnorm(n = 100, mean = 9))\n\n\n"} {"package":"condir","topic":"csPlot","snippet":"### Name: csPlot\n### Title: Plot CRs for each CS\n### Aliases: csPlot\n\n### ** Examples\n\nset.seed(1000)\ncsPlot(cs1 = rnorm(n = 100, mean = 10), cs2 = rnorm(n = 100, mean = 9))\n\n\n"} {"package":"condir","topic":"csReport","snippet":"### Name: csReport\n### Title: Report results of conditioning data\n### Aliases: csReport\n\n### ** Examples\n\nset.seed(1000)\ntmp <- csCompare(cs1 = rnorm(n = 100, mean = 10),\ncs2 = rnorm(n = 100, mean = 9))\ncsReport(tmp)\n\n\n"} {"package":"condir","topic":"csRobustnessPlot","snippet":"### Name: csRobustnessPlot\n### Title: Plot robustness results\n### Aliases: csRobustnessPlot\n\n### ** Examples\n\nset.seed(1000)\ncsRobustnessPlot(cs1 = rnorm(n = 100, mean = 10),\ncs2 = rnorm(n = 100, mean = 9))\n\n\n"} {"package":"condir","topic":"csSensitivity","snippet":"### Name: csSensitivity\n### Title: Sensitivity analysis for the Bayes Factors of 'csCompare'\n### results\n### Aliases: csSensitivity\n\n### ** Examples\n\nset.seed(1000)\ncsSensitivity(cs1 = rnorm(n = 100, mean = 10),\ncs2 = rnorm(n = 100, mean = 9))\n\n\n"} {"package":"condir","topic":"csTable","snippet":"### Name: csTable\n### Title: Produce tables of 'csCompare' results\n### Aliases: csTable\n\n### ** Examples\n\ntmp <- csCompare(cs1 = c(1, 2, 3, 1, 4), cs2 = c(10, 12, 12, 31, 13))\ncsTable(tmp)\n\n\n"} {"package":"impactflu","topic":"generate_counts","snippet":"### Name: generate_counts\n### Title: Generate normal counts\n### Aliases: generate_counts\n\n### ** Examples\n\n# Tokars (2018) vaccinations\nvacs_tok <- generate_counts(1e6, 304, 0.55, 100, 50)\n# Tokars (2018) cases\ncasen_tok <- 
generate_counts(1e6, 304, 0.12, 190, 35)\n\n\n"} {"package":"impactflu","topic":"generate_dates","snippet":"### Name: generate_dates\n### Title: Generate dates\n### Aliases: generate_dates\n\n### ** Examples\n\n# Dates from Tokars (2018)\ntimepoints <- 1L:304L\ndates <- generate_dates(timepoints, lubridate::ymd(\"2017-08-01\"), \"day\")\n\n\n"} {"package":"impactflu","topic":"method1","snippet":"### Name: method1\n### Title: Analysis methods from Tokars (2018)\n### Aliases: method1 method3\n\n### ** Examples\n\nlibrary(dplyr)\n\n# Simulate a population\nnsam <- 1e6L\nndays <- 304L\npop_tok <- sim_reference(\n init_pop_size = nsam,\n vaccinations = generate_counts(nsam, ndays, 0.55, mean = 100, sd = 50),\n cases_novac = generate_counts(nsam, ndays, 0.12, mean = 190, sd = 35),\n ve = 0.48,\n lag = 14,\n deterministic = TRUE\n)\n\n# Summarise by month\npop_tok_month <- pop_tok %>%\n mutate(\n datestamp = generate_dates(\n timepoint, lubridate::ymd(\"2017-08-01\"), \"day\"\n ),\n year = lubridate::year(datestamp),\n month = lubridate::month(datestamp)\n ) %>%\n group_by(year, month) %>%\n summarise(\n vaccinations = sum(vaccinations), cases = sum(cases), ve = mean(ve)\n ) %>%\n ungroup()\n\n# Estimate averted cases using the two different methods\nm1 <- method1(\n nsam, pop_tok_month$vaccinations, pop_tok_month$cases, pop_tok_month$ve\n)\nm3 <- method3(\n nsam, pop_tok_month$vaccinations, pop_tok_month$cases, pop_tok_month$ve\n)\nsum(m1$avert)\nsum(m3$avert)\n\n\n"} {"package":"impactflu","topic":"sim_reference","snippet":"### Name: sim_reference\n### Title: Simulate an ideal population\n### Aliases: sim_reference\n\n### ** Examples\n\n# Population from Tokars (2018)\nnsam <- 1e6L\nndays <- 304L\npop_tok <- sim_reference(\n init_pop_size = nsam,\n vaccinations = generate_counts(nsam, ndays, 0.55, mean = 100, sd = 50),\n cases_novac = generate_counts(nsam, ndays, 0.12, mean = 190, sd = 35),\n ve = 0.48,\n lag = 14,\n deterministic = TRUE\n)\nhead(pop_tok)\nsum(pop_tok$avert)\n\n\n"} {"package":"tsfknn","topic":"autoplot.knnForecast","snippet":"### Name: autoplot.knnForecast\n### Title: Create a ggplot object from a knnForecast object\n### Aliases: autoplot.knnForecast\n\n### ** Examples\n\npred <- knn_forecasting(USAccDeaths, h = 12, lags = 1:12, k = 2)\nlibrary(ggplot2)\nautoplot(pred)\nautoplot(pred, highlight = \"neighbors\")\n\n\n"} {"package":"tsfknn","topic":"knn_examples","snippet":"### Name: knn_examples\n### Title: Examples of the model associated with a prediction\n### Aliases: knn_examples\n\n### ** Examples\n\npred <- knn_forecasting(ts(1:8), h = 1, lags = 1:2, k = 2)\nknn_examples(pred)\n\n\n"} {"package":"tsfknn","topic":"knn_forecasting","snippet":"### Name: knn_forecasting\n### Title: Time series forecasting using KNN regression\n### Aliases: knn_forecasting\n\n### ** Examples\n\npred <- knn_forecasting(USAccDeaths, h = 12, lags = 1:12, k = 2)\npred$prediction # To see a time series with the forecasts\nplot(pred) # To see a plot with the forecast\n\n\n"} {"package":"tsfknn","topic":"n_training_examples","snippet":"### Name: n_training_examples\n### Title: Number of training examples\n### Aliases: n_training_examples\n\n### ** Examples\n\nn_training_examples(ts(1:10), h = 2, lags = 1:3, msas = \"MIMO\")\nn_training_examples(ts(1:10), h = 2, lags = 1:3, msas = \"recursive\")\n\n\n"} {"package":"tsfknn","topic":"nearest_neighbors","snippet":"### Name: nearest_neighbors\n### Title: Nearest neighbors associated with predictions\n### Aliases: nearest_neighbors\n\n### ** 
Examples\n\npred <- knn_forecasting(UKgas, h = 4, lags = 1:4, k = 2, msas = \"MIMO\")\nnearest_neighbors(pred)\n\n\n"} {"package":"tsfknn","topic":"predict.knnForecast","snippet":"### Name: predict.knnForecast\n### Title: Predict method for KNN models for time series forecasting.\n### Aliases: predict.knnForecast\n\n### ** Examples\n\npred <- knn_forecasting(UKgas, h = 4, k = 1, msas = \"recursive\")\nnew_pred <- predict(pred, h = 6)\nprint(new_pred$prediction)\nplot(new_pred) # To see a plot with the forecast\n\n\n\n"} {"package":"tsfknn","topic":"rolling_origin","snippet":"### Name: rolling_origin\n### Title: Assessing forecasting accuracy with rolling origin\n### Aliases: rolling_origin\n\n### ** Examples\n\npred <- knn_forecasting(UKgas, h = 4, lags = 1:4, k = 2)\nro <- rolling_origin(pred)\nprint(ro$global_accu)\n\n\n"} {"package":"AgreementInterval","topic":"agrInt2alpha","snippet":"### Name: agrInt2alpha\n### Title: agrInt2alpha\n### Aliases: agrInt2alpha\n\n### ** Examples\n\nagrInt2alpha(clin.limit=c(-15, 15), n=52, sigmae=46.09245)\n\n\n"} {"package":"AgreementInterval","topic":"ai","snippet":"### Name: ai\n### Title: ai\n### Aliases: ai\n\n### ** Examples\n\nai(x=1:4, y=c(1, 1, 2, 4))\na <- c(1, 2, 3, 4, 7)\nb <- c(1, 3, 2, 5, 3)\nai(x=a, y=b)\nai(x=IPIA$Tomography, y=IPIA$Urography)\nai(x=IPIA$Tomography, y=IPIA$Urography, clin.limit=c(-15, 15))\n\n\n"} {"package":"AgreementInterval","topic":"aiAdj","snippet":"### Name: aiAdj\n### Title: aiAdj\n### Aliases: aiAdj\n\n### ** Examples\n\nans <- ai(x=IPIA$Tomography, y=IPIA$Urography)\naiAdj(object=ans, x=1)\naiAdj(object=ans, x=c(1, 2))\n\n\n"} {"package":"AgreementInterval","topic":"plot.ai","snippet":"### Name: plot.ai\n### Title: plot.ai\n### Aliases: plot.ai\n\n### ** Examples\n\na <- c(1, 2, 3, 4, 7)\nb <- c(1, 3, 2, 5, 3)\nans <- ai(x=a, y=b)\nplot(x=ans)\nplot(x=ans, clin.limit=c(-5, 5))\n\n\n"} {"package":"AgreementInterval","topic":"summary.ai","snippet":"### Name: summary.ai\n### Title: summary.ai\n### Aliases: summary.ai\n\n### ** Examples\n\na <- c(1, 2, 3, 4, 7)\nb <- c(1, 3, 2, 5, 3)\nans <- ai(x=a, y=b)\nsummary(ans)\n\n\n"} {"package":"AgreementInterval","topic":"tolProb","snippet":"### Name: tolProb\n### Title: tolProb\n### Aliases: tolProb\n\n### ** Examples\n\ntolProb(n=52, k=5, alpha=0.05)\ntolProb(n=52, k=0, alpha=0.05)\n\n\n"} {"package":"R2HTML","topic":"HTML","snippet":"### Name: HTML\n### Title: Outputs an object to a HTML file\n### Aliases: HTML HTML.anova HTML.aov HTML.aovlist HTML.array HTML.atomic\n### HTML.by HTML.call HTML.character HTML.coefmat HTML.complex\n### HTML.connection HTML.default HTML.density HTML.difftime\n### HTML.dummy.coef HTML.dummy.coef.list HTML.environment HTML.factor\n### HTML.family HTML.formula HTML.ftable HTML.glm HTML.glm.null\n### HTML.hsearch HTML.htest HTML.infl HTML.integer HTML.integrate\n### HTML.list HTML.listof HTML.lm HTML.lm.null HTML.logical HTML.logLik\n### HTML.MethodsFunction HTML.mtable HTML.noquote HTML.numeric\n### HTML.octmode HTML.ordered HTML.pairlist HTML.POSIXct HTML.POSIXlt\n### HTML.rle HTML.SavedPlots HTML.simple.list HTML.socket\n### HTML.summary.aov HTML.summary.aovlist HTML.summary.glm\n### HTML.summary.glm.null HTML.summary.lm HTML.summary.lm.null\n### HTML.summary.manova HTML.summary.table HTML.table HTML.tables.aov\n### HTML.terms HTML.ts HTML.TukeyHSD HTML.xtable HTML.xtabs HTML.abbrev\n### HTML.agnes HTML.Anova HTML.anova.loglm HTML.ar HTML.Arima HTML.arima0\n### HTML.boot HTML.bootci HTML.bSpline HTML.clara HTML.correspondence\n### 
HTML.cox.zph HTML.coxph.null HTML.diana HTML.dissimilarity HTML.dist\n### HTML.ecdf HTML.ellipsoid HTML.factanal HTML.fanny HTML.fitdistr\n### HTML.fractions HTML.gam HTML.gamma.shape HTML.glm.dose HTML.grob\n### HTML.hclust HTML.HoltWinters HTML.lda HTML.libraryIQR HTML.loadings\n### HTML.loess HTML.loglm HTML.lqs HTML.mca HTML.medpolish HTML.mona\n### HTML.multinom HTML.nls HTML.nnet HTML.pairwise.htest HTML.pam\n### HTML.polr HTML.polySpline HTML.power.htest HTML.ppolySpline HTML.ppr\n### HTML.prcomp HTML.princomp HTML.qda HTML.ridgelm HTML.rlm\n### HTML.rms.curv HTML.rpart HTML.saddle.distn HTML.shingle\n### HTML.shingleLevel HTML.simplex HTML.smooth.spline HTML.stepfun\n### HTML.stl HTML.StructTS HTML.structure HTML.summary.agnes\n### HTML.summary.clara HTML.summary.diana HTML.summary.fanny\n### HTML.summary.gam HTML.summary.loess HTML.summary.loglm\n### HTML.summary.mona HTML.summary.multinom HTML.summary.negbin\n### HTML.summary.nls HTML.summary.nnet HTML.summary.pam HTML.summary.polr\n### HTML.summary.ppr HTML.summary.prcomp HTML.summary.princomp\n### HTML.summary.rlm HTML.tskernel HTML.tukeyline HTML.tukeysmooth\n### HTML.unit HTML.viewport HTML.reStruct HTML.summary.lme\n### HTML.summary.pdDiag\n### Keywords: print IO file\n\n### ** Examples\n\ndir.create(file.path(tempdir(),\"R2HTML\"))\ntarget <- HTMLInitFile(file.path(tempdir(),\"R2HTML\"),filename=\"sample\", BackGroundColor=\"#BBBBEE\")\nHTML(\"
Don't forget to use the CSS file in order to benefit from fixed-width font\",file=target)\ntmp <- as.data.frame(matrix(rnorm(100),ncol=10))\nHTML(tmp,file=target)\nHTMLEndFile()\n\n\n"} {"package":"R2HTML","topic":"HTML.cormat","snippet":"### Name: HTML.cormat\n### Title: Write a correlation matrix with HTML formatting\n### Aliases: HTML.cormat\n### Keywords: IO multivariate\n\n### ** Examples\n\n\ttmpfic=HTMLInitFile(tempdir(),CSSFile=\"http://www.stat.ucl.ac.be/R2HTML/Pastel.css\")\n\tdata(iris)\n\tHTML(as.title(\"Fisher Iris dataset / Correlation matrix - normal matrix\"),\n\t file=tmpfic)\n\tHTML(cor(iris[,1:4]), file=tmpfic)\n\tHTML(as.title(\"Fisher Iris dataset / Correlation matrix - traffic highlighting\"),\n\t file=tmpfic)\n\tHTML.cormat(cor(iris[,1:4]), file=tmpfic)\n\n\t# File is generated, you can call the browser:\n\t## Not run: browseURL(tmpfic)\n\t\n\n\n"} {"package":"R2HTML","topic":"HTML.data.frame","snippet":"### Name: HTML.data.frame\n### Title: Write a data.frame (or matrix) to a HTML output\n### Aliases: HTML.data.frame HTML.matrix\n### Keywords: datasets IO\n\n### ** Examples\n\ntmpfic=HTMLInitFile(tempdir(),CSSFile=system.file(\"samples\", \"R2HTML.css\", package=\"R2HTML\"))\ndata(iris)\nHTML(as.title(\"Fisher Iris dataset\"),file=tmpfic)\nHTML(iris, file=tmpfic)\n# File is generated, you can call the browser:\n## Not run: browseURL(tmpfic)\n\n# Export one line of iris using default decimal separator\nHTML(iris[1,],file=\"\")\n\n# Seeing default decimal separator:\ngetOption(\"R2HTML.format.decimal.mark\")\n\n# Modifying it:\noptions(\"R2HTML.format.decimal.mark\"=\",\")\nHTML(iris[1,],file=\"\")\n\n# Bypassing value set in option:\nHTML(iris[1,],file=\"\",decimal.mark=\"*\")\n\n# Using a vector for formatting options\nHTML(iris[1:2,1:2],nsmall=c(3,1),file=\"\")\n\n\n"} {"package":"R2HTML","topic":"HTML.function","snippet":"### Name: HTML.function\n### Title: Writes the code of a function to a target HTML file\n### Aliases: HTML.function\n### Keywords: print IO file\n\n### ** Examples\n\n\n## Define a function and export its code to the file /test.html.\n## Target file may be changed when submitting this code...\n\nmyfile <- paste(tempfile(),\".html\",sep=\"\")\nmyfun <- function(x){\n\tcat(\"\\n Euclidian norm\")\n\treturn(sqrt(sum(x^2)))\n }\nHTML(myfun,file=myfile)\ncat(\"\\n Test output written in: \",myfile)\n\n\n\n"} {"package":"R2HTML","topic":"HTML.latex","snippet":"### Name: HTML.latex\n### Title: Insert a piece of LaTeX into a HTML file\n### Aliases: HTML.latex as.latex\n### Keywords: print IO file\n\n### ** Examples\n\n## Not run: \n##D \t fic = HTMLInitFile()\n##D \t HTML.title(\"sample page\",1,file=fic) \n##D \t HTML(\"First paragraph\",file=fic)\n##D \t cat(\"Some text and then an equation:\",file=fic,append=TRUE)\n##D \t HTML(as.latex(\"\\int_{-\\infty}^{1}f(x)dx\") ,file=fic)\n##D \t cat(\". 
Nice isn't it?\",file=fic,append=TRUE)\n##D \t HTML(as.latex(\"\\int_{-\\infty}^{1}f(x)dx\",inline=FALSE) ,file=fic) \n##D \t HTML(as.latex(\"\\int_{-\\infty}^{1}f(x)dx\",inline=FALSE,count=TRUE) ,file=fic) \n##D \t HTML(as.latex(\"\\int_{-\\infty}^{1}f(x)dx\",inline=FALSE,label=\"My equation\") ,file=fic) \n##D \t cat(\"file:\", fic, \"is created\")\n##D \t browseURL(fic)\n## End(Not run)\t\t \n\n\n"} {"package":"R2HTML","topic":"HTML.title","snippet":"### Name: HTML.title\n### Title: Writes a title in a target HTML output\n### Aliases: HTML.title as.title\n### Keywords: print IO file\n\n### ** Examples\n\n## Write a title in the file /test.html.\n## Target file may be changed when submitting this code...\n\nmyfile <- paste(tempfile(),\".html\",sep=\"\")\n\ntit1 <- as.title(\"This is method 1\")\n\nHTML(tit1, file=myfile)\n\nHTML.title(\"This is method 2\",file=myfile, HR=3)\ncat(\"\\n Test output written in: \",myfile)\n\n\n\n\n"} {"package":"R2HTML","topic":"HTML2clip","snippet":"### Name: HTM2clip\n### Title: Wrapper around HTML() to save output to the clipboard\n### Aliases: HTML2clip\n### Keywords: print IO file\n\n### ** Examples\n\nif (.Platform$OS == \"windows\")\n\tHTML2clip(summary(lm(rating ~., attitude)))\n\n\n"} {"package":"R2HTML","topic":"HTMLCSS","snippet":"### Name: HTMLCSS\n### Title: Insert HTML code to refer to an external CSS file\n### Aliases: HTMLCSS\n### Keywords: print IO file\n\n### ** Examples\n\nmyfile <- file.path(tempdir(),\"tmp.html\")\nHTMLCSS(myfile,CSSfile=\"myownCSS.CSS\")\n\n\n"} {"package":"R2HTML","topic":"HTMLChangeCSS","snippet":"### Name: HTMLChangeCSS\n### Title: Change the current CSS file for dynamic use of package\n### Aliases: HTMLChangeCSS\n### Keywords: IO file\n\n### ** Examples\n\n## Not run: \n##D \tHTMLStart()\n##D \t(x=diag(3))\n##D \tHTMLChangeCSS(\"Pastel\")\n##D \t# refresh the browser\n## End(Not run)\n\n\n"} {"package":"R2HTML","topic":"HTMLInitFile","snippet":"### Name: HTMLInitFile\n### Title: Begins / Ends a new HTML report output\n### Aliases: HTMLInitFile HTMLEndFile HTMLSetFile HTMLGetFile\n### Keywords: print IO file\n\n### ** Examples\n\n\t# Store in target the name of a output file\n\tdir.create(file.path(tempdir(),\"R2HTML\"))\n\ttarget <- HTMLInitFile(file.path(tempdir(),\"R2HTML\"),\"index\", BackGroundColor=\"#BBBBEE\")\n\t# Use target to write a dataframe\n\tHTML(as.title(\"Here is the data frame\"),file=target)\n\tHTML(\"
Don't forget to use the CSS file in order to benefit from fixed-width font\",\n\t file=target)\n\ttmp <- as.data.frame(matrix(rnorm(100),ncol=10))\n\tHTML(tmp,file=target)\n\tHTMLEndFile()\n\n\n"} {"package":"R2HTML","topic":"HTMLInsertGraph","snippet":"### Name: HTMLInsertGraph\n### Title: Insert a graph in a HTML report\n### Aliases: HTMLInsertGraph\n### Keywords: print IO file\n\n### ** Examples\n\n\n\tdirectory=tempdir()\n\tHTMLoutput=file.path(directory,\"output.html\")\n\tgraph1=\"graph1.png\"\n\t# Write graph to a file\n\t## Not run: png(file.path(directory,graph1))\n\t## Not run: plot(table(rpois(100,5)), type = \"h\", col = \"red\", lwd=10,main=\"rpois(100,lambda=5)\")\n\t## Not run: dev.off()\n\t# Insert graph to the HTML output\n\tHTMLInsertGraph(graph1,file=HTMLoutput,caption=\"Sample discrete distribution plot\")\n\n\n"} {"package":"R2HTML","topic":"HTMLStart","snippet":"### Name: HTMLStart\n### Title: Start / Stop the automatic redirection of output to HTML files\n### Aliases: HTMLStart HTMLStop\n### Keywords: print IO file\n\n### ** Examples\n\n\n# Perform one's own direct report\n\ndir.create(file.path(tempdir(),\"R2HTML\"))\nHTMLStart(file.path(tempdir(),\"R2HTML\"),HTMLframe=FALSE, Title=\"My report\",autobrowse=FALSE)\nas.title(\"This is my first title\")\nx <- 1\ny<- 2\nx+y\nHTMLStop()\n\n\n## Use for interactive teaching course\nif (interactive()){\n\tdir.create(file.path(tempdir(),\"R2HTML\"))\n\tHTMLStart(file.path(tempdir(),\"R2HTML\"),echo=TRUE)\n\tas.title(\"Manipulation vectors\")\n\t1:10\n\tsum(1:10)\n\tc(1:10,rep(3,4))\n\tHTMLStop()\n}\n\n\n"} {"package":"R2HTML","topic":"HTMLbr","snippet":"### Name: HTMLbr\n### Title: Facility functions to write HTML code\n### Aliases: HTMLbr HTMLhr HTMLli\n### Keywords: print IO file\n\n### ** Examples\n\n\n## Insert a line to a HTML file\n## Change the path/name of the file to redirect to your test file\n\nmyfile <- paste(tempfile(),\".html\",sep=\"\")\nHTMLhr(file=myfile)\ncat(\"\\n Test output written in: \",myfile)\n\n\n\n"} {"package":"R2HTML","topic":"HTMLgrid","snippet":"### Name: HTMLgrid\n### Title: Creates a HTML grid using ActiveWidget grid -\n### www.activewidgets.com\n### Aliases: HTMLgrid HTMLgrid_inline HTMLgrid_references HTMLgrid_summary\n### Keywords: datasets IO\n\n### ** Examples\n\n data(iris)\n fic <- HTMLInitFile(useGrid=TRUE,useLaTeX=FALSE)\n fic <- HTMLgrid_inline(iris,file=fic)\n cat(\"\\n Browse file 'fic':\",fic)\n ## Not run: browseURL(fic)\n\n\n"} {"package":"R2HTML","topic":"HTMLplot","snippet":"### Name: HTMLplot\n### Title: Insert a graphic into an HTML output\n### Aliases: HTMLplot\n### Keywords: print IO file\n\n### ** Examples\n\n\n## Plots a graphic and inserts it into the file /test.html.\n## Target file and also graph directory should be changed when submitting this code...\n\nmyfile <- paste(tempfile(),\".html\",sep=\"\")\nplot(sin, -pi, 2*pi,main=\"Sinus\")\n# HTMLplot(file=myfile,GraphDirectory=\"/\",Caption=\"Look at this curve!\")\n\n\n"} {"package":"R2HTML","topic":"HTMLstem","snippet":"### Name: HTMLstem\n### Title: Insert a stem-and-leaf plot in the HTML output\n### Aliases: HTMLstem\n### Keywords: IO univar\n\n### ** Examples\n\n\tdata(islands)\n\ttmpfic=paste(tempfile(),\"html\",sep=\".\")\n\tHTMLstem(log10(islands),tmpfic)\n\tcat(\"\\n stem-and-leaf written to:\", tmpfic,\"\\n\")\n\n\n"} {"package":"R2HTML","topic":"RweaveHTML","snippet":"### Name: RweaveHTML\n### Title: A driver to parse HTML noweb files with Sweave tool\n### Aliases: RweaveHTML RweaveHTMLOptions 
RweaveHTMLFinish\n### RweaveHTMLWritedoc RweaveHTMLSetup RweaveHTMLRuncode SweaveSyntaxHTML\n### Keywords: IO file\n\n### ** Examples\n\n\n## Not run: \n##D library(tools)\n##D Sweave(\"file.snw\",driver=RweaveHTML)\n## End(Not run)\n\n\n"} {"package":"DOS","topic":"addalmostexact","snippet":"### Name: addalmostexact\n### Title: Use a Penalty to Obtain a Near Exact Match\n### Aliases: addalmostexact\n\n### ** Examples\n\ndata(costa)\nz<-1*(costa$welder==\"Y\")\naa<-1*(costa$race==\"A\")\nsmoker=1*(costa$smoker==\"Y\")\nage<-costa$age\nx<-cbind(age,aa,smoker)\ndmat<-mahal(z,x)\n# Mahalanobis distances\nround(dmat[,1:6],2)\n# Mahalanobis distances penalized for mismatching on smoking.\ndmat<-addalmostexact(dmat, z, smoker, mult = 10)\n# The first treated subject (row labeled 27) is a nonsmoker, but the\n# third control (column 3) is a smoker, so there is a big penalty.\nround(dmat[,1:6],2)\n\n\n"} {"package":"DOS","topic":"addcaliper","snippet":"### Name: addcaliper\n### Title: Implement a Caliper Using a Penalty Function in Optimal Matching\n### Aliases: addcaliper\n\n### ** Examples\n\ndata(costa)\nz<-1*(costa$welder==\"Y\")\naa<-1*(costa$race==\"A\")\nsmoker=1*(costa$smoker==\"Y\")\nage<-costa$age\nx<-cbind(age,aa,smoker)\ndmat<-mahal(z,x)\n# Mahalanobis distances\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n# Impose propensity score calipers\nprop<-glm(z~age+aa+smoker,family=binomial)$fitted.values # propensity score\n# Mahalanobis distances penalized for violations of a propensity score caliper.\n# This version is used for numerical work.\ndmat<-addcaliper(dmat,z,prop,caliper=.5)\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n## Not run: \n##D # Find the minimum distance match within propensity score calipers.\n##D optmatch::pairmatch(dmat,data=costa)\n## End(Not run)\n# Conceptual versions with infinite distances for violations of propensity caliper.\ndmat[dmat>20]<-Inf\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n\n\n"} {"package":"DOS","topic":"angristlavy","snippet":"### Name: angristlavy\n### Title: Class Size and Academic Performance - Maimonides Rule\n### Aliases: angristlavy\n### Keywords: datasets\n\n### ** Examples\n\n# Figure 1.1 in Chapter 1 of Design of Observational Studies (2010)\ndata(angristlavy)\nattach(angristlavy)\ngrp<-factor(z,levels=c(1,0),labels=c(\"31-40\",\"41-50\"),ordered=TRUE)\npar(mfrow=c(2,2))\nboxplot(tipuach~grp,main=\"Disadvantaged\",ylab=\"Percent\")\nboxplot(clasz~grp,main=\"Class Size\",ylab=\"Students\")\nboxplot(avgmath~grp,main=\"Math\",ylab=\"Average Score\")\nboxplot(avgverb~grp,main=\"Verbal\",ylab=\"Average Score\")\ndetach(angristlavy)\n\n\n"} {"package":"DOS","topic":"costa","snippet":"### Name: costa\n### Title: Welding and DNA-Protein Crosslinks\n### Aliases: costa\n### Keywords: datasets\n\n### ** Examples\n\ndata(costa)\nboxplot(costa$dpc~costa$welder,\n xlab=\"Control (N) or Welder (Y)\",\n ylab=\"DNA-Protein Cross-links Percent\")\n\n\n"} {"package":"DOS","topic":"dynarski","snippet":"### Name: dynarski\n### Title: A Natural Experiment Concerning Subsidized College Education\n### Aliases: dynarski\n### Keywords: datasets\n\n### ** Examples\n\n#\ndata(dynarski)\n# Table 13.1 of Rosenbaum (2010)\nhead(dynarski)\n# Table 13.2 of Rosenbaum (2010)\nzb<-dynarski$zb\nzbf<-factor(zb,levels=c(1,0),labels=c(\"Father deceased\",\"Father not deceased\"))\ntable(zbf)\nXb<-dynarski[,3:10]\n\n# Estimate the propensity 
score, Rosenbaum (2010, Section 13.3)\np<-glm(zb~Xb$faminc+Xb$incmiss+Xb$black+Xb$hisp\n +Xb$afqtpct+Xb$edmissm+Xb$edm+Xb$female,\n family=binomial)$fitted.values\n# Figure 13.1 in Rosenbaum (2010)\nboxplot(p~zbf,ylab=\"Propensity score\",main=\"1979-1981 Cohort\")\n\n# Read about missing covariate values in section 13.4 of Rosenbaum (2010)\n\n# Robust Mahalanobis distance matrix, treated x control\ndmat<-smahal(zb,Xb)\ndim(dmat)\n# Table 13.3 in Rosenbaum (2010)\nround(dmat[1:5,1:5],2)\n\n# Add a caliper on the propensity score using a penalty function\ndmat<-addcaliper(dmat,zb,p,caliper=.2)\ndim(dmat)\n# Table 13.4 in Rosenbaum (2010)\nround(dmat[1:5,1:5],2)\n## Not run: \n##D # YOU MUST LOAD the optmatch package and accept its license to continue.\n##D # Note that the optmatch package has changed since 2010. It now suggests\n##D # that you indicate the data explicitly as data=dynarski.\n##D \n##D # Creating a 1-to-10 match, as in section 13.6 of Rosenbaum (2010)\n##D # This may take a few minutes.\n##D m<-fullmatch(dmat,data=dynarski,min.controls = 10,max.controls = 10,omit.fraction = 1379/2689)\n##D length(m)\n##D sum(matched(m))\n##D 1441/11 # There are 131 matched sets, 1 treated, 10 controls\n##D \n##D # Alternative, simpler code to do the same thing\n##D m2<-pairmatch(dmat,controls=10,data=dynarski)\n##D # Results are the same:\n##D sum(m[matched(m)]!=m2[matched(m2)])\n##D \n##D # Housekeeping\n##D im<-as.integer(m)\n##D dynarski<-cbind(dynarski,im)\n##D dm<-dynarski[matched(m),]\n##D dm<-dm[order(dm$im,1-dm$zb),]\n##D \n##D # Table 13.5 in Rosenbaum (2010)\n##D which(dm$id==10)\n##D dm[188:198,]\n##D which(dm$id==396)\n##D dm[23:33,]\n##D which(dm$id==3051)\n##D dm[1068:1078,]\n##D # In principle, there can be a tie, in which several different\n##D # matched samples all minimize the total distance. On my\n##D # computer, this calculation reproduces Table 13.5, but were\n##D # there a tie, optmatch should return one of the tied optimal\n##D # matches, but not any particular one.\n## End(Not run)\n\n\n"} {"package":"DOS","topic":"fine","snippet":"### Name: fine\n### Title: Expand a Distance Matrix for Matching with Fine Balance.\n### Aliases: fine\n\n### ** Examples\n\ndata(costa)\nz<-1*(costa$welder==\"Y\")\naa<-1*(costa$race==\"A\")\nsmoker=1*(costa$smoker==\"Y\")\nage<-costa$age\nx<-cbind(age,aa,smoker)\ndmat<-mahal(z,x)\n# Mahalanobis distances\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n# Impose propensity score calipers\nprop<-glm(z~age+aa+smoker,family=binomial)$fitted.values # propensity score\n# Mahalanobis distances penalized for violations of a propensity score caliper.\n# This version is used for numerical work.\ndmat<-addcaliper(dmat,z,prop,caliper=.5)\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n# Because dmat already contains large penalties, we set mult=1.\ndmat<-fine(dmat,z,aa,mult=1)\ndmat[,1:6] # Compare with Table 10.1 in Design of Observational Studies (2010)\ndim(dmat) # dmat has been expanded to be square by adding 5 extras, here numbered 48:52\n# Any control matched to an extra is discarded.\n## Not run: \n##D # Find the minimum distance match within propensity score calipers.\n##D optmatch::pairmatch(dmat)\n##D # Any control matched to an extra is discarded. For instance, the optimal match paired\n##D # extra row 48 with the real control in column 7 to form matched set 1.22, so that control\n##D # is not part of the matched sample. 
The harmless warning message from pairmatch\n##D # reflects the divergence between the costa data.frame and expanded distance matrix.\n## End(Not run)\n# Conceptual versions with infinite distances for violations of propensity caliper.\ndmat[dmat>20]<-Inf\nround(dmat[,1:6],2) # Compare with Table 10.1 in Design of Observational Studies (2010)\n\n\n"} {"package":"DOS","topic":"lead","snippet":"### Name: lead\n### Title: Lead in Children\n### Aliases: lead\n### Keywords: datasets\n\n### ** Examples\n\ndata(lead)\n# Reproduces parts of Table 2 in Rosenbaum (2011)\nsenU(lead$dif,gamma=5.8,m=8,m1=5,m2=8)\nsenU(lead$dif,gamma=5,m=5,m1=4,m2=5)\n\n# m=2, m1=2, m2=2 is the U-statistic that closely\n# resembles Wilcoxon's signed rank test. Note\n# that the results are almost the same.\nsenWilcox(lead$dif,gamma=5) # In Table 2\nsenU(lead$dif,gamma=5,m=2,m1=2,m2=2)\n\n\n"} {"package":"DOS","topic":"mahal","snippet":"### Name: mahal\n### Title: Mahalanobis Distance Matrix for Optimal Matching\n### Aliases: mahal\n\n### ** Examples\n\ndata(costa)\nz<-1*(costa$welder==\"Y\")\naa<-1*(costa$race==\"A\")\nsmoker=1*(costa$smoker==\"Y\")\nage<-costa$age\nx<-cbind(age,aa,smoker)\ndmat<-mahal(z,x)\n# Mahalanobis distances\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n# Impose propensity score calipers\nprop<-glm(z~age+aa+smoker,family=binomial)$fitted.values # propensity score\n# Mahalanobis distances penalized for violations of a propensity score caliper.\n# This version is used for numerical work.\ndmat<-addcaliper(dmat,z,prop,caliper=.5)\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n## Not run: \n##D # Find the minimum distance match within propensity score calipers.\n##D optmatch::pairmatch(dmat,data=costa)\n## End(Not run)\n# Conceptual versions with infinite distances for violations of propensity caliper.\ndmat[dmat>20]<-Inf\nround(dmat[,1:6],2) # Compare with Table 8.5 in Design of Observational Studies (2010)\n\n\n"} {"package":"DOS","topic":"pinto","snippet":"### Name: pinto\n### Title: DNA Damage in Painters\n### Aliases: pinto\n### Keywords: datasets\n\n### ** Examples\n\ndata(pinto)\npar(mfrow=c(1,3))\nattach(pinto)\nboxplot(mn~group,ylim=c(0,6),main=\"All\",ylab=\"Micronuclei\")\nboxplot(mn[!longEx]~group[!longEx],ylim=c(0,6),main=\"Short Ex\",ylab=\"Micronuclei\")\nboxplot(mn[longEx]~group[longEx],ylim=c(0,6),main=\"Long Ex\",ylab=\"Micronuclei\")\n\n# Calculations in Table 5.5 of Design of Observational Studies (2010)\nd<-mn[group==\"painter\"]-mn[group==\"control\"] # 22 pair differences\nsenWilcox(d,gamma=1)\nsenWilcox(d,gamma=2) # sensitive to gamma=2\nsenWilcox(d,gamma=3.3)\ndLong<-d[longEx[group==\"painter\"]] # 12 pairs with long exposure\nsenWilcox(dLong,gamma=3.3) # insensitive to gamma=3.3\n\n\n"} {"package":"DOS","topic":"schoket","snippet":"### Name: schoket\n### Title: DNA Damage in Aluminum Production Workers\n### Aliases: schoket\n### Keywords: datasets\n\n### ** Examples\n\ndata(schoket)\nattach(schoket)\nplot(sort(adductsc),sort(adductsw),ylim=c(0,6.4),xlim=c(0,6.4),\n xlab=\"DNA adducts for controls\",ylab=\"DNA adducts for workers\",\n main=\"Quantile-Quantile Plot\") # Compare with Chapter 16\nabline(0,1) # line of equality\nlegend(4,1,lty=1,\"x=y\")\nboxplot(adductsw,adductsc,ylim=c(0,6.4),ylab=\"DNA adducts\",names=c(\"Worker\",\"Control\"))\nd<-adductsw-adductsc\nsenWilcox(d,gamma=1)\nsenWilcox(d,gamma=1.5) # sensitive to gamma=1.5\n\n\n"} 
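The schoket example above reports the sensitivity analysis at two fixed values of gamma. A minimal sketch (not from the DOS documentation) that repeats the same senWilcox call over a grid of gammas to bracket the sensitivity point, i.e. the gamma at which the one-sided P-value first crosses 0.05:

library(DOS)
data(schoket)
d <- schoket$adductsw - schoket$adductsc
# the example above shows significance is lost somewhere between
# gamma = 1 and gamma = 1.5; print the whole grid to see where
for (g in seq(1, 1.5, by = 0.1)) {
  cat("gamma =", g, "\n")
  print(senWilcox(d, gamma = g))
}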
{"package":"DOS","topic":"senU","snippet":"### Name: senU\n### Title: Sensitivity Analysis for a New U Statistic\n### Aliases: senU\n\n### ** Examples\n\ndata(\"schoket\")\nd<-schoket$adductsw-schoket$adductsc\n\n# With the defaults, m=2, m1=2, m2=2, the U-statistic is very\n# similar to Wilcoxon's signed rank statistic\nsenWilcox(d,gamma=1)\nsenU(d,gamma=1)\n\n# With m=1, m1=1, m2=1, the U-statistic is the sign test\nsenU(d,gamma=1,m=1,m1=1,m2=1)\nprop.test(sum(d>0),length(d),p=.5,\n alternative=\"greater\",correct=FALSE)$p.value\n\n# With m=m1=m2, this is the test of Stephenson (1981) whose ranks are similar to\n# those of Conover and Salzburg (1988); see Rosenbaum (2007).\n\n# The calculations that follow reproduce the sensitivity analysis for the\n# data of Schoket et al. () in Chapter 16 of Desgin of Observational Studies (2010).\nsenU(d,gamma=1,m=2,m1=2,m2=2)\nsenU(d,gamma=1,m=5,m1=5,m2=5)\n\nsenU(d,gamma=1.5,m=2,m1=2,m2=2)\nsenU(d,gamma=1.5,m=5,m1=5,m2=5)\n\nsenU(d,gamma=1.8,m=2,m1=2,m2=2)\nsenU(d,gamma=1.8,m=5,m1=5,m2=5)\n\nsenU(d,gamma=2,m=2,m1=2,m2=2)\nsenU(d,gamma=2,m=5,m1=5,m2=5)\n\ndata(lead)\n# Reproduces parts of Table 2 in Rosenbaum (2011)\nsenU(lead$dif,gamma=5.8,m=8,m1=5,m2=8)\nsenU(lead$dif,gamma=5,m=5,m1=4,m2=5)\n\n# m=2, m1=2, m2=2 is the U-statistic that closely\n# resembles Wilcoxon's signed rank test. Note\n# that the results are almost the same.\nsenWilcox(lead$dif,gamma=5) # In Table 2\nsenU(lead$dif,gamma=5,m=2,m1=2,m2=2)\n\n\n"} {"package":"DOS","topic":"senWilcox","snippet":"### Name: senWilcox\n### Title: Sensitivity Analysis for Wilcoxon's Signed-rank Statistic\n### Aliases: senWilcox\n\n### ** Examples\n\ndata(werfel)\nd<-werfel$serpc_p-werfel$cerpc_p\n\n# Reproduces the approximate one-sided P-value computed in Section 3.5 of Rosenbaum (2010).\nsenWilcox(d,gamma=3)\n\n# Reproduces parts of Tables 4.3 and 4.4 in Rosenbaum (2002)\ndata(lead)\nsenWilcox(lead$dif,gamma=1,conf.int=TRUE,alternative=\"twosided\")\nsenWilcox(lead$dif,gamma=2,conf.int=TRUE,alternative=\"twosided\")\n\n# Agrees with the usual Wilcoxon procedures when gamma=1.\nsenWilcox(d,gamma=1,conf.int=TRUE,alternative=\"twosided\")\nstats::wilcox.test(d,conf.int=TRUE,exact=FALSE,correct=FALSE)\n\n\n"} {"package":"DOS","topic":"senWilcoxExact","snippet":"### Name: senWilcoxExact\n### Title: Exact Sensitivity Analysis for Wilcoxon's Signed-rank Statistic\n### Aliases: senWilcoxExact\n\n### ** Examples\n\ndata(werfel)\nd<-werfel$serpc_p-werfel$cerpc_p\n\n# Reproduces the exact one-sided P-value computed in Section 3.9 of Rosenbaum (2010).\nsenWilcoxExact(d,gamma=2)\n\n# Agrees with the usual Wilcoxon procedures when gamma=1.\nsenWilcoxExact(d,gamma=1)\nstats::wilcox.test(d,alternative=\"greater\")\n\n# Reproduces the one-sided confidence interval for gamma=3 in Table 3.3 of Rosenbaum (2010)\nsenWilcoxExact(d-0.0935,gamma=3)\nsenWilcoxExact(d-0.0936,gamma=3)\n\n\n"} {"package":"DOS","topic":"smahal","snippet":"### Name: smahal\n### Title: Robust Mahalanobis Distance Matrix for Optimal Matching\n### Aliases: smahal\n\n### ** Examples\n\ndata(costa)\nz<-1*(costa$welder==\"Y\")\naa<-1*(costa$race==\"A\")\nsmoker=1*(costa$smoker==\"Y\")\nage<-costa$age\nx<-cbind(age,aa,smoker)\ndmat<-smahal(z,x)\n# Mahalanobis distances\nround(dmat[,1:6],2) # Compare with Table 8.6 in Design of Observational Studies (2010)\n# Impose propensity score calipers\nprop<-glm(z~age+aa+smoker,family=binomial)$fitted.values # propensity score\n# Mahalanobis distanced penalized for violations of a propensity score caliper.\n# This 
version is used for numerical work.\ndmat<-addcaliper(dmat,z,prop,caliper=.5)\nround(dmat[,1:6],2) # Compare with Table 8.6 in Design of Observational Studies (2010)\n## Not run: \n##D # Find the minimum distance match within propensity score calipers.\n##D optmatch::pairmatch(dmat,data=costa)\n## End(Not run)\n# Conceptual versions with infinite distances for violations of propensity caliper.\ndmat[dmat>20]<-Inf\nround(dmat[,1:6],2) # Compare with Table 8.6 in Design of Observational Studies (2010)\n\n\n"} {"package":"DOS","topic":"werfel","snippet":"### Name: werfel\n### Title: Welding Fumes and DNA Damage\n### Aliases: werfel\n### Keywords: datasets\n\n### ** Examples\n\ndata(werfel)\nd<-werfel$serpc_p-werfel$cerpc_p\n\n# Reproduces the approximate one-sided P-value computed in Section 3.5 of Rosenbaum (2010).\nsenWilcox(d,gamma=3)\n\n# Agrees with the usual large sample Wilcoxon procedures when gamma=1.\nsenWilcox(d,gamma=1,conf.int=TRUE,alternative=\"twosided\")\nstats::wilcox.test(d,conf.int=TRUE,exact=FALSE,correct=FALSE)\n\n\n"} {"package":"BayesARIMAX","topic":"BayesARIMAX","snippet":"### Name: BayesARIMAX\n### Title: Bayesian Estimation of ARIMAX Model\n### Aliases: BayesARIMAX\n### Keywords: Bayesian ARIMAX\n\n### ** Examples\n\nset.seed(121)\nY<- arima.sim(list(order = c(1,1,1),ar=0.7,ma=0.4), n = 49)\nX=rnorm(50,4,1)\nBayesARIMAX(Y,X)\n\n\n"} {"package":"excluder","topic":"check_duplicates","snippet":"### Name: check_duplicates\n### Title: Check for duplicate IP addresses and/or locations\n### Aliases: check_duplicates\n\n### ** Examples\n\n# Check for duplicate IP addresses and locations\ndata(qualtrics_text)\ncheck_duplicates(qualtrics_text)\n\n# Check only for duplicate locations\nqualtrics_text %>%\n check_duplicates(dupl_location = FALSE)\n\n# Do not print rows to console\nqualtrics_text %>%\n check_duplicates(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n check_duplicates(quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"check_duration","snippet":"### Name: check_duration\n### Title: Check for minimum or maximum durations\n### Aliases: check_duration\n\n### ** Examples\n\n# Check for durations faster than 100 seconds\ndata(qualtrics_text)\ncheck_duration(qualtrics_text, min_duration = 100)\n\n# Remove preview data first\nqualtrics_text %>%\n exclude_preview() %>%\n check_duration(min_duration = 100)\n\n# Check only for durations slower than 800 seconds\nqualtrics_text %>%\n exclude_preview() %>%\n check_duration(max_duration = 800)\n\n# Do not print rows to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_duration(min_duration = 100, print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_duration(min_duration = 100, quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"check_ip","snippet":"### Name: check_ip\n### Title: Check for IP addresses from outside of a specified country.\n### Aliases: check_ip\n\n### ** Examples\n\n# Check for IP addresses outside of the US\ndata(qualtrics_text)\ncheck_ip(qualtrics_text)\n\n# Remove preview data first\nqualtrics_text %>%\n exclude_preview() %>%\n check_ip()\n\n# Check for IP addresses outside of Germany\nqualtrics_text %>%\n exclude_preview() %>%\n check_ip(country = \"DE\")\n\n# Do not print rows to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_ip(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_ip(quiet = TRUE)\n\n\n"} 
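The check_* verbs above all accept print and quiet arguments that control console output. A minimal sketch (an assumption, not from the excluder documentation: it presumes each check_* verb returns the flagged rows as a data frame) that tallies how many rows each criterion flags without printing them:

library(excluder)
data(qualtrics_text)
# nrow() on each returned data frame counts the flagged rows
c(
  duplicates = nrow(check_duplicates(qualtrics_text, print = FALSE, quiet = TRUE)),
  duration = nrow(check_duration(qualtrics_text, min_duration = 100, print = FALSE, quiet = TRUE)),
  ip = nrow(check_ip(qualtrics_text, print = FALSE, quiet = TRUE))
)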
{"package":"excluder","topic":"check_location","snippet":"### Name: check_location\n### Title: Check for locations outside of the US\n### Aliases: check_location\n\n### ** Examples\n\n# Check for locations outside of the US\ndata(qualtrics_text)\ncheck_location(qualtrics_text)\n\n# Remove preview data first\nqualtrics_text %>%\n exclude_preview() %>%\n check_location()\n\n# Do not print rows to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_location(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_location(quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"check_preview","snippet":"### Name: check_preview\n### Title: Check for survey previews\n### Aliases: check_preview\n\n### ** Examples\n\n# Check for survey previews\ndata(qualtrics_text)\ncheck_preview(qualtrics_text)\n\n# Works for Qualtrics data exported as numeric values, too\nqualtrics_numeric %>%\n check_preview()\n\n# Do not print rows to console\nqualtrics_text %>%\n check_preview(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n check_preview(quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"check_progress","snippet":"### Name: check_progress\n### Title: Check for survey progress\n### Aliases: check_progress\n\n### ** Examples\n\n# Check for rows with incomplete progress\ndata(qualtrics_text)\ncheck_progress(qualtrics_text)\n\n# Remove preview data first\nqualtrics_text %>%\n exclude_preview() %>%\n check_progress()\n\n# Include a lower acceptable completion percentage\nqualtrics_numeric %>%\n exclude_preview() %>%\n check_progress(min_progress = 98)\n\n# Do not print rows to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_progress(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_progress(quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"check_resolution","snippet":"### Name: check_resolution\n### Title: Check screen resolution\n### Aliases: check_resolution\n\n### ** Examples\n\n# Check for survey previews\ndata(qualtrics_text)\ncheck_resolution(qualtrics_text)\n\n# Remove preview data first\nqualtrics_text %>%\n exclude_preview() %>%\n check_resolution()\n\n# Do not print rows to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_resolution(print = FALSE)\n\n# Do not print message to console\nqualtrics_text %>%\n exclude_preview() %>%\n check_resolution(quiet = TRUE)\n\n\n"} {"package":"excluder","topic":"deidentify","snippet":"### Name: deidentify\n### Title: Remove columns that could include identifiable information\n### Aliases: deidentify\n\n### ** Examples\n\nnames(qualtrics_numeric)\n\n# Remove IP address, location, and computer information columns\ndeid <- deidentify(qualtrics_numeric)\nnames(deid)\n\n# Remove only IP address and location columns\ndeid2 <- deidentify(qualtrics_numeric, strict = FALSE)\nnames(deid2)\n\n\n"} {"package":"excluder","topic":"exclude_duplicates","snippet":"### Name: exclude_duplicates\n### Title: Exclude rows with duplicate IP addresses and/or locations\n### Aliases: exclude_duplicates\n\n### ** Examples\n\n# Exclude duplicate IP addresses and locations\ndata(qualtrics_text)\ndf <- exclude_duplicates(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_duplicates()\n\n# Exclude only for duplicate locations\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_duplicates(dupl_location = FALSE)\n\n\n"} 
{"package":"excluder","topic":"exclude_duration","snippet":"### Name: exclude_duration\n### Title: Exclude rows with minimum or maximum durations\n### Aliases: exclude_duration\n\n### ** Examples\n\n# Exclude durations faster than 100 seconds\ndata(qualtrics_text)\ndf <- exclude_duration(qualtrics_text, min_duration = 100)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_duration()\n\n# Exclude only for durations slower than 800 seconds\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_duration(max_duration = 800)\n\n\n"} {"package":"excluder","topic":"exclude_ip","snippet":"### Name: exclude_ip\n### Title: Exclude IP addresses from outside of a specified country.\n### Aliases: exclude_ip\n\n### ** Examples\n\n# Exclude IP addresses outside of the US\ndata(qualtrics_text)\ndf <- exclude_ip(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_ip()\n\n# Exclude IP addresses outside of Germany\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_ip(country = \"DE\")\n\n\n"} {"package":"excluder","topic":"exclude_location","snippet":"### Name: exclude_location\n### Title: Exclude locations outside of US\n### Aliases: exclude_location\n\n### ** Examples\n\n# Exclude locations outside of the US\ndata(qualtrics_text)\ndf <- exclude_location(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_location()\n\n\n"} {"package":"excluder","topic":"exclude_preview","snippet":"### Name: exclude_preview\n### Title: Exclude survey previews\n### Aliases: exclude_preview\n\n### ** Examples\n\n# Exclude survey previews\ndata(qualtrics_text)\ndf <- exclude_preview(qualtrics_text)\n\n# Works for Qualtrics data exported as numeric values, too\ndf <- qualtrics_numeric %>%\n exclude_preview()\n\n# Do not print rows to console\ndf <- qualtrics_text %>%\n exclude_preview(print = FALSE)\n\n\n"} {"package":"excluder","topic":"exclude_progress","snippet":"### Name: exclude_progress\n### Title: Exclude survey progress\n### Aliases: exclude_progress\n\n### ** Examples\n\n# Exclude rows with incomplete progress\ndata(qualtrics_text)\ndf <- exclude_progress(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_progress()\n\n# Include a lower acceptable completion percentage\ndf <- qualtrics_numeric %>%\n exclude_preview() %>%\n exclude_progress(min_progress = 98)\n\n# Do not print rows to console\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_progress(print = FALSE)\n\n\n"} {"package":"excluder","topic":"exclude_resolution","snippet":"### Name: exclude_resolution\n### Title: Exclude unacceptable screen resolution\n### Aliases: exclude_resolution\n\n### ** Examples\n\n# Exclude low screen resolutions\ndata(qualtrics_text)\ndf <- exclude_resolution(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n exclude_resolution()\n\n\n"} {"package":"excluder","topic":"mark_duplicates","snippet":"### Name: mark_duplicates\n### Title: Mark duplicate IP addresses and/or locations\n### Aliases: mark_duplicates\n\n### ** Examples\n\n# Mark duplicate IP addresses and locations\ndata(qualtrics_text)\ndf <- mark_duplicates(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_duplicates()\n\n# Mark only for duplicate locations\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_duplicates(dupl_location = 
FALSE)\n\n\n"} {"package":"excluder","topic":"mark_duration","snippet":"### Name: mark_duration\n### Title: Mark minimum or maximum durations\n### Aliases: mark_duration\n\n### ** Examples\n\n# Mark durations faster than 100 seconds\ndata(qualtrics_text)\ndf <- mark_duration(qualtrics_text, min_duration = 100)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_duration()\n\n# Mark only for durations slower than 800 seconds\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_duration(max_duration = 800)\n\n\n"} {"package":"excluder","topic":"mark_ip","snippet":"### Name: mark_ip\n### Title: Mark IP addresses from outside of a specified country.\n### Aliases: mark_ip\n\n### ** Examples\n\n# Mark IP addresses outside of the US\ndata(qualtrics_text)\ndf <- mark_ip(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_ip()\n\n# Mark IP addresses outside of Germany\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_ip(country = \"DE\")\n\n\n"} {"package":"excluder","topic":"mark_location","snippet":"### Name: mark_location\n### Title: Mark locations outside of US\n### Aliases: mark_location\n\n### ** Examples\n\n# Mark locations outside of the US\ndata(qualtrics_text)\ndf <- mark_location(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_location()\n\n\n"} {"package":"excluder","topic":"mark_preview","snippet":"### Name: mark_preview\n### Title: Mark survey previews\n### Aliases: mark_preview\n\n### ** Examples\n\n# Mark survey previews\ndata(qualtrics_text)\ndf <- mark_preview(qualtrics_text)\n\n# Works for Qualtrics data exported as numeric values, too\ndf <- qualtrics_numeric %>%\n mark_preview()\n\n\n"} {"package":"excluder","topic":"mark_progress","snippet":"### Name: mark_progress\n### Title: Mark survey progress\n### Aliases: mark_progress\n\n### ** Examples\n\n# Mark rows with incomplete progress\ndata(qualtrics_text)\ndf <- mark_progress(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_progress()\n\n# Include a lower acceptable completion percentage\ndf <- qualtrics_numeric %>%\n exclude_preview() %>%\n mark_progress(min_progress = 98)\n\n\n"} {"package":"excluder","topic":"mark_resolution","snippet":"### Name: mark_resolution\n### Title: Mark unacceptable screen resolution\n### Aliases: mark_resolution\n\n### ** Examples\n\n# Mark low screen resolutions\ndata(qualtrics_text)\ndf <- mark_resolution(qualtrics_text)\n\n# Remove preview data first\ndf <- qualtrics_text %>%\n exclude_preview() %>%\n mark_resolution()\n\n\n"} {"package":"excluder","topic":"remove_label_rows","snippet":"### Name: remove_label_rows\n### Title: Remove two initial rows created in Qualtrics data\n### Aliases: remove_label_rows\n\n### ** Examples\n\n# Remove label rows\ndata(qualtrics_raw)\ndf <- remove_label_rows(qualtrics_raw)\n\n\n"} {"package":"excluder","topic":"rename_columns","snippet":"### Name: rename_columns\n### Title: Rename columns to match standard Qualtrics names\n### Aliases: rename_columns\n\n### ** Examples\n\n# Rename columns\ndata(qualtrics_fetch)\nqualtrics_renamed <- qualtrics_fetch %>%\n rename_columns()\nnames(qualtrics_fetch)\nnames(qualtrics_renamed)\n\n# Alerts when columns cannot be renamed\ndata(qualtrics_numeric)\nrename_columns(qualtrics_numeric)\n\n# Turn off alert\nrename_columns(qualtrics_numeric, alert = FALSE)\n\n\n\n"} 
{"package":"excluder","topic":"unite_exclusions","snippet":"### Name: unite_exclusions\n### Title: Unite multiple exclusion columns into single column\n### Aliases: unite_exclusions\n\n### ** Examples\n\n\n# Unite all exclusion types\ndf <- qualtrics_text %>%\n mark_duplicates() %>%\n mark_duration(min_duration = 100) %>%\n mark_ip() %>%\n mark_location() %>%\n mark_preview() %>%\n mark_progress() %>%\n mark_resolution()\ndf2 <- df %>%\n unite_exclusions()\n\n# Unite subset of exclusion types\ndf2 <- df %>%\n unite_exclusions(exclusion_types = c(\"duplicates\", \"duration\", \"ip\"))\n\n\n"} {"package":"excluder","topic":"use_labels","snippet":"### Name: use_labels\n### Title: Use Qualtrics labels as column names\n### Aliases: use_labels\n\n### ** Examples\n\n# Rename columns\ndata(qualtrics_fetch)\nqualtrics_renamed <- qualtrics_fetch %>%\n use_labels()\nnames(qualtrics_fetch)\nnames(qualtrics_renamed)\n\n\n"} {"package":"mcgibbsit","topic":"mcgibbsit","snippet":"### Name: mcgibbsit\n### Title: Warnes and Raftery's MCGibbsit MCMC diagnostic\n### Aliases: mcgibbsit print.mcgibbsit\n### Keywords: models\n\n### ** Examples\n\n\n###\n# Create example data files for 20 independent chains\n# with serial correlation of 0.25\n###\n\nset.seed(42)\ntmpdir <- tempdir()\n\nnsamples <- 1000\n\nfor(i in 1:20){\n x <- matrix(nrow = nsamples+1, ncol=4)\n colnames(x) <- c(\"alpha\",\"beta\",\"gamma\", \"nu\")\n \n x[,\"alpha\"] <- rnorm (nsamples+1, mean=0.025, sd=0.0025)^2\n x[,\"beta\"] <- rnorm (nsamples+1, mean=53, sd=12)\n x[,\"gamma\"] <- rbinom(nsamples+1, 20, p=0.25) + 1\n x[,\"nu\"] <- rnorm (nsamples+1, mean=x[,\"alpha\"] * x[,\"beta\"], sd=1/x[,\"gamma\"])\n\n # induce serial correlation of 0.25\n x <- 0.75 * x[2:(nsamples+1),] + 0.25 * x[1:nsamples,]\n \n \n write.table(\n x,\n file = file.path(\n tmpdir,\n paste(\"mcmc\", i, \"csv\", sep=\".\")\n ),\n sep = \",\",\n row.names = FALSE\n )\n}\n\n# Read them back in as an mcmc.list object\ndata <- read.mcmc(\n 20, \n file.path(tmpdir, \"mcmc.#.csv\"), \n sep=\",\",\n col.names=c(\"alpha\",\"beta\",\"gamma\", \"nu\")\n )\n\n# Summary statistics\nsummary(data)\n\n# Trace and Density Plots\nplot(data)\n\n# And check the necessary run length \nmcgibbsit(data)\n\n\n\n\n"} {"package":"mcgibbsit","topic":"read.mcmc","snippet":"### Name: read.mcmc\n### Title: Read in data from a set of MCMC runs\n### Aliases: read.mcmc\n### Keywords: file\n\n### ** Examples\n\n\n###\n# Create example data files for 20 independent chains\n# with serial correlation of 0.\n###\n\nset.seed(42)\ntmpdir <- tempdir()\n\nfor(i in 1:20){\n x <- matrix(rnorm(1000), ncol=4)\n \n x[,4] <- x[,4] + 1/3 * (x[,1] + x[,2] + x[,3])\n \n colnames(x) <- c(\"alpha\",\"beta\",\"gamma\", \"nu\")\n \n write.table(\n x,\n file = file.path(\n tmpdir,\n paste(\"mcmc\", i, \"csv\", sep=\".\")\n ),\n sep = \",\",\n row.names=FALSE\n )\n}\n\n# Read them back in as an mcmc.list object\ndata <- read.mcmc(20, file.path(tmpdir, \"mcmc.#.csv\"), sep=\",\")\n\nsummary(data)\n\n\n"} {"package":"ez","topic":"ANT","snippet":"### Name: ANT\n### Title: ANT data\n### Aliases: ANT\n### Keywords: datasets\n\n### ** Examples\n\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n\n"} {"package":"ez","topic":"ANT2","snippet":"### Name: ANT2\n### Title: Messy ANT data\n### Aliases: ANT2\n### Keywords: datasets\n\n### ** Examples\n\ndata(ANT2)\nhead(ANT2)\nezPrecis(ANT2)\n\n\n"} {"package":"ez","topic":"ezANOVA","snippet":"### Name: ezANOVA\n### Title: Compute ANOVA\n### Aliases: ezANOVA\n\n### ** Examples\n\n\n#Read in the ANT 
data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n\n#Run an ANOVA on the mean correct RT data.\nrt_anova = ezANOVA(\n data = ANT[ANT$error==0,]\n , dv = rt\n , wid = subnum\n , within = .(cue,flank)\n , between = group\n)\n\n#Show the ANOVA and assumption tests.\nprint(rt_anova)\n\n## Not run: \n##D #Run an ANOVA on the mean correct RT data, ignoring group.\n##D rt_anova2 = ezANOVA(\n##D data = ANT[ANT$error==0,]\n##D , dv = rt\n##D , wid = subnum\n##D , within = .(cue,flank)\n##D )\n##D \n##D #Show the ANOVA and assumption tests.\n##D print(rt_anova2)\n## End(Not run)\n\n#Run a purely between-Ss ANOVA on the mean_rt data.\n#NOTE use of within_full to ensure that the data are\n# collapsed properly\nrt_anova3 = ezANOVA(\n data = ANT[ANT$error==0,]\n , dv = rt\n , wid = subnum\n , within_full = .(cue,flank)\n , between = group\n)\n\n#Show the ANOVA and assumption tests.\nprint(rt_anova3)\n\n\n#add a within-Ss effect to be used as a covariate\nANT$rt2 = ANT$rt + ANT$block*1000 #additive with and independent of the other predictors!\n\n## Not run: \n##D #Run an anova that doesn't use the covariate\n##D rt_anova4a = ezANOVA(\n##D data = ANT[ANT$error==0,]\n##D , dv = rt2\n##D , wid = subnum\n##D , within = .(cue,flank)\n##D , between = group\n##D )\n##D \n##D #Show the ANOVA and assumption tests.\n##D # Note loss of power to observe the within effects\n##D print(rt_anova4a)\n## End(Not run)\n\n#Run an anova that does use the covariate\nrt_anova4b = ezANOVA(\n data = ANT[ANT$error==0,]\n , dv = rt2\n , wid = subnum\n , within = .(cue,flank)\n , within_covariates = block\n , between = group\n)\n\n#Show the ANOVA and assumption tests.\n# Note power to observe the within effects has returned\nprint(rt_anova4b)\n\n\n#add a between-Ss effect to be used as a covariate\nANT$bc = as.numeric(as.character(ANT$subnum))%%10 #Note that the effect is balanced across groups\nANT$rt3 = ANT$rt + ANT$bc*1000 #additive with and independent of the other predictors!\n\n## Not run: \n##D #Run an anova that doesn't use the covariate\n##D rt_anova5a = ezANOVA(\n##D data = ANT[ANT$error==0,]\n##D , dv = rt2\n##D , wid = subnum\n##D , within = .(cue,flank)\n##D , between = group\n##D )\n##D \n##D #Show the ANOVA and assumption tests.\n##D # Note loss of power to observe the between effects\n##D print(rt_anova5a)\n## End(Not run)\n\n#Run an anova that does use the covariate\nrt_anova5b = ezANOVA(\n data = ANT[ANT$error==0,]\n , dv = rt2\n , wid = subnum\n , within = .(cue,flank)\n , between = group\n , between_covariates = bc\n)\n\n#Show the ANOVA and assumption tests.\n# Note power to observe the between effects has returned\nprint(rt_anova5b)\n\n\n\n"} {"package":"ez","topic":"ezBoot","snippet":"### Name: ezBoot\n### Title: Compute bootstrap resampled predictions\n### Aliases: ezBoot\n\n### ** Examples\n\n\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n#Run ezBoot on the accurate RT data\nrt = ezBoot(\n data = ANT\n , dv = rt\n , wid = subnum\n , within = .(cue,flank)\n , between = group\n , iterations = 1e1 #1e3 or higher is best for publication\n)\n\n## Not run: \n##D #plot the full design\n##D p = ezPlot2(\n##D preds = rt\n##D , x = flank\n##D , split = cue\n##D , col = group\n##D )\n##D print(p)\n##D \n##D #plot the effect of group across the flank*cue design\n##D p = ezPlot2(\n##D preds = rt\n##D , x = flank\n##D , split = cue\n##D , diff = group\n##D )\n##D print(p)\n##D \n##D #plot the flank*cue design, averaging across group\n##D p = ezPlot2(\n##D preds = rt\n##D , x = 
flank\n##D , split = cue\n##D )\n##D print(p)\n## End(Not run)\n\n\n"} {"package":"ez","topic":"ezCor","snippet":"### Name: ezCor\n### Title: Compute and plot an information-dense correlation matrix\n### Aliases: ezCor\n\n### ** Examples\n\n########\n# Set up some fake data\n########\nlibrary(MASS)\nN=100\n\n#first pair of variables\nvariance1=1\nvariance2=2\nmean1=10\nmean2=20\nrho = .8\nSigma=matrix(\n c(\n variance1\n , sqrt(variance1*variance2)*rho\n , sqrt(variance1*variance2)*rho\n , variance2\n )\n , 2\n , 2\n)\npair1=mvrnorm(N,c(mean1,mean2),Sigma,empirical=TRUE)\n\n#second pair of variables\nvariance1=10\nvariance2=20\nmean1=100\nmean2=200\nrho = -.4\nSigma=matrix(\n c(\n variance1\n , sqrt(variance1*variance2)*rho\n , sqrt(variance1*variance2)*rho\n , variance2\n )\n , 2\n , 2\n)\npair2=mvrnorm(N,c(mean1,mean2),Sigma,empirical=TRUE)\n\nmy_data=data.frame(cbind(pair1,pair2))\n\n########\n# Now plot\n########\np = ezCor(\n data = my_data\n)\nprint(p)\n\n#you can modify the default colours of the\n##correlation coefficients as follows\nlibrary(ggplot2)\np = p + scale_colour_manual(values = c('red','blue'))\nprint(p)\n#see the following for alternatives:\n# http://had.co.nz/ggplot2/scale_manual.html\n# http://had.co.nz/ggplot2/scale_hue.html\n# http://had.co.nz/ggplot2/scale_brewer.html\n\n\n\n"} {"package":"ez","topic":"ezDesign","snippet":"### Name: ezDesign\n### Title: Plot the balance of data in an experimental design\n### Aliases: ezDesign\n\n### ** Examples\n\n#Read in the ANT2 data (see ?ANT2).\ndata(ANT2)\nhead(ANT2)\nezPrecis(ANT2)\n\n#toss NA trials\nANT2 = ANT2[!is.na(ANT2$rt),]\n\nezDesign(\n data = ANT2\n , x = trial\n , y = subnum\n , row = block\n , col = group\n)\n#subnum #7 is missing data from the last half of the experiment\n\n## Not run: \n##D ezDesign(\n##D data = ANT2\n##D , x = flank\n##D , y = subnum\n##D , row = cue\n##D )\n##D #again, subnum#7 has half as much data as the rest\n##D \n##D #now look at error rates, which affect the number of RTs we can use\n##D ezDesign(\n##D data = ANT2[ANT2$error==0,]\n##D , x = flank\n##D , y = subnum\n##D , row = cue\n##D )\n##D #again, subnum#7 stands out because they have half as much data as the rest\n##D #also, subnum#14 has no data in any incongruent cells, suggesting that\n##D ##they made all errors in this condition\n##D #finally, subnum#12 has virtually no data, suggesting that they mistakenly\n##D ##swapped responses\n## End(Not run)\n\n\n"} {"package":"ez","topic":"ezMixed","snippet":"### Name: ezMixed\n### Title: Compute evidence for fixed effects in a mixed effects modelling\n### context\n### Aliases: ezMixed\n\n### ** Examples\n\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n#Run ezMixed on the accurate RT data\nrt = ezMixed(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , random = .(subnum)\n , fixed = .(cue,flank,group)\n)\nprint(rt$summary)\n\n## Not run: \n##D #Run ezMixed on the error rate data\n##D er = ezMixed(\n##D data = ANT\n##D , dv = .(error)\n##D , random = .(subnum)\n##D , fixed = .(cue,flank,group)\n##D , family = 'binomial'\n##D )\n##D print(er$summary)\n## End(Not run)\n\n\n"} {"package":"ez","topic":"ezMixedProgress","snippet":"### Name: ezMixedProgress\n### Title: Retrieve information saved to file by a call to ezMixed\n### Aliases: ezMixedProgress\n\n### ** Examples\n\n## Not run: \n##D #Read in the ANT data (see ?ANT).\n##D data(ANT)\n##D head(ANT)\n##D ezPrecis(ANT)\n##D \n##D #Run ezMixed on the accurate RT data\n##D rt_mix = ezMixed(\n##D data = 
ANT[ANT$error==0,]\n##D , dv = .(rt)\n##D , random = .(subnum)\n##D , fixed = .(cue,flank,group)\n##D , progress_dir = 'rt_mix'\n##D )\n##D \n##D rt_mix = ezMixedProgress('rt_mix')\n##D print(rt_mix$summary)\n## End(Not run)\n\n\n\n"} {"package":"ez","topic":"ezPerm","snippet":"### Name: ezPerm\n### Title: Perform a factorial permutation test\n### Aliases: ezPerm\n\n### ** Examples\n\nlibrary(plyr)\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n#Compute some useful statistics per cell.\ncell_stats = ddply(\n .data = ANT\n , .variables = .( subnum , group , cue , flank )\n , .fun = function(x){\n #Compute error rate as percent.\n error_rate = mean(x$error)*100\n #Compute mean RT (only accurate trials).\n mean_rt = mean(x$rt[x$error==0])\n #Compute SD RT (only accurate trials).\n sd_rt = sd(x$rt[x$error==0])\n to_return = data.frame(\n error_rate = error_rate\n , mean_rt = mean_rt\n , sd_rt = sd_rt\n )\n return(to_return)\n }\n)\n\n#Compute the grand mean RT per Ss.\ngmrt = ddply(\n .data = cell_stats\n , .variables = .( subnum , group )\n , .fun = function(x){\n to_return = data.frame(\n mrt = mean(x$mean_rt)\n )\n return(to_return)\n }\n)\n\n#Run a purely between-Ss permutation test on the mean_rt data.\nmean_rt_perm = ezPerm(\n data = gmrt\n , dv = mrt\n , wid = subnum\n , between = group\n , perms = 1e1 #1e3 or higher is best for publication\n)\n\n#Show the Permutation test.\nprint(mean_rt_perm)\n\n\n"} {"package":"ez","topic":"ezPlot","snippet":"### Name: ezPlot\n### Title: Plot data from a factorial experiment\n### Aliases: ezPlot\n\n### ** Examples\n\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n\n## Not run: \n##D #Run an ANOVA on the mean correct RT data.\n##D mean_rt_anova = ezANOVA(\n##D data = ANT[ANT$error==0,]\n##D , dv = .(rt)\n##D , wid = .(subnum)\n##D , within = .(cue,flank)\n##D , between = .(group)\n##D )\n##D \n##D #Show the ANOVA and assumption tests.\n##D print(mean_rt_anova)\n## End(Not run)\n\n#Plot the main effect of group.\ngroup_plot = ezPlot(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , wid = .(subnum)\n , between = .(group)\n , x = .(group)\n , do_lines = FALSE\n , x_lab = 'Group'\n , y_lab = 'RT (ms)'\n)\n\n#Show the plot.\nprint(group_plot)\n\n#tweak the plot\n# group_plot = group_plot +\n# theme(\n# panel.grid.major = element_blank()\n# , panel.grid.minor = element_blank()\n# )\n# print(group_plot)\n\n\n\n#use the \"print_code\" argument to print the\n# code for creating the plot and return the\n# data to plot. 
This is useful when you want\n# to learn how to create plots from scratch\n# (which can in turn be useful when you can't\n# get a combination of ezPlot and tweaking to\n# achieve what you want)\ngroup_plot_data = ezPlot(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , wid = .(subnum)\n , between = .(group)\n , x = .(group)\n , do_lines = FALSE\n , x_lab = 'Group'\n , y_lab = 'RT (ms)'\n , print_code = TRUE\n)\n\n\n#Re-plot the main effect of group, using the levels\n##argument to re-arrange/rename levels of group\ngroup_plot = ezPlot(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , wid = .(subnum)\n , between = .(group)\n , x = .(group)\n , do_lines = FALSE\n , x_lab = 'Group'\n , y_lab = 'RT (ms)'\n , levels = list(\n group = list(\n new_order = c('Treatment','Control')\n , new_names = c('Treatment\\nGroup','Control\\nGroup')\n )\n )\n)\n\n#Show the plot.\nprint(group_plot)\n\n\n#Plot the cue*flank interaction.\ncue_by_flank_plot = ezPlot(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , wid = .(subnum)\n , within = .(cue,flank)\n , x = .(flank)\n , split = .(cue)\n , x_lab = 'Flanker'\n , y_lab = 'RT (ms)'\n , split_lab = 'Cue'\n)\n\n#Show the plot.\nprint(cue_by_flank_plot)\n\n\n#Plot the cue*flank interaction by collapsing the cue effect to\n##the difference between None and Double\ncue_by_flank_plot2 = ezPlot(\n data = ANT[ ANT$error==0 & (ANT$cue %in% c('None','Double')) ,]\n , dv = .(rt)\n , wid = .(subnum)\n , within = .(flank)\n , diff = .(cue)\n , reverse_diff = TRUE\n , x = .(flank)\n , x_lab = 'Flanker'\n , y_lab = 'RT Effect (None - Double, ms)'\n)\n\n#Show the plot.\nprint(cue_by_flank_plot2)\n\n\n\n#Plot the group*cue*flank interaction.\ngroup_by_cue_by_flank_plot = ezPlot(\n data = ANT[ANT$error==0,]\n , dv = .(rt)\n , wid = .(subnum)\n , within = .(cue,flank)\n , between = .(group)\n , x = .(flank)\n , split = .(cue)\n , col = .(group)\n , x_lab = 'Flanker'\n , y_lab = 'RT (ms)'\n , split_lab = 'Cue'\n)\n\n#Show the plot.\nprint(group_by_cue_by_flank_plot)\n\n\n#Plot the group*cue*flank interaction in both error rate and mean RT.\ngroup_by_cue_by_flank_plot_both = ezPlot(\n data = list(\n ANT\n , ANT[ANT$error==0,]\n )\n , dv = .(error,rt)\n , wid = .(subnum)\n , within = .(cue,flank)\n , between = .(group)\n , x = .(flank)\n , split = .(cue)\n , col = .(group)\n , x_lab = 'Flanker'\n , split_lab = 'Cue'\n , dv_labs = c('ER (%)', 'RT (ms)')\n , y_free = TRUE\n)\n\n#Show the plot.\nprint(group_by_cue_by_flank_plot_both)\n\n\n\n\n"} {"package":"ez","topic":"ezPlot2","snippet":"### Name: ezPlot2\n### Title: Plot bootstrap predictions and confidence intervals\n### Aliases: ezPlot2\n\n### ** Examples\n\n#see example in ezPredict documentation\n\n\n"} {"package":"ez","topic":"ezPrecis","snippet":"### Name: ezPrecis\n### Title: Obtain a structure summary of a given data frame\n### Aliases: ezPrecis\n\n### ** Examples\n\n#Read in the ANT2 data (see ?ANT2).\ndata(ANT2)\nhead(ANT2)\n\n#Show a summary of the ANT2 data.\nezPrecis(ANT2)\n\n\n"} {"package":"ez","topic":"ezPredict","snippet":"### Name: ezPredict\n### Title: Compute predicted values from the fixed effects of a mixed\n### effects model\n### Aliases: ezPredict\n\n### ** Examples\n\nlibrary(lme4)\n\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\n\n#fit a mixed effects model to the rt data\nrt_fit = lmer(\n formula = rt ~ cue*flank*group + (1|subnum)\n , data = ANT[ANT$error==0,]\n)\n\n#obtain the predictions from the model\nrt_preds = ezPredict(\n fit = rt_fit\n)\n\n\n#visualize the predictions\nezPlot2(\n preds = 
rt_preds\n , x = flank\n , row = cue\n , col = group\n , y_lab = 'RT (ms)'\n)\n\n\n\n"} {"package":"ez","topic":"ezResample","snippet":"### Name: ezResample\n### Title: Resample data from a factorial experiment\n### Aliases: ezResample\n\n### ** Examples\n\nlibrary(plyr)\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n\n#Bootstrap the within-cell variances\nvar_boots = ldply(\n .data = 1:1e1 #1e3 or higher should be used for publication\n , .fun = function(x){\n this_resample = ezResample(\n data = ANT[ANT$error==0,]\n , wid = .(subnum)\n , within = .(cue,flank)\n , between = .(group)\n )\n cell_vars = ddply(\n .data = idata.frame(this_resample)\n , .variables = .(subnum,cue,flank,group)\n , .fun = function(x){\n to_return = data.frame(\n value = var(x$rt)\n )\n return(to_return)\n }\n )\n mean_cell_vars = ddply(\n .data = idata.frame(cell_vars)\n , .variables = .(cue,flank,group)\n , .fun = function(x){\n to_return = data.frame(\n value = mean(x$value)\n )\n return(to_return)\n }\n )\n mean_cell_vars$iteration = x\n return(mean_cell_vars)\n }\n , .progress = 'time'\n)\n\n\n\n\n\n"} {"package":"ez","topic":"ezStats","snippet":"### Name: ezStats\n### Title: Compute descriptive statistics from a factorial experiment\n### Aliases: ezStats\n\n### ** Examples\n\n#Read in the ANT data (see ?ANT).\ndata(ANT)\nhead(ANT)\nezPrecis(ANT)\n\n\n#Run an ANOVA on the mean correct RT data.\nmean_rt_anova = ezANOVA(\n data = ANT[ANT$error==0,]\n , dv = rt\n , wid = subnum\n , within = .(cue,flank)\n , between = group\n)\n\n#Show the ANOVA and assumption tests.\nprint(mean_rt_anova)\n\n#Compute descriptives for the main effect of group.\ngroup_descriptives = ezStats(\n data = ANT[ANT$error==0,]\n , dv = rt\n , wid = subnum\n , between = .(group)\n)\n\n#Show the descriptives.\nprint(group_descriptives)\n\n\n"} {"package":"cld2","topic":"cld2","snippet":"### Name: cld2\n### Title: Compact Language Detector 2\n### Aliases: cld2 detect_language detect_language_mixed\n### detect_language_multi\n\n### ** Examples\n\n# Vectorized function\ntext <- c(\"To be or not to be?\", \"Ce n'est pas grave.\", \"Nou breekt mijn klomp!\")\ndetect_language(text)\n\n## Not run: \n##D # Read HTML from connection\n##D detect_language(url('http://www.un.org/ar/universal-declaration-human-rights/'), plain_text = FALSE)\n##D \n##D # More detailed classification output\n##D detect_language_mixed(\n##D url('http://www.un.org/fr/universal-declaration-human-rights/'), plain_text = FALSE)\n##D \n##D detect_language_mixed(\n##D url('http://www.un.org/zh/universal-declaration-human-rights/'), plain_text = FALSE)\n## End(Not run)\n\n\n"} {"package":"factormodel","topic":"cproxyme","snippet":"### Name: cproxyme\n### Title: cproxyme\n### Aliases: cproxyme\n\n### ** Examples\n\ndat1 <- data.frame(proxy1=c(1,2,3),proxy2=c(0.1,0.3,0.6),proxy3=c(2,3,5))\ncproxyme(dat=dat1,anchor=1)\n## you can specify weights\ncproxyme(dat=dat1,anchor=1,weights=c(0.1,0.5,0.4))\n\n\n\n"} {"package":"factormodel","topic":"dproxyme","snippet":"### Name: dproxyme\n### Title: dproxyme\n### Aliases: dproxyme\n\n### ** Examples\n\ndat1 <- data.frame(proxy1=c(1,2,3),proxy2=c(2,3,4),proxy3=c(4,3,2))\n## default minimum num of obs to run an EM algorithm is 10\ndproxyme(dat=dat1,sbar=2,initvar=1,minobs=3)\n## you can specify weights\ndproxyme(dat=dat1,sbar=2,initvar=1,minobs=3,weights=c(0.1,0.5,0.4))\n\n\n\n\n"} {"package":"factormodel","topic":"makeDummy","snippet":"### Name: makeDummy\n### Title: makeDummy\n### Aliases: makeDummy\n\n### ** 
Examples\n\nmakeDummy(c(1,2,3))\n\n\n\n"} {"package":"factormodel","topic":"weighted.cov","snippet":"### Name: weighted.cov\n### Title: weighted.cov\n### Aliases: weighted.cov\n\n### ** Examples\n\n# If you do not specify weights, \n# it returns the usual unweighted sample covariance \nweighted.cov(x=c(1,3,5),y=c(2,3,1)) \n\nweighted.cov(x=c(1,3,5),y=c(2,3,1),w=c(0.1,0.5,0.4))\n\n\n\n"} {"package":"factormodel","topic":"weighted.var","snippet":"### Name: weighted.var\n### Title: weighted.var\n### Aliases: weighted.var\n\n### ** Examples\n\n## If you do not specify weights, \n## it returns the usual unweighted sample variance\nweighted.var(x=c(1,3,5)) \n\nweighted.var(x=c(1,3,5),w=c(0.1,0.5,0.4))\n\n\n\n"} {"package":"TSEind","topic":"FULLSDi","snippet":"### Name: FULLSDi\n### Title: Full scale-dependent statistics (MAE, MSE, RMSE, MSLE, and\n### RMSLE)\n### Aliases: FULLSDi\n\n### ** Examples\n\nFULLSDi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"FULLSIi","snippet":"### Name: FULLSIi\n### Title: Full scale-independent statistics (MAPE, SMAPE, RAE, RSE, and\n### RRSE)\n### Aliases: FULLSIi\n\n### ** Examples\n\nFULLSIi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"MAEi","snippet":"### Name: MAEi\n### Title: Mean absolute error (MAE)\n### Aliases: MAEi\n\n### ** Examples\n\nMAEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"MAPEi","snippet":"### Name: MAPEi\n### Title: Mean absolute percentage error (MAPE)\n### Aliases: MAPEi\n\n### ** Examples\n\nMAPEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"MSEi","snippet":"### Name: MSEi\n### Title: Mean squared error (MSE) with bias-variance decomposition\n### Aliases: MSEi\n\n### ** Examples\n\nMSEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"MSLEi","snippet":"### Name: MSLEi\n### Title: Mean squared logarithmic error (MSLE)\n### Aliases: MSLEi\n\n### ** Examples\n\nMSLEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"RAEi","snippet":"### Name: RAEi\n### Title: Relative absolute error (RAE)\n### Aliases: RAEi\n\n### ** Examples\n\nRAEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"RMSEi","snippet":"### Name: RMSEi\n### Title: Root mean squared error (RMSE)\n### Aliases: RMSEi\n\n### ** Examples\n\nRMSEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"RMSLEi","snippet":"### Name: RMSLEi\n### Title: Root mean squared logarithmic error (RMSLE)\n### Aliases: RMSLEi\n\n### ** Examples\n\nRMSLEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"RRSEi","snippet":"### Name: RRSEi\n### Title: Root relative squared error (RRSE)\n### Aliases: RRSEi\n\n### ** 
Examples\n\nRRSEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"RSEi","snippet":"### Name: RSEi\n### Title: Relative squared error (RSE)\n### Aliases: RSEi\n\n### ** Examples\n\nRSEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"TSEind","topic":"SMAPEi","snippet":"### Name: SMAPEi\n### Title: Symmetric mean absolute percentage error (SMAPE)\n### Aliases: SMAPEi\n\n### ** Examples\n\nSMAPEi(Actual1=TESTIND$A1, Survey1=TESTIND$S1, Actual2=TESTIND$A1, Survey2=TESTIND$S2,\nActual3=TESTIND$A2, Survey3=TESTIND$S3)\n\n\n"} {"package":"LBSPR","topic":"LBSPRfit","snippet":"### Name: LBSPRfit\n### Title: Fit LBSPR model to length data\n### Aliases: LBSPRfit\n\n### ** Examples\n\n## Not run: \n##D MyFit <- LBSPRfit(LBparameters, LBlengths)\n##D MyFit@Ests\n## End(Not run)\n\n\n\n"} {"package":"LBSPR","topic":"LBSPRsim","snippet":"### Name: LBSPRsim\n### Title: LBSPR Simulation Model\n### Aliases: LBSPRsim\n\n### ** Examples\n\n LB_pars <- new(\"LB_pars\")\n LB_pars@MK <- 1.5\n LB_pars@Linf <- 100\n LB_pars@L50 <- 50\n LB_pars@L95 <- 55\n LB_pars@SL50 <- 60\n LB_pars@SL95 <- 65\n LB_pars@FM <- 1\n Sim <- LBSPRsim(LB_pars)\n Sim@SPR\n\n\n\n"} {"package":"LBSPR","topic":"plotSim","snippet":"### Name: plotSim\n### Title: General plotting function for simulated data\n### Aliases: plotSim\n\n### ** Examples\n\n LB_pars <- new(\"LB_pars\")\n LB_pars@MK <- 1.5\n LB_pars@Linf <- 100\n LB_pars@L50 <- 50\n LB_pars@L95 <- 55\n LB_pars@SL50 <- 60\n LB_pars@SL95 <- 65\n LB_pars@FM <- 1\n Sim <- LBSPRsim(LB_pars)\n plotSim(Sim)\n\n\n\n"} {"package":"minimaxApprox","topic":"minimaxApprox","snippet":"### Name: minimaxApprox\n### Title: Minimax Approximation of Functions\n### Aliases: minimaxApprox\n### Keywords: optimize NumericalMathematics\n\n### ** Examples\n\nminimaxApprox(exp, 0, 1, 5) # Built-in & polynomial\n\nfn <- function(x) sin(x) ^ 2 + cosh(x) # Pre-defined\nminimaxApprox(fn, 0, 1, c(2, 3)) # Rational\n\nminimaxApprox(function(x) x ^ 3 / sin(x), 0.7, 1.6, 6L) # Anonymous\n\nfn <- function(x) besselJ(x, nu = 0) # More than one input\nb0 <- 0.893576966279167522 # Zero of besselY\nminimaxApprox(fn, 0, b0, c(3L, 3L)) # Cf. 
DLMF 3.11.19\n\n\n"} {"package":"minimaxApprox","topic":"coef.minimaxApprox","snippet":"### Name: coef.minimaxApprox\n### Title: Extract coefficients from a '\"minimaxApprox\"' object\n### Aliases: coef.minimaxApprox\n### Keywords: methods NumericalMathematics\n\n### ** Examples\n\nPP <- minimaxApprox(exp, 0, 1, 5)\ncoef(PP)\nidentical(unlist(coef(PP), use.names = FALSE), PP$a)\nRR <- minimaxApprox(exp, 0, 1, c(2, 3))\ncoef(RR)\nidentical(coef(RR), list(a = RR$a, b = RR$b))\n\n\n"} {"package":"minimaxApprox","topic":"minimaxErr","snippet":"### Name: minimaxErr\n### Title: Evaluate the Minimax Approximation Error\n### Aliases: minimaxErr\n### Keywords: NumericalMathematics\n\n### ** Examples\n\n# Show results\nx <- seq(0, 0.5, length.out = 11L)\nmmA <- minimaxApprox(exp, 0, 0.5, 5L)\nerr <- minimaxEval(x, mmA) - exp(x)\nall.equal(err, minimaxErr(x, mmA))\n\n# Plot results\nx <- seq(0, 0.5, length.out = 1001L)\nplot(x, minimaxErr(x, mmA), type = \"l\")\n\n\n"} {"package":"minimaxApprox","topic":"minimaxEval","snippet":"### Name: minimaxEval\n### Title: Evaluate Minimax Approximation\n### Aliases: minimaxEval\n### Keywords: NumericalMathematics\n\n### ** Examples\n\n# Show results\nx <- seq(0, 0.5, length.out = 11L)\nmmA <- minimaxApprox(exp, 0, 0.5, 5L)\napErr <- abs(exp(x) - minimaxEval(x, mmA))\nall.equal(max(apErr), mmA$EE)\n\n# Plot results\ncurve(exp, 0.0, 0.5)\ncurve(minimaxEval(x, mmA), 0.0, 0.5, add = TRUE, col = \"red\", lty = 2L)\n\n\n"} {"package":"minimaxApprox","topic":"plot.minimaxApprox","snippet":"### Name: plot.minimaxApprox\n### Title: Plot errors from a '\"minimaxApprox\"' object\n### Aliases: plot.minimaxApprox\n### Keywords: hplot methods NumericalMathematics\n\n### ** Examples\n\nPP <- minimaxApprox(exp, 0, 1, 5)\nplot(PP)\n\n\n"} {"package":"minimaxApprox","topic":"print.minimaxApprox","snippet":"### Name: print.minimaxApprox\n### Title: Print method for a '\"minimaxApprox\"' object\n### Aliases: print.minimaxApprox\n### Keywords: print methods NumericalMathematics\n\n### ** Examples\n\nPP <- minimaxApprox(exp, 0, 1, 5)\nPP\nprint(PP, digits = 2L)\nprint.default(PP)\n\n\n"} {"package":"MPLikelihoodWB","topic":"LX.mat.weibull","snippet":"### Name: LX.mat.weibull\n### Title: Compensating factor for a possible mathematical disturbance\n### Aliases: LX.mat.weibull\n\n### ** Examples\n\ndat <- data.weibull(n = 20, shape=2, regco=c(2,1.5,3,2.5))\n\npar=c(1,1,1,1,1,1)\n\nLX.mat.weibull(Y=log(dat$ftime),X=model.matrix(ftime~x1+x2+x3+x4,data=dat),\nsigma=2,phi=matrix(par[-1],ncol=1),delta=dat$delta,whc=2)\n\npar=c(1,1,1)\n\nLX.mat.weibull(Y=log(dat$ftime),X=model.matrix(ftime~x1,data=dat),sigma=2,\nphi=matrix(par[-1],ncol=1),delta=dat$delta,whc=2)\n\n\n"} {"package":"MPLikelihoodWB","topic":"Mprofile.wb","snippet":"### Name: Mprofile.wb\n### Title: Modified profile likelihood estimation of Weibull shape and\n### regression parameter\n### Aliases: Mprofile.wb\n### Keywords: Modified profile likelihood Profile likelihood Weibull\n### regression model\n\n### ** Examples\n\ndat <- data.weibull(n = 40, shape=2, regco=c(2,1.5,3,2.5))\n\nMprofile.wb(formula=ftime~x1+x2+x3+x4,censor=\"delta\",data=dat)\n\nsurvreg(Surv(ftime,delta)~x1+x2+x3+x4,data=dat,dist=\"weibull\")\n\n\n"} {"package":"MPLikelihoodWB","topic":"data.weibull","snippet":"### Name: data.weibull\n### Title: Random data set generating function.\n### Aliases: data.weibull\n### Keywords: Simulated Weibull regression data Weibull data with\n### correlated covariates\n\n### ** Examples\n\ndata.weibull(n = 20)\ndata.weibull(n 
= 20, shape=1.7, regco=c(2,1,3,4))\ndata.weibull(n = 20, shape=1.5, ncorvar=4, correlated=TRUE)\n\n\n"} {"package":"MPLikelihoodWB","topic":"infm.weibul","snippet":"### Name: infm.weibul\n### Title: Observed information matrix for fixed regression parameter of\n### interest\n### Aliases: infm.weibul\n\n### ** Examples\n\n\ndat <- data.weibull(n = 20, shape=2, regco=c(2,1.5,3,2.5))\n\npar=c(1,1,1,1,1,1)\n\ninfm.weibul(Y=log(dat$ftime),X=model.matrix(ftime~x1+x2+x3+x4,data=dat),\nsigma=2,phi=matrix(par[-1],ncol=1),delta=dat$delta,whc=2)\n\npar=c(1,1,1)\ninfm.weibul(Y=log(dat$ftime),X=model.matrix(ftime~x1,data=dat),sigma=2,\nphi=matrix(par[-1],ncol=1),delta=dat$delta,whc=2)\n\n\n\n"} {"package":"MPLikelihoodWB","topic":"mplik.wb.bi","snippet":"### Name: mplik.wb.bi\n### Title: Modified profile likelihood function of Weibull regression\n### parameters\n### Aliases: mplik.wb.bi\n### Keywords: Weibull regression model\n\n### ** Examples\n\ndat <- data.weibull(n = 40, shape=2, regco=c(2,1.5,3,2.5))\n\nmplik.wb.bi(par=c(1,1,1,1,1,1),Y=dat$ftime,X=model.matrix(ftime~x1+x2+x3+x4,data=dat),\ndelta=dat$delta,whc=2)\n\n\n"} {"package":"MPLikelihoodWB","topic":"mplik.wb.s","snippet":"### Name: mplik.wb.s\n### Title: Modified profile likelihood function of Weibull shape parameter\n### Aliases: mplik.wb.s\n### Keywords: Weibull regression model\n\n### ** Examples\n\ndat <- data.weibull(n = 40, shape=2, regco=c(2,1.5,3,2.5))\n\nmplik.wb.s(par=c(1,1,1,1,1,1),Y=dat$ftime,X=model.matrix(ftime~x1+x2+x3+x4,data=dat),\ndelta=dat$delta)\n\n\n"} {"package":"auRoc","topic":"auc.nonpara.kernel","snippet":"### Name: auc.nonpara.kernel\n### Title: AUC by Kernel Methods\n### Aliases: auc.nonpara.kernel\n### Keywords: htest\n\n### ** Examples\n\n\n #Example 1\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"FDG\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"FDG\", drop=TRUE)\n auc.nonpara.kernel(x, y)\n\n \n ## Not run: \n##D #Example 2\n##D data(petBrainGlioma)\n##D y <- subset(petBrainGlioma, grade==1, select=\"ACE\", drop=TRUE)\n##D x <- subset(petBrainGlioma, grade==2, select=\"ACE\", drop=TRUE)\n##D auc.nonpara.kernel(x, y, integration=\"TRUE\",\n##D bw=\"sj\", method=\"bootstrapBCa\", nboot=999)\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"auRoc","topic":"auc.nonpara.mw","snippet":"### Name: auc.nonpara.mw\n### Title: AUC Based on the Mann-Whitney Statistic\n### Aliases: auc.nonpara.mw\n### Keywords: htest\n\n### ** Examples\n\n\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"FDG\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"FDG\", drop=TRUE)\n auc.nonpara.mw(x, y)\n auc.nonpara.mw(x, y, method=\"delong\")\n\n\n"} {"package":"auRoc","topic":"auc.para.bayes","snippet":"### Name: auc.para.bayes\n### Title: AUC by the Bayesian MCMC\n### Aliases: auc.para.bayes\n### Keywords: htest\n\n### ** Examples\n\n\n #Example 1\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"FDG\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"FDG\", drop=TRUE)\n auc.para.bayes(x, y, dist=\"exp\")\n\n \n #Example 2\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"ACE\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"ACE\", drop=TRUE)\n auc.para.bayes(x, y, dist=\"normalDV\")\n \n\n\n\n\n"} {"package":"auRoc","topic":"auc.para.frequentist","snippet":"### Name: auc.para.frequentist\n### Title: AUC by Frequentist Parametric Methods\n### Aliases: auc.para.frequentist\n### Keywords: htest\n\n### 
** Examples\n\n\n #Example 1\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"FDG\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"FDG\", drop=TRUE)\n auc.para.frequentist(x, y, dist=\"exp\")\n\n \n #Example 2\n data(petBrainGlioma)\n y <- subset(petBrainGlioma, grade==1, select=\"ACE\", drop=TRUE)\n x <- subset(petBrainGlioma, grade==2, select=\"ACE\", drop=TRUE)\n auc.para.frequentist(x, y, method=\"RG1\")\n \n\n\n\n\n"} {"package":"DTSg","topic":"DTSg","snippet":"### Name: DTSg\n### Title: DTSg class\n### Aliases: DTSg new\n\n### ** Examples\n\n# new DTSg object\n## R6 constructor\nDTSg$new(\n values = flow,\n ID = \"River Flow\"\n)\n\n## abused S4 constructor\nnew(\n Class = \"DTSg\",\n values = flow,\n ID = \"River Flow\"\n)\n\n\n\n"} {"package":"DTSg","topic":"S3WrapperGenerator","snippet":"### Name: S3WrapperGenerator\n### Title: S3 wrapper method generator\n### Aliases: S3WrapperGenerator\n\n### ** Examples\n\n# generate an S3 wrapper method for 'alter()' of 'DTSg'\nalter.DTSg <- S3WrapperGenerator(\n R6Method = DTSg$public_methods$alter\n)\n\n\n\n"} {"package":"DTSg","topic":"aggregate.DTSg","snippet":"### Name: aggregate.DTSg\n### Title: Aggregate values\n### Aliases: aggregate.DTSg aggregate\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# mean yearly river flows\n## R6 method\nx$aggregate(\n funby = byY_____,\n fun = \"mean\",\n na.rm = TRUE\n)$print()\n\n## S3 method\nprint(aggregate(\n x = x,\n funby = byY_____,\n fun = \"mean\",\n na.rm = TRUE\n))\n\n# variance and standard deviation of river flows per quarter\n## R6 method\nx$aggregate(\n funby = byYQ____,\n fun = c(var = \"var\", sd = \"sd\"),\n na.rm = TRUE\n)$print()\n\n## S3 method\nprint(aggregate(\n x = x,\n funby = byYQ____,\n fun = c(var = \"var\", sd = \"sd\"),\n na.rm = TRUE\n))\n\n# mean of river flows of all first and all second half years\n## R6 method\nx$aggregate(\n funby = by_m____,\n fun = \"mean\",\n na.rm = TRUE,\n multiplier = 6\n)$print()\n\n## S3 method\nprint(aggregate(\n x = x,\n funby = by_m____,\n fun = \"mean\",\n na.rm = TRUE,\n multiplier = 6\n))\n\n\n\n"} {"package":"DTSg","topic":"alter.DTSg","snippet":"### Name: alter.DTSg\n### Title: Alter time series\n### Aliases: alter.DTSg alter\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# filter for the first two years\n## R6 method\nx$alter(\n from = \"2007-01-01\",\n to = \"2008-12-31\"\n)$print()\n\n## S3 method\nprint(alter(\n x = x,\n from = \"2007-01-01\",\n to = \"2008-12-31\"\n))\n\n# change periodicity to one month\n## R6 method\nx$alter(by = \"1 month\")$print()\n\n## S3 method\nprint(alter(x = x, by = \"1 month\"))\n\n\n\n"} {"package":"DTSg","topic":"clone.DTSg","snippet":"### Name: clone.DTSg\n### Title: Clone object\n### Aliases: clone.DTSg clone\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# make a deep copy\n## R6 method\nx$clone(deep = TRUE)\n\n## S3 method\nclone(x = x, deep = TRUE)\n\n\n\n"} {"package":"DTSg","topic":"colapply.DTSg","snippet":"### Name: colapply.DTSg\n### Title: Apply function column-wise\n### Aliases: colapply.DTSg colapply\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# linear interpolation of missing values\n## R6 method\nx$colapply(fun = interpolateLinear)$print()\n\n## S3 method\nprint(colapply(x = x, fun = interpolateLinear))\n\n# daily cumulative sums per month\n## R6 method\nx$colapply(\n fun = cumsum,\n helpers = FALSE,\n funby = byYm____\n)$print()\n\n## S3 
method\nprint(colapply(\n x = x,\n fun = cumsum,\n helpers = FALSE,\n funby = byYm____\n))\n\n# calculate moving averages with the help of 'runner' (all four given\n# approaches provide the same result with explicitly missing timestamps)\nif (requireNamespace(\"runner\", quietly = TRUE) &&\n packageVersion(\"runner\") >= package_version(\"0.3.5\")) {\n wrapper <- function(..., .helpers) {\n runner::runner(..., idx = .helpers[[\".dateTime\"]])\n }\n\n ## R6 method\n x$colapply(\n fun = runner::runner,\n f = mean,\n k = 5,\n lag = -2\n )$print()\n x$colapply(\n fun = wrapper,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\"\n )$print()\n x$colapply(\n fun = runner::runner,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\",\n idx = x$getCol(col = \".dateTime\")\n )$print()\n x$colapply(\n fun = runner::runner,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\",\n idx = x[\".dateTime\"]\n )$print()\n\n ## S3 method\n print(colapply(\n x = x,\n fun = runner::runner,\n f = mean,\n k = 5,\n lag = -2\n ))\n print(colapply(\n x = x,\n fun = wrapper,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\"\n ))\n print(colapply(\n x = x,\n fun = runner::runner,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\",\n idx = getCol(x = x, col = \".dateTime\")\n ))\n print(colapply(\n x = x,\n fun = runner::runner,\n f = mean,\n k = \"5 days\",\n lag = \"-2 days\",\n idx = x[\".dateTime\"]\n ))\n}\n\n# calculate rolling correlations somewhat inefficiently with the help of\n# 'runner'\nif (requireNamespace(\"runner\", quietly = TRUE) &&\n packageVersion(\"runner\") >= package_version(\"0.3.8\")) {\n wrapper <- function(x, y, f, k, lag, ...) {\n runner::runner(\n cbind(x, y),\n f = function(x) f(x[, 1], x[, 2]),\n k = k,\n lag = lag\n )\n }\n\n ## R6 method\n x$colapply(\n fun = wrapper,\n y = x[\"flow\"] + rnorm(length(x[\"flow\"])),\n f = cor,\n k = 5,\n lag = -2\n )$print()\n\n ## S3 method\n print(colapply(\n x = x,\n fun = wrapper,\n y = x[\"flow\"] + rnorm(length(x[\"flow\"])),\n f = cor,\n k = 5,\n lag = -2\n ))\n}\n\n\n"} {"package":"DTSg","topic":"cols.DTSg","snippet":"### Name: cols.DTSg\n### Title: Get column names\n### Aliases: cols.DTSg cols\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# get names of numeric columns\n## R6 method\nx$cols(class = \"numeric\")\n\n## 'names()' is a \"hidden\" R6 alias for 'cols()'\nx$names(class = \"numeric\")\n\n## S3 method\ncols(x = x, class = \"numeric\")\n\n\n\n"} {"package":"DTSg","topic":"getCol.DTSg","snippet":"### Name: getCol.DTSg\n### Title: Get column vector\n### Aliases: getCol.DTSg getCol [.DTSg\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# get the first ten values of the \"flow\" column\n## R6 methods\nx$getCol(col = \"flow\")[1:10]\nx$`[`(\"flow\")[1:10]\n\n## S3 methods\ngetCol(x = x, col = \"flow\")[1:10]\nx[\"flow\"][1:10]\n\n\n\n"} {"package":"DTSg","topic":"interpolateLinear","snippet":"### Name: interpolateLinear\n### Title: Linear interpolation\n### Aliases: interpolateLinear\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# linear interpolation of missing values\n## R6 method\nx$colapply(fun = interpolateLinear)$print()\n\n## S3 method\nprint(colapply(x = x, fun = interpolateLinear))\n\n\n\n"} {"package":"DTSg","topic":"merge.DTSg","snippet":"### Name: merge.DTSg\n### Title: Merge two objects\n### Aliases: merge.DTSg merge\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# merge with 'data.table'\n## R6 method\nx$merge(\n y = flow,\n suffixes = 
c(\"_1\", \"_2\")\n)$print()\n\n## S3 method\nprint(merge(\n x = x,\n y = flow,\n suffixes = c(\"_1\", \"_2\")\n))\n\n\n\n"} {"package":"DTSg","topic":"nas.DTSg","snippet":"### Name: nas.DTSg\n### Title: List missing values\n### Aliases: nas.DTSg nas\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# list missing values\n## R6 method\nx$nas()\n\n## S3 method\nnas(x = x)\n\n\n\n"} {"package":"DTSg","topic":"plot.DTSg","snippet":"### Name: plot.DTSg\n### Title: Plot time series data\n### Aliases: plot.DTSg plot\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# plot data\nif (requireNamespace(\"dygraphs\", quietly = TRUE) &&\n requireNamespace(\"RColorBrewer\", quietly = TRUE)) {\n ## R6 method\n x$plot()\n\n ## S3 method\n plot(x = x)\n}\n\n\n\n"} {"package":"DTSg","topic":"print.DTSg","snippet":"### Name: print.DTSg\n### Title: Print object\n### Aliases: print.DTSg print\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# print object\n## R6 method\nx$print()\n\n## S3 method\nprint(x = x)\n\n\n\n"} {"package":"DTSg","topic":"refresh.DTSg","snippet":"### Name: refresh.DTSg\n### Title: Object integrity\n### Aliases: refresh.DTSg refresh\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# check the object's integrity\n## R6 method\nx$refresh()\n\n## S3 method\nrefresh(x = x)\n\n\n\n"} {"package":"DTSg","topic":"rollapply.DTSg","snippet":"### Name: rollapply.DTSg\n### Title: Rolling window function\n### Aliases: rollapply.DTSg rollapply\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# calculate a moving average\n## R6 method\nx$rollapply(\n fun = mean,\n na.rm = TRUE,\n before = 2,\n after = 2\n)$print()\n\n## S3 method\nprint(rollapply(\n x = x,\n fun = mean,\n na.rm = TRUE,\n before = 2,\n after = 2\n))\n\n\n\n"} {"package":"DTSg","topic":"rollback","snippet":"### Name: rollback\n### Title: Rollback of months\n### Aliases: rollback\n\n### ** Examples\n\n# rollback monthly time series\nby <- \"1 month\"\nrollback(\n .dateTime = seq(\n from = as.POSIXct(\"2000-01-31\", tz = \"UTC\"),\n to = as.POSIXct(\"2000-12-31\", tz = \"UTC\"),\n by = by\n ),\n periodicity = by\n)\n\n\n\n"} {"package":"DTSg","topic":"rowaggregate.DTSg","snippet":"### Name: rowaggregate.DTSg\n### Title: Aggregate values row-wise\n### Aliases: rowaggregate.DTSg rowaggregate raggregate\n\n### ** Examples\n\n# new DTSg object\nDT <- data.table::data.table(\n date = flow$date,\n flow1 = flow$flow - abs(rnorm(nrow(flow))),\n flow2 = flow$flow,\n flow3 = flow$flow + abs(rnorm(nrow(flow)))\n)\nx <- DTSg$new(values = DT)\n\n# mean and standard deviation of multiple measurements per timestamp\n## R6 method\nx$rowaggregate(\n resultCols = \"flow\",\n fun = list(mean = mean, sd = sd)\n)$print()\n\n## 'raggregate()' is a \"hidden\" R6 alias for 'rowaggregate()'\nx$raggregate(\n resultCols = \"flow\",\n fun = list(mean = mean, sd = sd)\n)$print()\n\n## S3 method\nprint(rowaggregate(\n x = x,\n resultCols = \"flow\",\n fun = list(mean = mean, sd = sd)\n))\n\n\n\n"} {"package":"DTSg","topic":"rowbind.DTSg","snippet":"### Name: rowbind.DTSg\n### Title: Combine rows\n### Aliases: rowbind.DTSg rowbind rbind\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow[1:500, ])\n\n# combine rows\n## R6 method\nx$rowbind(\n list(flow[1001:1500, ], DTSg$new(values = flow[501:1000, ])),\n flow[1501:.N, ]\n)$print()\n\n## 'rbind()' is a \"hidden\" R6 alias for 'rowbind()'\nx$rbind(\n list(flow[1001:1500, ], 
DTSg$new(values = flow[501:1000, ])),\n flow[1501:.N, ]\n)$print()\n\n## S3 method\nprint(rowbind(\n x = x,\n list(flow[1001:1500, ], DTSg$new(values = flow[501:1000, ])),\n flow[1501:.N, ]\n))\n\n\n\n"} {"package":"DTSg","topic":"setColNames.DTSg","snippet":"### Name: setColNames.DTSg\n### Title: Set column names\n### Aliases: setColNames.DTSg setColNames setnames\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# rename column \"flow\" to \"River Flow\"\n## R6 method\nx$setColNames(\n cols = \"flow\",\n values = \"River Flow\"\n)$print()\n\n## 'setnames()' is a \"hidden\" R6 alias for 'setColNames()'\nx$setnames(\n cols = \"flow\",\n values = \"River Flow\"\n)$print()\n\n## S3 method\nprint(setColNames(\n x = x,\n cols = \"flow\",\n values = \"River Flow\"\n))\n\n\n\n"} {"package":"DTSg","topic":"setCols.DTSg","snippet":"### Name: setCols.DTSg\n### Title: Set column values\n### Aliases: setCols.DTSg setCols set\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# cap river flows to 100\n## R6 method\nx$setCols(\n i = flow > 100,\n cols = \"flow\",\n values = 100\n)$print()\n\n## 'set()' is a \"hidden\" R6 alias for 'setCols()'\nx$set(\n i = flow > 100,\n cols = \"flow\",\n values = 100\n)$print()\n\n## S3 method\nprint(setCols(\n x = x,\n i = flow > 100,\n cols = \"flow\",\n values = 100\n))\n\n# set measurement unit with the help of 'units'\nif (requireNamespace(\"units\", quietly = TRUE)) {\n ## R6 method\n x$setCols(\n cols = \"flow\",\n values = units::set_units(x[\"flow\"], \"m^3/s\")\n )$print()\n\n ## S3 method\n print(setCols(\n x = x,\n cols = \"flow\",\n values = units::set_units(x[\"flow\"], \"m^3/s\")\n ))\n}\n\n\n\n"} {"package":"DTSg","topic":"subset.DTSg","snippet":"### Name: subset.DTSg\n### Title: Subset time series data\n### Aliases: subset.DTSg subset\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# filter for the first six observations\n## R6 method\nx$subset(i = 1:6)$print()\n\n## S3 method\nprint(subset(x = x, i = 1:6))\n\n# filter for the last two observations per year\n## R6 method\nx$subset(\n i = (.N - 1):.N,\n funby = function(x, ...) {data.table::year(x)}\n)$print()\n\n## S3 method\nprint(subset(\n x = x,\n i = (.N - 1):.N,\n funby = function(x, ...) 
{data.table::year(x)}\n))\n\n\n\n"} {"package":"DTSg","topic":"summary.DTSg","snippet":"### Name: summary.DTSg\n### Title: Summarise time series data\n### Aliases: summary.DTSg summary\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# calculate summary statistics\n## R6 method\nx$summary()\n\n## S3 method\nsummary(object = x)\n\n\n\n"} {"package":"DTSg","topic":"values.DTSg","snippet":"### Name: values.DTSg\n### Title: Get values\n### Aliases: values.DTSg values\n\n### ** Examples\n\n# new DTSg object\nx <- DTSg$new(values = flow)\n\n# get values\n## R6 method\nx$values()\n\n## S3 method\nvalues(x = x)\n\n\n\n"} {"package":"bivrp","topic":"bivrp","snippet":"### Name: bivrp\n### Title: Bivariate Residual Plots with Simulation Polygons\n### Aliases: bivrp print.bivrp\n\n### ** Examples\n\n## simulating a bivariate normal response variable\n\nrequire(mvtnorm)\n\nn <- 40\nbeta1 <- c(2, .4)\nbeta2 <- c(.2, .2)\nx <- seq(1, 10, length = n)\nX <- model.matrix(~ x)\nmu1 <- X%*%beta1\nmu2 <- X%*%beta2\nsig1 <- 2\nsig2 <- 3\nsig12 <- -1.7\nSig1 <- diag(rep(sig1), n)\nSig2 <- diag(rep(sig2), n)\nSig12 <- diag(rep(sig12), n)\nV <- rbind(cbind(Sig1, Sig12),\n cbind(Sig12, Sig2))\n\nset.seed(2016)\nY <- as.numeric(rmvnorm(1, c(mu1, mu2), V))\n\n## code for fitting the model estimating covariance or not\nbivnormfit <- function(Y, X, covariance) {\n n <- nrow(X)\n p <- ncol(X)\n y <- cbind(Y[1:n],Y[(n+1):(2*n)])\n XtXinv <- solve(crossprod(X, X))\n beta.hat <- XtXinv %*% crossprod(X, y)\n mu.hat <- X%*%beta.hat\n sigma.hat <- 1/n * t(y - mu.hat) %*% (y - mu.hat)\n if(!covariance) sigma.hat <- diag(diag(sigma.hat))\n cov.betas <- sigma.hat %x% XtXinv\n se.s1 <- sqrt(2*sigma.hat[1]^2/(n-p+1))\n se.s2 <- sqrt(2*sigma.hat[4]^2/(n-p+1))\n if(!covariance) se.s12 <- NA else {\n rho <- sigma.hat[2]/sqrt(sigma.hat[1]*sigma.hat[4])\n se.s12 <- sqrt((1+rho^2)*sigma.hat[1]*sigma.hat[4]/(n-p+1))\n }\n se.betas <- sqrt(diag(cov.betas))\n se.sigma <- c(se.s1, se.s2, se.s12)\n coefs <- c(beta.hat, sigma.hat[1], sigma.hat[4], sigma.hat[2])\n names(coefs) <- c(\"beta1.0\", \"beta1.1\", \"beta2.0\", \"beta2.1\", \"sig1\", \"sig2\", \"sig12\")\n fitted <- c(mu.hat)\n resid <- Y - fitted\n Sig1 <- diag(rep(sigma.hat[1]), n)\n Sig2 <- diag(rep(sigma.hat[4]), n)\n Sig12 <- diag(rep(sigma.hat[2]), n)\n V <- rbind(cbind(Sig1, Sig12),\n cbind(Sig12, Sig2))\n llik <- dmvnorm(Y, c(mu.hat), V, log = TRUE)\n ret <- list(\"coefs\" = coefs, \"covariance\" = covariance, \"n\" = n, \n \"X\" = X, \"fitted\" = fitted, \"resid\" = resid, \"loglik\" = llik,\n \"Y\" = Y, \"se\" = c(se.betas, se.sigma))\n class(ret) <- \"bivnormfit\"\n return(ret)\n}\n\n## fitting bivariate models with and without estimating covariance\nfit0 <- bivnormfit(Y, X, covariance=FALSE)\nfit1 <- bivnormfit(Y, X, covariance=TRUE)\n## likelihood-ratio test (full model minus restricted model)\n2*(fit1$loglik - fit0$loglik)\npchisq(54.24, 1, lower.tail=FALSE)\n\n## function for extracting diagnostics (raw residuals)\ndfun <- function(obj) {\n r <- obj$resid\n n <- obj$n\n return(list(r[1:n], r[(n+1):(2*n)]))\n}\n\n## function for simulating new response variables\nsfun <- function(obj) {\n n <- obj$n\n fitted <- obj$fitted\n sig1 <- obj$coefs[5]\n sig2 <- obj$coefs[6]\n if(obj$covariance) sig12 <- obj$coefs[7] else sig12 <- 0\n Sig1 <- diag(rep(sig1), n)\n Sig2 <- diag(rep(sig2), n)\n Sig12 <- diag(rep(sig12), n)\n V <- rbind(cbind(Sig1, Sig12),\n cbind(Sig12, Sig2))\n # simulate new responses from the fitted means\n Y <- as.numeric(rmvnorm(1, fitted, V))\n return(list(Y[1:n], Y[(n+1):(2*n)], \"X\" = obj$X, \n \"covariance\" = 
obj$covariance))\n}\n\n## function for refitting the model to simulated data\nffun <- function(new.obj) {\n Ynew <- c(new.obj[[1]], new.obj[[2]])\n bivnormfit(Ynew, new.obj$X, new.obj$covariance)\n}\n\n## Bivariate residual plot for model 1 (without estimating covariance)\nplot1 <- bivrp(fit0, diagfun=dfun, simfun=sfun, fitfun=ffun, verb=TRUE)\n## without polygon area reduction\nplot(plot1, conf=1)\n## drawing polygons\nplot(plot1, add.polygon=TRUE)\n## without ordering\nplot(plot1, theta.sort=FALSE, kernel=TRUE, add.dplots=TRUE, superpose=TRUE)\n\n## Bivariate residual plot for model 2 (estimating covariance)\nplot2 <- bivrp(fit1, diagfun=dfun, simfun=sfun, fitfun=ffun, verb=TRUE)\n## without polygon area reduction\nplot(plot2, conf=1)\n## drawing polygons\nplot(plot2, add.polygon=TRUE, conf=1)\n## without ordering\nplot(plot2, theta.sort=FALSE, kernel=TRUE, add.dplots=TRUE, superpose=TRUE)\n\n\n"} {"package":"bivrp","topic":"is_point_inside","snippet":"### Name: is_point_inside\n### Title: Determine if point is inside or outside a simple polygon area\n### Aliases: is_point_inside\n\n### ** Examples\n\nmy_polygon <- data.frame(c(1, 2, 3, 4, 3),\n c(1, 0, .5, 3, 4))\npoints_to_test <- list(c(0, 0), c(2.5, 1), c(3.5, 4))\n\nunlist(lapply(points_to_test, is_point_inside, my_polygon))\n\n\n\n"} {"package":"bivrp","topic":"polygon_area","snippet":"### Name: polygon-operations\n### Title: Polygon operations\n### Aliases: polygon_area get_k get_newpolygon get_reduced_bag\n### compute_bagplot\n### Keywords: polygon\n\n### ** Examples\n\n oldPolygon <- data.frame(x=c(2,1,3,4.5,5), y=c(1,3,5,4.5,2))\n \n # area\n polygon_area(oldPolygon)$area\n # centre of mass\n polygon_area(oldPolygon)$centre\n \n # get a new polygon with 50% of the area of the old one\n newPolygon <- get_newpolygon(conf=.5, P=oldPolygon, method=\"proportional\")\n polygon_area(newPolygon)$area/polygon_area(oldPolygon)$area\n \n # second method\n newPolygon2 <- get_newpolygon(conf=.5, P=oldPolygon, method=\"get.k\")\n polygon_area(newPolygon2)$area/polygon_area(oldPolygon)$area\n \n # illustration\n plot(oldPolygon, xlim=c(0,6), ylim=c(0,6), main=\"(a)\", pch=16)\n polygon(oldPolygon, lwd=2, col=\"#00000033\")\n text(oldPolygon, c(expression(P[1]), expression(P[2]),\n expression(P[3]), expression(P[4]),\n expression(P[5])), pos=c(1,2,3,4,4), cex=2)\n polygon(newPolygon, border=4, lwd=2, col=\"#52A3E199\")\n points(newPolygon, pch=16, col=4)\n text(newPolygon, c(expression(paste(P[1],minute)), expression(paste(P[2],minute)),\n expression(paste(P[3],minute)), expression(paste(P[4],minute)),\n expression(paste(P[5],minute))), pos=c(1,3,2,4,4), col=4, cex=2)\n\n C <- polygon_area(oldPolygon)$centre\n text(C[1], C[2], \"C\", pos=4, cex=2)\n for(i in 1:5) lines(c(C[1], oldPolygon[i,1]), \n c(C[2], oldPolygon[i,2]), lty=2, lwd=2, type=\"b\")\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"compute_m_sigma","snippet":"### Name: compute_m_sigma\n### Title: Computes \"M\" and \"Sigma\" matrices for the sandwich estimator of\n### variance-covariance matrix.\n### Aliases: compute_m_sigma\n\n### ** Examples\n\n compute_m_sigma(tau_t_1, f_t_1, g_t_1, beta_1, alpha_1,\n p_t_1)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"compute_ncp","snippet":"### Name: compute_ncp\n### Title: Computes the non-centrality parameter for an F distributed\n### random variable in the context of a MRT with binary outcome.\n### Aliases: compute_ncp\n\n### ** Examples\n\ncompute_ncp(300, beta_1, m_matrix_1, sigma_matrix_1)\n\n\n"} 
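The MRTSampleSizeBinary examples below exercise two sides of the same calculation: mrt_binary_ss() returns the sample size needed for a target power, and mrt_binary_power() returns the power achieved at a given sample size, so the two can be cross-checked. A minimal consistency sketch, an editorial addition rather than part of the package help; it assumes, as the examples below suggest, that 0.05 is the significance level, 0.2 is the type II error rate (i.e. 80% power), and that mrt_binary_ss() returns the sample size as a single number:

library(MRTSampleSizeBinary)
# sample size needed for 80% power at the 5% significance level
# (uses the package's bundled example inputs tau_t_1, f_t_1, g_t_1, beta_1, alpha_1, p_t_1)
n <- mrt_binary_ss(tau_t_1, f_t_1, g_t_1, beta_1, alpha_1, p_t_1, 0.05, 0.2, FALSE)
# power at that sample size should come out at (or just above) 0.8
mrt_binary_power(tau_t_1, f_t_1, g_t_1, beta_1, alpha_1, p_t_1, 0.05, n)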
{"package":"MRTSampleSizeBinary","topic":"is_full_column_rank","snippet":"### Name: is_full_column_rank\n### Title: Check if a matrix is full column rank.\n### Aliases: is_full_column_rank\n\n### ** Examples\n\n is_full_column_rank(diag(4))\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"max_samp","snippet":"### Name: max_samp\n### Title: Returns default maximum sample size to end power_vs_n_plot().\n### Aliases: max_samp\n\n### ** Examples\n\nmax_samp(100)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"min_samp","snippet":"### Name: min_samp\n### Title: Compute minimum sample size.\n### Aliases: min_samp\n\n### ** Examples\n\nmin_samp(alpha_1, beta_1)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"mrt_binary_power","snippet":"### Name: mrt_binary_power\n### Title: Calculate power for binary outcome MRT\n### Aliases: mrt_binary_power\n\n### ** Examples\n\n mrt_binary_power(tau_t_1, f_t_1, g_t_1, beta_1,\n alpha_1, p_t_1, 0.05, 100)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"mrt_binary_ss","snippet":"### Name: mrt_binary_ss\n### Title: Calculate sample size for binary outcome MRT\n### Aliases: mrt_binary_ss\n\n### ** Examples\n\nmrt_binary_ss(tau_t_1, f_t_1, g_t_1, \n beta_1, alpha_1, p_t_1, \n 0.05, .2, FALSE)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"power_summary","snippet":"### Name: power_summary\n### Title: Calculate sample size at a range of power levels.\n### Aliases: power_summary\n\n### ** Examples\n\n power_summary(tau_t_1, f_t_1, g_t_1,\n beta_1, alpha_1, p_t_1, 0.05)\n\n\n"} {"package":"MRTSampleSizeBinary","topic":"power_vs_n_plot","snippet":"### Name: power_vs_n_plot\n### Title: Returns a plot of power vs sample size in the context of a\n### binary outcome MRT. See the vignette for more details.\n### Aliases: power_vs_n_plot\n\n### ** Examples\n\n power_vs_n_plot(tau_t_1, f_t_1, g_t_1, beta_1, alpha_1,\n p_t_1, 0.05, 15, 700)\n\n\n"} {"package":"QGglmm","topic":"QGicc","snippet":"### Name: QGicc\n### Title: Intra - Class Correlation coefficients (ICC) on the observed\n### data scale\n### Aliases: QGicc\n\n### ** Examples\n\n## Example using Poisson count data\n# Parameters\nmu <- 0\nva <- 0.5\nvm <- 0.2 # Maternal effect\nvp <- 1\n\n# Simulating data l = mu + a + e\nlat <- mu + \n rnorm(1000, 0, sqrt(va)) + \n rnorm(1000, 0, sqrt(vm)) +\n rnorm(1000, 0, sqrt(vp - (va + vm)))\ny <- rpois(1000, exp(lat))\n\n# Computing the broad - sense heritability\nQGicc(mu = mu, var.p = vp, var.comp = va, model = \"Poisson.log\")\n# Computing the maternal effect ICC\nQGicc(mu = mu, var.p = vp, var.comp = vm, model = \"Poisson.log\")\n\n# Using integral computation\nQGicc(mu = mu, var.p = vp, var.comp = vm, model = \"Poisson.log\", closed.form = FALSE)\n# Note that the \"approximation\" is exactly equal to the results obtained with the closed form\n\n# Let's create a custom model\ncustom <- list(inv.link = function(x){exp(x)},\n var.func = function(x){exp(x)},\n d.inv.link = function(x){exp(x)})\n \nQGicc(mu = mu, var.p = vp, var.comp = vm, custom.model = custom)\n# Again, exactly equal\n\n# Integrating over a posterior distribution\n# e.g. 
output from MCMCglmm named \"model\"\n# df <- data.frame(mu = model$Sol[, 'intercept'], \n# vm = model$VCV[, 'mother'], \n# vp = rowSums(model$VCV))\n# params <- apply(df, 1, function(row){\n# QGicc(mu = row$mu, var.comp = row$vm, var.p = row$vp, model = \"Poisson.log\")\n# })\n\n\n"} {"package":"QGglmm","topic":"QGlink.funcs","snippet":"### Name: QGlink.funcs\n### Title: List of functions according to a distribution and a link\n### function\n### Aliases: QGlink.funcs\n\n### ** Examples\n\n## Getting the functions for a Poisson.log model\nQGlink.funcs(\"Poisson.log\")\n# Note that because the variance is equal to the mean in a Poisson distribution\n# and the derivative of exp is exp\n# all functions are the same!\n\n## Getting the functions for a binom1.probit model\nQGlink.funcs(\"binom1.probit\")\n\n## The function QGparams automatically computes these functions\nQGparams(mu = 0, var.p = 2, var.a = 1, model = \"binom1.logit\")\n# Hence this is the same as using the custom.model argument with QGlink.funcs\nQGparams(mu = 0, var.p = 2, var.a = 1, custom.model = QGlink.funcs(\"binom1.logit\"))\n\n## We can create our own custom set of functions\n# Let's create a custom model exactly identical to QGlink.funcs(\"binom1.logit\")\ncustom <- list(inv.link = function(x){plogis(x)},\n var.func = function(x){plogis(x) * (1 - plogis(x))},\n d.inv.link = function(x){dlogis(x)})\n \nQGparams(mu = 0, var.p = 2, var.a = 1, custom.model = custom)\n\n\n"} {"package":"QGglmm","topic":"QGmean","snippet":"### Name: QGmean\n### Title: Compute the phenotypic mean on the observed scale\n### Aliases: QGmean\n\n### ** Examples\n\n## Computing the observed mean for a probit link\nQGmean(mu = 0.3, var = 1, link.inv = pnorm)\n# The theoretical expectation is\n1 - pnorm(0, 0.3, sqrt(1 + 1))\n\n# Or, using the QGlink.funcs function\nQGmean(mu = 0.3, var = 1, link.inv = QGlink.funcs(name = \"binom1.probit\")$inv.link)\n\n## Computing the observed mean for a logarithm link\nQGmean(mu = 1, var = 1, link.inv = exp)\n# The theoretical expectation is\nexp(1 + 0.5 * 1)\n\n# This computation is automatically performed by QGparams\n# but directly using the closed form solution when available\nQGparams(mu = 1, var.p = 1, var.a = 0.5, model = \"Poisson.log\")\n\n\n\n"} {"package":"QGglmm","topic":"QGmvicc","snippet":"### Name: QGmvicc\n### Title: Intra - Class Correlation coefficients (ICC) on the observed\n### data scale (multivariate analysis).\n### Aliases: QGmvicc\n\n### ** Examples\n\n## Example using a bivariate model (Binary trait/Gaussian trait)\n# Parameters\nmu <- c(0, 1)\nG <- diag(c(0.5, 2))\nM <- diag(c(0.2, 1)) # Maternal effect VCV matrix\nP <- diag(c(1, 4))\n\n# Broad - sense \"G-matrix\" on observed data scale\n## Not run: QGmvicc(mu = mu, vcv.comp = G, vcv.P = P, models = c(\"binom1.probit\", \"Gaussian\"))\n# Maternal effect VCV matrix on observed data scale\n## Not run: QGmvicc(mu = mu, vcv.comp = M, vcv.P = P, models = c(\"binom1.probit\", \"Gaussian\"))\n# Reminder: the results are the same here because we have no correlation between the two traits\n\n# Defining the model \"by hand\" using the list\nlist.models = list(\n model1 = list(inv.link = function(x){pnorm(x)},\n d.inv.link = function(x){dnorm(x)},\n var.func = function(x){pnorm(x) * (1 - pnorm(x))}),\n model2 = list(inv.link = function(x){x},\n d.inv.link = function(x){1},\n var.func = function(x){0})\n)\n# Running the same analysis as above\nQGmvicc(mu = mu, vcv.comp = M, vcv.P = P, models = list.models)\n\n# Using predicted values\n# Say we have 
100 individuals\nn <- 100\n# Let's simulate predicted values\np <- matrix(c(runif(n), runif(n)), ncol = 2)\n# Note that p has as many columns as we have traits (i.e. two)\n# Multivariate analysis with predicted values\n## Not run: QGmvicc(predict = p, vcv.comp = M, vcv.P = P, models = c(\"binom1.probit\", \"Gaussian\"))\n# That can be a bit long to run!\n\n\n"} {"package":"QGglmm","topic":"QGmvmean","snippet":"### Name: QGmvmean\n### Title: Compute the multivariate phenotypic mean on the observed scale\n### Aliases: QGmvmean\n\n### ** Examples\n\n## Example using a bivariate model (Binary trait/Gaussian trait)\n# Parameters\nmu <- c(0, 1)\nP <- diag(c(1, 4))\n\n# Note: no phenotypic nor genetic correlations, hence should be equal to the univariate case!\n\n# Setting up the link functions\n# Note that because \"cubature\" is used to compute the integrals,\n# the functions must take a matrix as input and yield a matrix as output,\n# each row corresponding to a trait\ninv.links <- function(mat) {matrix(c(pnorm(mat[1, ]), mat[2, ]), nrow = 2, byrow = TRUE)}\n# probit link and identity link respectively\n\n# Computing the multivariate mean on observed scale\nQGmvmean(mu = mu, vcov = P, link.inv = inv.links)\nQGmean(mu = 0, var = 1, link.inv = pnorm) # Same result as trait 1!\nQGmean(mu = 1, var = 4, link.inv = function(x){x}) # Same result as trait 2!\n# Reminder: the results are the same here because we have no correlation between the two traits\n\n\n"} {"package":"QGglmm","topic":"QGmvparams","snippet":"### Name: QGmvparams\n### Title: Quantitative Genetics parameters from GLMM estimates\n### (multivariate analysis).\n### Aliases: QGmvparams\n\n### ** Examples\n\n## Example using a bivariate model (Binary trait/Gaussian trait)\n# Parameters\nmu <- c(0, 1)\nG <- diag(c(0.5, 2))\nP <- diag(c(1, 4))\n\n# Note: no phenotypic nor genetic correlations, hence should be equal to the univariate case!\n\n# Multivariate analysis\nQGmvparams(mu = mu, vcv.G = G, vcv.P = P, models = c(\"binom1.probit\", \"Gaussian\"))\nQGparams(mu = 0, var.a = 0.5, var.p = 1, model = \"binom1.probit\") # Consistent results!\n# Reminder: the results are the same here because we have no correlation between the two traits\n\n# Defining the model \"by hand\" using the list\nlist.models = list(\n model1 = list(inv.link = function(x){pnorm(x)},\n d.inv.link = function(x){dnorm(x)},\n var.func = function(x){pnorm(x) * (1 - pnorm(x))}),\n model2 = list(inv.link = function(x){x},\n d.inv.link = function(x){1},\n var.func = function(x){0})\n)\n# Running the same analysis as above\nQGmvparams(mu = mu, vcv.G = G, vcv.P = P, models = list.models) # Same results!\n\n# Using predicted values\n# Say we have 100 individuals\nn <- 100\n# Let's simulate predicted values\np <- matrix(c(runif(n), runif(n)), ncol = 2)\n# Note that p has as many columns as we have traits (i.e. 
two)\n# Multivariate analysis with predicted values\n## Not run: QGmvparams(predict = p, vcv.G = G, vcv.P = P, models = c(\"binom1.probit\", \"Gaussian\"))\n\n\n"} {"package":"QGglmm","topic":"QGmvpred","snippet":"### Name: QGmvpred\n### Title: Predict the evolutionary response to selection on the observed\n### scale\n### Aliases: QGmvpred\n\n### ** Examples\n\n## Bivariate example with a binary trait and a Gaussian one\n\n# Assume a bivariate GLMM with Binomial(probit)/Gaussian distributions with:\nmu <- c(0, 10)\nG <- matrix(c(0.5, 0, 0, 1), nrow = 2)\nP <- matrix(c(1, 0, 0, 2), nrow = 2) \n\n# Link functions\ninv.links = function(vec){c(pnorm(vec[1]), vec[2])}\n\n# Creating the expected fitness function\n# i.e. expected fitness given a latent trait vector l\n# Say if the binary trait is 1, then the fitness is 0.5 * \"the Gaussian trait\"\n# But if the binary trait is 0, then the fitness is 0\nlat.fit <- function(mat) {pnorm(mat[1, ]) * 0.5 * mat[2, ]}\n# Derivative of the above function\n# This function yields a matrix whose rows are the derivatives with respect to each trait\nd.lat.fit <- function(mat) {matrix(c(dnorm(mat[1, ]) * 0.5 * mat[2, ], pnorm(mat[1, ]) * 0.5),\n nrow = 2, \n byrow = TRUE)}\n\n# Predicting the latent evolutionary response\npred <- QGmvpred(mu = mu, vcv.P = P, vcv.G = G, fit.func = lat.fit, d.fit.func = d.lat.fit)\n\n\n# Predicting the observed evolutionary response\n# Current observed phenotypic mean\nQGmvmean(mu = mu, vcov = P, link.inv = inv.links)\n# Predicted observed phenotypic mean after selection\nQGmvmean(mu = mu + pred$lat.resp, vcov = P, link.inv = inv.links)\n\n\n"} {"package":"QGglmm","topic":"QGmvpsi","snippet":"### Name: QGmvpsi\n### Title: Compute a multivariate \"Psi\" (used to compute the additive\n### genetic variance on the observed scale).\n### Aliases: QGmvpsi\n\n### ** Examples\n\n## Example using a bivariate model (Binary trait/Gaussian trait)\n# Parameters\nmu <- c(0, 1)\nG <- diag(c(0.5, 2))\nP <- diag(c(1, 4))\n\n# Setting up the derivatives of the inverse-link functions\ndinvs <- function(mat) {matrix(c(dnorm(mat[1, ]), rep(1, length(mat[2, ]))),\n nrow = 2, \n byrow = TRUE)}\n# The derivative of pnorm() is dnorm(), and the derivative of the identity is 1\n\n# Computing Psi\nPsi <- QGmvpsi(mu = mu, vcov = P, d.link.inv = dinvs)\n# Computing genetic additive variance-covariance matrix on the observed scale\nPsi\nG.obs <- Psi %*% G %*% t(Psi)\n\nQGparams(mu = 0, var.a = 0.5, var.p = 1, model = \"binom1.probit\")\n# Same additive variance as trait 1\n# Reminder: the results are the same here because we have no correlation between the two traits\n\n\n"} {"package":"QGglmm","topic":"QGparams","snippet":"### Name: QGparams\n### Title: Quantitative Genetics parameters from GLMM estimates.\n### Aliases: QGparams\n\n### ** Examples\n\n## Example using binary data\n# Parameters\nmu <- 0\nva <- 1\nvp <- 2\n\n# Simulating data l = mu + a + e\nlat <- mu + rnorm(1000, 0, sqrt(va)) + rnorm(1000, 0, sqrt(vp - va))\ny <- rbinom(1000, 1, pnorm(lat))\n\n# Expected results\nQGparams(mu = 0, var.p = 2, var.a = 1, model = \"binom1.probit\")\n# Simulated results for mean and variance\nmean(y)\nvar(y)\n\n# Using integral approximations\nQGparams(mu = 0, var.p = 2, var.a = 1, model = \"binom1.probit\", closed.form = FALSE)\n# Note that the approximation is exactly equal to the results obtained with the closed form\n\n# Let's create a custom model\ncustom <- list(inv.link = function(x){pnorm(x)},\n var.func = function(x){pnorm(x) * (1 - pnorm(x))},\n d.inv.link 
= function(x){dnorm(x)})\n \nQGparams(mu = 0, var.p = 2, var.a = 1, custom.model = custom)\n\n# Using an ordinal model (with 4 categories)\nQGparams(mu = 0.1, var.a = 1, var.p = 2, cut.points = c(-Inf, 0, 0.5, 1, Inf), model = \"ordinal\")\n# Note the slightly different output (see QGmvparams)\n\n# Integrating over a posterior distribution\n# e.g. output from MCMCglmm named \"model\"\n# df <- data.frame(mu = model$Sol[, 'intercept'], \n# va = model$VCV[, 'animal'], \n# vp = rowSums(model$VCV))\n# params <- apply(df, 1, function(row){\n# QGparams(mu = row$mu, var.a = row$va, var.p = row$vp, model = \"Poisson.log\")\n# })\n\n\n"} {"package":"QGglmm","topic":"QGpred","snippet":"### Name: QGpred\n### Title: Predict the evolutionary response to selection on the observed\n### scale\n### Aliases: QGpred\n\n### ** Examples\n\n## Example with binary traits and a fitness measurement\n# Let's assume we have a binary trait measurement \n# and associated fitness of trait 0 (say 1) and trait 1 (say 1.86)\n# We further assume a GLMM with Binomial distribution and probit link with:\nmu <- -0.1\nva <- 2\nvp <- 2.5 # note that the latent heritability is very high\n\n# Creating the latent fitness function\n# i.e. expected fitness given a latent trait l\n# We have a trait 1 with probability pnorm(l) with fitness 1.86\n# We have a trait 0 with probability (1 - pnorm(l)) with fitness 1\nlat.fit <- function(l){(1 - pnorm(l)) * 1 + pnorm(l) * 1.86}\n# Derivative of the fitness function\nd.lat.fit <- function(l){-dnorm(l) * 1 + dnorm(l) * 1.86}\n\n# Predicting the latent evolutionary response\npred <- QGpred(mu = mu, var.p = vp, var.a = va, fit.func = lat.fit, d.fit.func = d.lat.fit)\n\n# Predicting the observed evolutionary response\n# Current observed phenotypic mean\nQGmean(mu = mu, var = vp, link.inv = QGlink.funcs(\"binom1.probit\")$inv.link)\n# Predicted observed phenotypic mean after selection\nQGmean(mu = mu + pred$lat.resp, var = vp, link.inv = QGlink.funcs(\"binom1.probit\")$inv.link)\n\n\n"} {"package":"QGglmm","topic":"QGpsi","snippet":"### Name: QGpsi\n### Title: Compute \"Psi\" (used to compute the additive genetic variance on\n### the observed scale).\n### Aliases: QGpsi\n\n### ** Examples\n\n## Example using binom1.probit model\nmu <- 0\nva <- 1\nvp <- 2\n# The inverse-link for a probit is the CDF of a standard Gaussian\n# Hence its derivative is the PDF of a standard Gaussian\ndinv <- function(x){dnorm(x)}\n\n# Computing Psi\nPsi <- QGpsi(mu = 0, var = 2, d.link.inv = dinv)\n# Computing additive variance on the observed scale\n(Psi^2) * va\n\n# This function is used by QGparams to obtain var.a.obs\nQGparams(mu = 0, var.p = vp, var.a = va, model = \"binom1.probit\")\n# Same results as above!\n\n\n"} {"package":"QGglmm","topic":"QGvar.dist","snippet":"### Name: QGvar.dist\n### Title: Compute the distribution variance\n### Aliases: QGvar.dist\n\n### ** Examples\n\n## Example using Poisson.log model\nmu <- 1\nva <- 0.2\nvp <- 0.5\n\n# The variance function is simply the inverse-link function\n# because the variance of a Poisson is its mean\nvarfunc <- function(x) { exp(x) }\n\nQGvar.dist(mu = mu, var = vp, var.func = varfunc)\n\n# The QGlink.funcs function gives a ready-to-use var.func\nfuncs <- QGlink.funcs(name = \"Poisson.log\")\n\n# Calculating the distribution variance\nvdist <- QGvar.dist(mu = mu, var = vp, var.func = funcs$var.func)\n\nvdist # Same value as above\n\n# Calculating the variance of the expected values\nvexp <- QGvar.exp(mu = mu, var = vp, link.inv = funcs$inv.link)\n\n# The 
phenotypic variance on the observed scale is then:\nvexp + vdist\n\n# This computation is automatically performed by QGparams,\n# which directly uses the closed-form solutions when available\nQGparams(mu = mu, var.p = vp, var.a = va, model = \"Poisson.log\")\n# var.obs is equal to the sum above\n\n\n"} {"package":"QGglmm","topic":"QGvar.exp","snippet":"### Name: QGvar.exp\n### Title: Compute the variance of expected values (i.e. the latent values\n### after inverse-link transformation.)\n### Aliases: QGvar.exp\n\n### ** Examples\n\n## Example using Poisson.log model\nmu <- 1\nva <- 0.2\nvp <- 0.5\n\n# The inverse-link for a logarithm link is the exponential\ninv.link <- function(x){exp(x)}\n\n# We can then calculate the variance of expected values\nQGvar.exp(mu = mu, var = vp, link.inv = inv.link)\n\n# The mean on the observed scale can be computed beforehand\ny_bar <- QGmean(mu = mu, var = vp, link.inv = inv.link)\nQGvar.exp(mu = mu, var = vp, obs.mean = y_bar, link.inv = inv.link)\n\n# The QGlink.funcs function gives a ready-to-use inverse-link function\nfuncs <- QGlink.funcs(name = \"Poisson.log\")\n\n# Calculating the variance of expected values\nvexp <- QGvar.exp(mu = mu, var = vp, obs.mean = y_bar, link.inv = funcs$inv.link)\n\nvexp # Same value as above\n\n# Calculating the associated distribution variance\nvdist <- QGvar.dist(mu = mu, var = vp, var.func = funcs$var.func)\n\n# The phenotypic variance on the observed scale is then:\nvexp + vdist\n\n# This computation is automatically performed by QGparams,\n# which directly uses the closed-form solutions when available\nQGparams(mu = mu, var.p = vp, var.a = va, model = \"Poisson.log\")\n# var.obs is equal to the sum above\n\n\n"} {"package":"QGglmm","topic":"QGvcov","snippet":"### Name: QGvcov\n### Title: Compute the phenotypic variance-covariance matrix on the\n### observed / expected scale\n### Aliases: QGvcov\n\n### ** Examples\n\n## Example using a bivariate model (Binary trait/Gaussian trait)\n# Parameters\nmu <- c(0, 1)\nP <- diag(c(1, 4))\n\n# Note: no phenotypic nor genetic correlations, hence should be equal to the univariate case!\n\n# Setting up the link functions\n# Note that because \"cubature\" is used to compute the integrals,\n# the functions must take a matrix as input and yield a matrix as output,\n# each row corresponding to a trait\ninv.links <- function(mat) {matrix(c(pnorm(mat[1, ]), mat[2, ]), nrow = 2, byrow = TRUE)}\n\n# Setting up the distribution variance functions\nvar.funcs <- function(mat) {matrix(c(pnorm(mat[1, ]) * (1 - pnorm(mat[1, ])), 0 * mat[2, ]),\n nrow = 2, \n byrow = TRUE)}\n# The first row is p * (1 - p) (variance of a binomial)\n# The second row is 0 because no extra distribution is assumed for a Gaussian trait\n\n# Phenotypic VCV matrix on observed scale\nQGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs) \n# Phenotypic VCV matrix on the expected scale\nQGvcov(mu = mu, vcov = P, link.inv = inv.links, var.func = var.funcs, exp.scale = TRUE) \n\nQGvar.exp(mu = 0, var = 1, link.inv = pnorm) # Same variance on the expected scale\nQGvar.exp(mu = 0, var = 1, link.inv = pnorm) +\n QGvar.dist(mu = 0, var = 1, var.func = function(x){pnorm(x) * (1 - pnorm(x))})\n# Same variance on the observed scale\n# Reminder: the results are the same here because we have no correlation between the two traits\n\n\n"} {"package":"Rwave","topic":"DOG","snippet":"### Name: DOG\n### Title: Continuous Wavelet Transform with derivative of Gaussian\n### Aliases: DOG\n### 
Keywords: ts\n\n### ** Examples\n\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n \n DOG(chirp, noctave=5, nvoice=12, 3, twoD=TRUE, plot=TRUE)\n\n\n\n\n"} {"package":"Rwave","topic":"Ekg","snippet":"### Name: Ekg\n### Title: Heart Rate Data\n### Aliases: Ekg\n### Keywords: datasets\n\n### ** Examples\n\ndata(Ekg)\nplot.ts(Ekg)\n\n\n"} {"package":"Rwave","topic":"HOWAREYOU","snippet":"### Name: HOWAREYOU\n### Title: How Are You?\n### Aliases: HOWAREYOU\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(HOWAREYOU)\nplot.ts(HOWAREYOU)\n\n\n\n"} {"package":"Rwave","topic":"HeartRate","snippet":"### Name: HeartRate\n### Title: Pixel from Amber Camara\n### Aliases: HeartRate\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(HeartRate)\nplot.ts(HeartRate)\n\n\n\n"} {"package":"Rwave","topic":"SVD","snippet":"### Name: SVD\n### Title: Singular Value Decomposition\n### Aliases: SVD\n### Keywords: ts\n\n### ** Examples\n\n hilbert <- function(n) { i <- 1:n; 1 / outer(i - 1, i, \"+\") }\n X <- hilbert(6)\n z = SVD(X)\n z\n\n\n"} {"package":"Rwave","topic":"W_tilda.1","snippet":"### Name: W_tilda.1\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.1\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.1)\nplot.ts(W_tilda.1)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.2","snippet":"### Name: W_tilda.2\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.2\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.2)\nplot.ts(W_tilda.2)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.3","snippet":"### Name: W_tilda.3\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.3\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.3)\nplot.ts(W_tilda.3)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.4","snippet":"### Name: W_tilda.4\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.4)\nplot.ts(W_tilda.4)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.5","snippet":"### Name: W_tilda.5\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.5\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.5)\nplot.ts(W_tilda.5)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.6","snippet":"### Name: W_tilda.6\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.6\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.6)\nplot.ts(W_tilda.6)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.7","snippet":"### Name: W_tilda.7\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.7\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.7)\nplot.ts(W_tilda.7)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.8","snippet":"### Name: W_tilda.8\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.8\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.8)\nplot.ts(W_tilda.8)\n\n\n\n"} {"package":"Rwave","topic":"W_tilda.9","snippet":"### Name: W_tilda.9\n### Title: Pixel from Amber Camara\n### Aliases: W_tilda.9\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(W_tilda.9)\nplot.ts(W_tilda.9)\n\n\n\n"} {"package":"Rwave","topic":"YN","snippet":"### Name: YN\n### Title: Logarithms of the Prices of Japanese Yen\n### Aliases: YN\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(YN)\nplot.ts(YN)\n\n\n\n"} {"package":"Rwave","topic":"YNdiff","snippet":"### Name: YNdiff\n### Title: Daily differences of Japanese Yen\n### Aliases: YNdiff\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(YNdiff)\nplot.ts(YNdiff)\n\n\n\n"} {"package":"Rwave","topic":"A0","snippet":"### Name: 
A0\n### Title: Transient Signal\n### Aliases: A0\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(A0)\nplot.ts(A0)\n\n\n\n"} {"package":"Rwave","topic":"A4","snippet":"### Name: A4\n### Title: Transient Signal\n### Aliases: A4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(A4)\nplot.ts(A4)\n\n\n\n"} {"package":"Rwave","topic":"amber7","snippet":"### Name: amber7\n### Title: Pixel from Amber Camara\n### Aliases: amber7\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(amber7)\nplot.ts(amber7)\n\n\n\n"} {"package":"Rwave","topic":"amber8","snippet":"### Name: amber8\n### Title: Pixel from Amber Camara\n### Aliases: amber8\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(amber8)\nplot.ts(amber8)\n\n\n\n"} {"package":"Rwave","topic":"amber9","snippet":"### Name: amber9\n### Title: Pixel from Amber Camara\n### Aliases: amber9\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(amber9)\nplot.ts(amber9)\n\n\n\n"} {"package":"Rwave","topic":"B0","snippet":"### Name: B0\n### Title: Transient Signal\n### Aliases: B0\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(B0)\nplot.ts(B0)\n\n\n\n"} {"package":"Rwave","topic":"B4","snippet":"### Name: B4\n### Title: Transient Signal\n### Aliases: B4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(B4)\nplot.ts(B4)\n\n\n\n"} {"package":"Rwave","topic":"back1.000","snippet":"### Name: back1.000\n### Title: Acoustic Returns\n### Aliases: back1.000\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(back1.000)\nplot.ts(back1.000)\n\n\n\n"} {"package":"Rwave","topic":"back1.180","snippet":"### Name: back1.180\n### Title: Acoustic Returns\n### Aliases: back1.180\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(back1.180)\nplot.ts(back1.180)\n\n\n\n"} {"package":"Rwave","topic":"back1.220","snippet":"### Name: back1.220\n### Title: Acoustic Returns\n### Aliases: back1.220\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(back1.220)\nplot.ts(back1.220)\n\n\n\n"} {"package":"Rwave","topic":"backscatter.1.000","snippet":"### Name: backscatter.1.000\n### Title: Pixel from Amber Camara\n### Aliases: backscatter.1.000\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(backscatter.1.000)\nplot.ts(backscatter.1.000)\n\n\n\n"} {"package":"Rwave","topic":"backscatter.1.180","snippet":"### Name: backscatter.1.180\n### Title: Pixel from Amber Camara\n### Aliases: backscatter.1.180\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(backscatter.1.180)\nplot.ts(backscatter.1.180)\n\n\n\n"} {"package":"Rwave","topic":"backscatter.1.220","snippet":"### Name: backscatter.1.220\n### Title: Pixel from Amber Camara\n### Aliases: backscatter.1.220\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(backscatter.1.220)\nplot.ts(backscatter.1.220)\n\n\n\n"} {"package":"Rwave","topic":"C0","snippet":"### Name: C0\n### Title: Transient Signal\n### Aliases: C0\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(C0)\nplot.ts(C0)\n\n\n\n"} {"package":"Rwave","topic":"C4","snippet":"### Name: C4\n### Title: Transient Signal\n### Aliases: C4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(C4)\nplot.ts(C4)\n\n\n\n"} {"package":"Rwave","topic":"cfamily","snippet":"### Name: cfamily\n### Title: Ridge Chaining Procedure\n### Aliases: cfamily\n### Keywords: ts\n\n### ** Examples\n\n\n## Not run: \n##D data(HOWAREYOU)\n##D plot.ts(HOWAREYOU)\n##D \n##D cgtHOWAREYOU <- cgt(HOWAREYOU,70,0.01,100)\n##D \n##D clHOWAREYOU <- crc(Mod(cgtHOWAREYOU),nbclimb=1000)\n##D \n##D cfHOWAREYOU <- cfamily(clHOWAREYOU,ptile=0.001)\n##D image(cfHOWAREYOU$ordered > 0)\n## 
End(Not run)\n\n\n"} {"package":"Rwave","topic":"cgt","snippet":"### Name: cgt\n### Title: Continuous Gabor Transform\n### Aliases: cgt\n### Keywords: ts\n\n### ** Examples\n\ndata(HOWAREYOU)\n plot.ts(HOWAREYOU)\n \ncgtHOWAREYOU <- cgt(HOWAREYOU,70,0.01,100)\n\n\n\n"} {"package":"Rwave","topic":"ch","snippet":"### Name: ch\n### Title: Chen's Chirp\n### Aliases: ch\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(ch)\nplot.ts(ch)\n\n\n\n"} {"package":"Rwave","topic":"chirpm5db.dat","snippet":"### Name: chirpm5db.dat\n### Title: Pixel from Amber Camara\n### Aliases: chirpm5db.dat\n### Keywords: datasets\n\n### ** Examples\n\n## Not run: \n##D data(chirpm5db.dat)\n##D \n## End(Not run)\n\n\n\n\n"} {"package":"Rwave","topic":"click","snippet":"### Name: click\n### Title: Dolphin Click Data\n### Aliases: click\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(click)\nplot.ts(click)\n\n\n\n"} {"package":"Rwave","topic":"click.asc","snippet":"### Name: click.asc\n### Title: Pixel from Amber Camara\n### Aliases: click.asc\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(click.asc)\nplot.ts(click.asc)\n\n\n\n"} {"package":"Rwave","topic":"crc","snippet":"### Name: crc\n### Title: Ridge Extraction by Crazy Climbers\n### Aliases: crc\n### Keywords: ts\n\n### ** Examples\n\n data(HOWAREYOU)\n plot.ts(HOWAREYOU)\n \ncgtHOWAREYOU <- cgt(HOWAREYOU,70,0.01,100)\n\nclHOWAREYOU <- crc(Mod(cgtHOWAREYOU),nbclimb=1000)\n\n\n\n"} {"package":"Rwave","topic":"cwt","snippet":"### Name: cwt\n### Title: Continuous Wavelet Transform\n### Aliases: cwt\n### Keywords: ts\n\n### ** Examples\n\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n retChirp <- cwt(chirp, noctave=5, nvoice=12)\n\n\n"} {"package":"Rwave","topic":"cwtimage","snippet":"### Name: cwtimage\n### Title: Continuous Wavelet Transform Display\n### Aliases: cwtimage\n### Keywords: ts\n\n### ** Examples\n\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n retChirp <- cwt(chirp, noctave=5, nvoice=12, twoD=FALSE, plot=FALSE)\n retPolar <- cwtpolar(retChirp)\n retImageMod <- cwtimage(retPolar$modulus)\n retImageArg <- cwtimage(retPolar$argument)\n\n\n"} {"package":"Rwave","topic":"cwtp","snippet":"### Name: cwtp\n### Title: Continuous Wavelet Transform with Phase Derivative\n### Aliases: cwtp\n### Keywords: ts\n\n### ** Examples\n\n ## discards imaginary part with error,\n ## c code does not account for Im(input)\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n chirp <- chirp + 1i * sin(2*pi * (x + 0.004 * (x-256)^2 ) / 16)\n retChirp <- cwtp(chirp, noctave=5, nvoice=12)\n\n\n"} {"package":"Rwave","topic":"cwtpolar","snippet":"### Name: cwtpolar\n### Title: Conversion to Polar Coordinates\n### Aliases: cwtpolar\n### Keywords: ts\n\n### ** Examples\n\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n retChirp <- cwt(chirp, noctave=5, nvoice=12, twoD=FALSE, plot=FALSE)\n retPolar <- cwtpolar(retChirp)\n\n\n"} {"package":"Rwave","topic":"cwtTh","snippet":"### Name: cwtTh\n### Title: Cauchy's wavelet transform\n### Aliases: cwtTh\n### Keywords: ts\n\n### ** Examples\n\n x <- 1:512\n chirp <- sin(2*pi * (x + 0.002 * (x-256)^2 ) / 16)\n retChirp <- cwtTh(chirp, noctave=5, nvoice=12, moments=20)\n\n\n"} {"package":"Rwave","topic":"D0","snippet":"### Name: D0\n### Title: Transient Signal\n### Aliases: D0\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(D0)\nplot.ts(D0)\n\n\n\n"} {"package":"Rwave","topic":"D4","snippet":"### Name: D4\n### Title: Transient Signal\n### Aliases: 
D4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(D4)\nplot.ts(D4)\n\n\n\n"} {"package":"Rwave","topic":"gabor","snippet":"### Name: gabor\n### Title: Generate Gabor function\n### Aliases: gabor\n### Keywords: ts\n\n### ** Examples\n\n\nm1 = gabor(1024, 512, 2 * pi, 20 )\n\nplot.ts(Re(m1) )\n\n\n\n\n"} {"package":"Rwave","topic":"hurst.est","snippet":"### Name: hurst.est\n### Title: Estimate Hurst Exponent\n### Aliases: hurst.est\n### Keywords: ts\n\n### ** Examples\n\n# White Noise Hurst Exponent: The plots on the top row of Figure 6.8\n# were produced by the following S-commands. These make use of the two\n# functions hurst.est (estimation of Hurst exponent from CWT) and\n# wspec.pl (display wavelet spectrum).\n\n# Compare the periodogram and the wavelet spectral estimate.\nwnoise <- rnorm(8192)\nplot.ts(wnoise)\nspwnoise <- fft(wnoise)\nspwnoise <- Mod(spwnoise)\nspwnoise <- spwnoise*spwnoise\nplot(spwnoise[1:4096], log=\"xy\", type=\"l\")\nlswnoise <- lsfit(log10(1:4096), log10(spwnoise[1:4096]))\nabline(lswnoise$coef)\ncwtwnoise <- DOG(wnoise, 10, 5, 1, plot=FALSE)\nmcwtwnoise <- Mod(cwtwnoise)\nmcwtwnoise <- mcwtwnoise*mcwtwnoise\nwspwnoise <- tfmean(mcwtwnoise, plot=FALSE)\nwspec.pl(wspwnoise, 5)\nhurst.est(wspwnoise, 1:50, 5)\n# (an integrated-series cross-check is sketched after this group of examples)\n\n\n"} {"package":"Rwave","topic":"morlet","snippet":"### Name: morlet\n### Title: Morlet Wavelets\n### Aliases: morlet\n### Keywords: ts\n\n### ** Examples\n\n\nm1 = morlet(1024, 512, 20, w0=2 * pi)\nplot.ts(Re(m1) )\n\n\n\n\n"} {"package":"Rwave","topic":"noisy.dat","snippet":"### Name: noisy.dat\n### Title: Pixel from Amber Camara\n### Aliases: noisy.dat\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(noisy.dat)\nplot.ts(noisy.dat)\n\n\n\n"} {"package":"Rwave","topic":"noisywave","snippet":"### Name: noisywave\n### Title: Noisy Gravitational Wave\n### Aliases: noisywave\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(noisywave)\nplot.ts(noisywave)\n\n\n\n"} {"package":"Rwave","topic":"pixel_8.7","snippet":"### Name: pixel_8.7\n### Title: Pixel from Amber Camara\n### Aliases: pixel_8.7\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(pixel_8.7)\nplot.ts(pixel_8.7)\n\n\n\n"} {"package":"Rwave","topic":"pixel_8.8","snippet":"### Name: pixel_8.8\n### Title: Pixel from Amber Camara\n### Aliases: pixel_8.8\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(pixel_8.8)\nplot.ts(pixel_8.8)\n\n\n\n"} {"package":"Rwave","topic":"pixel_8.9","snippet":"### Name: pixel_8.9\n### Title: Pixel from Amber Camara\n### Aliases: pixel_8.9\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(pixel_8.9)\nplot.ts(pixel_8.9)\n\n\n\n"} {"package":"Rwave","topic":"pure.dat","snippet":"### Name: pure.dat\n### Title: Pixel from Amber Camara\n### Aliases: pure.dat\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(pure.dat)\nplot.ts(pure.dat)\n\n\n\n"} {"package":"Rwave","topic":"purwave","snippet":"### Name: purwave\n### Title: Pure Gravitational Wave\n### Aliases: purwave\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(purwave)\nplot.ts(purwave)\n\n\n\n"} {"package":"Rwave","topic":"sig_W_tilda.1","snippet":"### Name: sig_W_tilda.1\n### Title: Pixel from Amber Camara\n### Aliases: sig_W_tilda.1\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(sig_W_tilda.1)\nplot.ts(sig_W_tilda.1)\n\n\n\n"} {"package":"Rwave","topic":"sig_W_tilda.2","snippet":"### Name: sig_W_tilda.2\n### Title: Pixel from Amber Camara\n### Aliases: sig_W_tilda.2\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(sig_W_tilda.2)\nplot.ts(sig_W_tilda.2)\n\n\n\n"} 
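# Editor's note (not from the Rwave manual): the hurst.est example above fits a
# log-log slope to the time-averaged wavelet spectrum of white noise. As a hedged
# cross-check sketch, the same calls can be applied to an integrated
# (cumulative-sum) series; integration steepens the spectral slope, so the
# slope-based estimate should come out larger than for white noise.
bmnoise <- cumsum(rnorm(8192))       # integrated ("Brownian-like") series
cwtbm <- DOG(bmnoise, 10, 5, 1, plot=FALSE)
mcwtbm <- Mod(cwtbm)
mcwtbm <- mcwtbm*mcwtbm              # squared modulus of the CWT
wspbm <- tfmean(mcwtbm, plot=FALSE)  # time-averaged wavelet spectrum
wspec.pl(wspbm, 5)
hurst.est(wspbm, 1:50, 5)            # expect a larger estimate than for wnoise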
{"package":"Rwave","topic":"sig_W_tilda.3","snippet":"### Name: sig_W_tilda.3\n### Title: Pixel from Amber Camara\n### Aliases: sig_W_tilda.3\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(sig_W_tilda.3)\nplot.ts(sig_W_tilda.3)\n\n\n\n"} {"package":"Rwave","topic":"sig_W_tilda.4","snippet":"### Name: sig_W_tilda.4\n### Title: Pixel from Amber Camara\n### Aliases: sig_W_tilda.4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(sig_W_tilda.4)\nplot.ts(sig_W_tilda.4)\n\n\n\n"} {"package":"Rwave","topic":"sig_W_tilda.5","snippet":"### Name: sig_W_tilda.5\n### Title: Pixel from Amber Camara\n### Aliases: sig_W_tilda.5\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(sig_W_tilda.5)\nplot.ts(sig_W_tilda.5)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.1","snippet":"### Name: signal_W_tilda.1\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.1\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.1)\nplot.ts(signal_W_tilda.1)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.2","snippet":"### Name: signal_W_tilda.2\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.2\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.2)\nplot.ts(signal_W_tilda.2)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.3","snippet":"### Name: signal_W_tilda.3\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.3\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.3)\nplot.ts(signal_W_tilda.3)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.4","snippet":"### Name: signal_W_tilda.4\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.4\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.4)\nplot.ts(signal_W_tilda.4)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.5","snippet":"### Name: signal_W_tilda.5\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.5\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.5)\nplot.ts(signal_W_tilda.5)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.6","snippet":"### Name: signal_W_tilda.6\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.6\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.6)\nplot.ts(signal_W_tilda.6)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.7","snippet":"### Name: signal_W_tilda.7\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.7\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.7)\nplot.ts(signal_W_tilda.7)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.8","snippet":"### Name: signal_W_tilda.8\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.8\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.8)\nplot.ts(signal_W_tilda.8)\n\n\n\n"} {"package":"Rwave","topic":"signal_W_tilda.9","snippet":"### Name: signal_W_tilda.9\n### Title: Pixel from Amber Camara\n### Aliases: signal_W_tilda.9\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(signal_W_tilda.9)\nplot.ts(signal_W_tilda.9)\n\n\n\n"} {"package":"Rwave","topic":"yen","snippet":"### Name: yen\n### Title: Pixel from Amber Camara\n### Aliases: yen\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(yen)\nplot.ts(yen)\n\n\n\n"} {"package":"Rwave","topic":"yendiff","snippet":"### Name: yendiff\n### Title: Pixel from Amber Camara\n### Aliases: yendiff\n### Keywords: datasets\n\n### ** Examples\n\n\ndata(yendiff)\nplot.ts(yendiff)\n\n\n\n"} {"package":"scattermore","topic":"geom_scattermore","snippet":"### Name: 
geom_scattermore\n### Title: geom_scattermore\n### Aliases: geom_scattermore\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(scattermore)\nggplot(data.frame(x = rnorm(1e6), y = rexp(1e6))) +\n geom_scattermore(aes(x, y, color = x),\n pointsize = 3,\n alpha = 0.1,\n pixels = c(1000, 1000),\n interpolate = TRUE\n ) +\n scale_color_viridis_c()\n\n\n"} {"package":"scattermore","topic":"geom_scattermost","snippet":"### Name: geom_scattermost\n### Title: geom_scattermost\n### Aliases: geom_scattermost\n\n### ** Examples\n\nlibrary(ggplot2)\nlibrary(scattermore)\nd <- data.frame(x = rnorm(1000000), y = rnorm(1000000))\nx_rng <- range(d$x)\nggplot() +\n geom_scattermost(cbind(d$x, d$y),\n color = heat.colors(100, alpha = .01)\n [1 + 99 * (d$x - x_rng[1]) / diff(x_rng)],\n pointsize = 2.5,\n pixels = c(1000, 1000),\n interpolate = TRUE\n )\n\n\n"} {"package":"scattermore","topic":"scattermore","snippet":"### Name: scattermore\n### Title: scattermore\n### Aliases: scattermore\n\n### ** Examples\n\nlibrary(scattermore)\nplot(scattermore(cbind(rnorm(1e6), rnorm(1e6)), rgba = c(64, 128, 192, 10)))\n\n\n"} {"package":"scattermore","topic":"scattermoreplot","snippet":"### Name: scattermoreplot\n### Title: scattermoreplot\n### Aliases: scattermoreplot\n\n### ** Examples\n\n# plot an actual rainbow\nlibrary(scattermore)\nd <- data.frame(s = qlogis(1:1e6 / (1e6 + 1), 6, 0.5), t = rnorm(1e6, pi / 2, 0.5))\nscattermoreplot(\n d$s * cos(d$t),\n d$s * sin(d$t),\n col = rainbow(1e6, alpha = .05)[c((9e5 + 1):1e6, 1:9e5)],\n main = \"scattermore demo\"\n)\n\n\n"} {"package":"filebin","topic":"base_url","snippet":"### Name: base_url\n### Title: Set or query API base URL\n### Aliases: base_url\n\n### ** Examples\n\nbase_url(\"https://testnet.binance.vision/\")\n\n\n"} {"package":"filebin","topic":"bin_archive","snippet":"### Name: bin_archive\n### Title: Get contents of bin as archive\n### Aliases: bin_archive\n\n### ** Examples\n\n## Not run: \n##D bin_archive(\"placeholder\", file = tempfile(fileext = \".zip\"))\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"bin_delete","snippet":"### Name: bin_delete\n### Title: Delete a bin from Filebin\n### Aliases: bin_delete\n\n### ** Examples\n\n## Not run: \n##D bin_delete(\"placeholder\")\n##D bin_delete(\"https://filebin.net/placeholder/\")\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"bin_get","snippet":"### Name: bin_get\n### Title: Retrieve a bin from Filebin\n### Aliases: bin_get\n\n### ** Examples\n\n## Not run: \n##D posted <- file_post(LOREM_IPSUM)\n##D bin_get(posted$bin)\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"bin_lock","snippet":"### Name: bin_lock\n### Title: Lock a bin on Filebin\n### Aliases: bin_lock\n\n### ** Examples\n\n## Not run: \n##D posted <- file_post(LOREM_IPSUM)\n##D bin_get(posted$bin)\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"bin_name_random","snippet":"### Name: bin_name_random\n### Title: Create random bin name for Filebin\n### Aliases: bin_name_random\n\n### ** Examples\n\nbin_name_random(8)\n\n\n"} {"package":"filebin","topic":"bin_qr_code","snippet":"### Name: bin_qr_code\n### Title: Get QR code for bin on Filebin\n### Aliases: bin_qr_code\n\n### ** Examples\n\n## Not run: \n##D posted <- file_post(LOREM_IPSUM, bin = \"latin-text\")\n##D bin_qr_code(\"latin-text\", file = tempfile(fileext = \".png\"))\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"file_delete","snippet":"### Name: file_delete\n### Title: Delete a file from Filebin\n### Aliases: file_delete\n\n### ** Examples\n\n## Not run: 
\n##D posted <- file_post(LOREM_IPSUM, bin = \"latin-text\")\n##D \n##D file_delete(\"lorem-ipsum.txt\", \"latin-text\")\n##D file_delete(\"https://filebin.net/latin-text/lorem-ipsum.txt\")\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"file_get","snippet":"### Name: file_get\n### Title: Retrieve a file from Filebin\n### Aliases: file_get\n\n### ** Examples\n\n## Not run: \n##D posted <- file_post(LOREM_IPSUM, bin = \"latin-text\")\n##D \n##D # Discard path and just retain filename.\n##D filename <- basename(LOREM_IPSUM)\n##D file_get(filename, \"latin-text\")\n##D \n##D # Delete downloaded file.\n##D file.remove(filename)\n## End(Not run)\n\n\n"} {"package":"filebin","topic":"file_post","snippet":"### Name: file_post\n### Title: Upload a file to Filebin\n### Aliases: file_post\n\n### ** Examples\n\n## Not run: \n##D # Upload a single file.\n##D file_post(LOREM_IPSUM)\n##D # Upload multiple files.\n##D file_post(c(LOREM_IPSUM, MORE_LOREM_IPSUM))\n##D \n##D # Upload to a specific bin.\n##D bin <- bin_name_random(length = 24)\n##D file_post(LOREM_IPSUM, bin)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_dates_to_interpolate","snippet":"### Name: stsm_dates_to_interpolate\n### Title: Create dates to interpolate\n### Aliases: stsm_dates_to_interpolate\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D dates_interp = stsm_dates_to_interpolate(y = NA000334Q$y, dates = NA000334Q$date, \n##D interpolate = \"monthly\")\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_anomalies","snippet":"### Name: stsm_detect_anomalies\n### Title: Detect Anomalies\n### Aliases: stsm_detect_anomalies\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n##D anomalies = stsm_detect_anomalies(model = stsm, y = NA000334Q, plot = TRUE)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_breaks","snippet":"### Name: stsm_detect_breaks\n### Title: Detect Structural Breaks\n### Aliases: stsm_detect_breaks\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n##D breaks = stsm_detect_breaks(model = stsm, y = NA000334Q, plot = TRUE, cores = 2)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_cycle","snippet":"### Name: stsm_detect_cycle\n### Title: Detect cycle from the data\n### Aliases: stsm_detect_cycle\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From 
FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D cycle = stsm_detect_cycle(y = NA000334Q$y, freq = 4)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_frequency","snippet":"### Name: stsm_detect_frequency\n### Title: Detect frequency and dates from the data\n### Aliases: stsm_detect_frequency\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D freq = stsm_detect_frequency(y = NA000334Q)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_multiplicative","snippet":"### Name: stsm_detect_multiplicative\n### Title: Detect if log transformation is best\n### Aliases: stsm_detect_multiplicative\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D multiplicative = stsm_detect_multiplicative(y = NA000334Q$y, freq = 4)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_seasonality","snippet":"### Name: stsm_detect_seasonality\n### Title: Detect seasonality from the data\n### Aliases: stsm_detect_seasonality\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D seasonality = stsm_detect_seasonality(y = NA000334Q$y, freq = 4)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_detect_trend","snippet":"### Name: stsm_detect_trend\n### Title: Detect trend type\n### Aliases: stsm_detect_trend\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D trend = stsm_detect_trend(y = NA000334Q$y, freq = 4)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_estimate","snippet":"### Name: stsm_estimate\n### Title: Trend cycle seasonal decomposition using the Kalman filter.\n### Aliases: stsm_estimate\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= 
\"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_filter","snippet":"### Name: stsm_filter\n### Title: Kalman Filter\n### Aliases: stsm_filter\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n##D fc = stsm_filter(stsm, y = NA000334Q, plot = TRUE)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_forecast","snippet":"### Name: stsm_forecast\n### Title: Kalman Filter and Forecast\n### Aliases: stsm_forecast\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n##D fc = stsm_forecast(stsm, y = NA000334Q, n.ahead = floor(stsm$freq)*3, plot = TRUE)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_prior","snippet":"### Name: stsm_prior\n### Title: Return a naive model prior decomposition\n### Aliases: stsm_prior\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D prior = stsm_prior(y = NA000334Q$y, freq = 4)\n## End(Not run)\n\n\n"} {"package":"autostsm","topic":"stsm_ssm","snippet":"### Name: stsm_ssm\n### Title: State space model\n### Aliases: stsm_ssm\n\n### ** Examples\n\n## Not run: \n##D #GDP Not seasonally adjusted\n##D library(autostsm)\n##D data(\"NA000334Q\", package = \"autostsm\") #From FRED\n##D NA000334Q = data.table(NA000334Q, keep.rownames = TRUE)\n##D colnames(NA000334Q) = c(\"date\", \"y\")\n##D NA000334Q[, \"date\" := as.Date(date)]\n##D NA000334Q[, \"y\" := as.numeric(y)]\n##D NA000334Q = NA000334Q[date >= \"1990-01-01\", ]\n##D stsm = stsm_estimate(NA000334Q)\n##D ssm = stsm_ssm(model = stsm)\n## End(Not run)\n\n\n"} {"package":"ExclusionTable","topic":"exclusion_table","snippet":"### Name: exclusion_table\n### Title: Exclusion Table\n### Aliases: exclusion_table\n\n### ** Examples\n\n#Example without using the obj argument\nexclusion_table(\n data = mtcars,\n exclusion_criteria = c(\"disp <= 70 | disp >= 300\",\n \"as.character(gear) == '4'\"),\n labels_exclusion = c(\"First exclusion\",\n \"Second exclusion\")\n)\n\n#Example using the obj argument\nmy_selection <- c(8, 6)\n\nexclusion_table(\n data = mtcars,\n exclusion_criteria = c(\"cyl %in% my_selection\"),\n labels_exclusion = c(\"First exclusion\"),\n obj = list(my_selection = my_selection)\n)\n\n\n\n"} {"package":"RationalExp","topic":"estimDev","snippet":"### Name: estimDev\n### Title: Estimation of the minimal deviations from rational expectations\n### with unconstrained information set g*\n### Aliases: estimDev\n\n### ** 
Examples\n\nn_p=200\nn_y=200\nsig=0.1\nu=1\nb=0.10\na=2\nrho= 0.4\npsi <- rnorm(n_p,0,u)\npp_y <- runif(n_y,0,1)\nzeta <- rnorm(n_y,a,sig)\nzeta1 <- rnorm(n_y,-a,sig)\npp1_y <- 1*(pp_y <b)\npp2_y <- 1*(pp_y >1-b)\npp3_y <- 1*(pp_y <=(1-b) & pp_y >=b)\npsi_y <-rnorm(n_p,0,u)\ny = rho*psi_y+ pp1_y*zeta + pp2_y*zeta1\n\ng_star <- estimDev(psi,y)\n\n\n\n\n"} {"package":"RationalExp","topic":"test","snippet":"### Name: test\n### Title: Implementation of the RE test with possible survey weights\n### (direct and with parallel computing)\n### Aliases: test\n\n### ** Examples\n\n## The RE test without covariates\nn_p=600\nn_y=n_p\nN <- n_y + n_p\nrho <-0.29\nsig=0.1\nu=1\nb=0.10\na=2\n\npsi <-rnorm(n_p,0,u)\npp_y <- runif(n_y,0,1)\nzeta <- rnorm(n_y,a,sig)\nzeta1 <- rnorm(n_y,-a,sig)\npp1_y <- 1*(pp_y <b)\npp2_y <- 1*(pp_y >1-b)\npp3_y <- 1*(pp_y <=(1-b) & pp_y >=b)\npsi_y <-rnorm(n_y,0,u)\ny = rho*psi_y+ pp1_y*zeta + pp2_y*zeta1\n\n\nD <- rbind(matrix(1,n_y,1),matrix(0,n_p,1))\nY_tilde <- rbind(matrix(y,n_y,1),matrix(psi,n_p,1))\n\n#res <- test(Y_tilde ,D)\n\n\n\n\n"} {"package":"subrank","topic":"corc","snippet":"### Name: corc\n### Title: Function to estimate copula using ranks and sub-sampling\n### Aliases: corc\n\n### ** Examples\n\nlon <- 30\na <- 2\nx <- rnorm(lon)\ny = a*x^2+rnorm(lon)\ndatatable = as.data.frame(cbind(x,y))\nc=corc(datatable,c(\"x\",\"y\"),8)\nc\nsum(c$cop)\n\n\n"} {"package":"subrank","topic":"corc0","snippet":"### Name: corc0\n### Title: Function to estimate copula using ranks and sub-sampling,\n### minimal version.\n### Aliases: corc0\n\n### ** Examples\n\nlon <- 30\na <- 2.85\nx <- rnorm(lon)\ny = a*x^2+rnorm(lon)\nc=corc0(c(x,y),lon,2,8,1e5,75014)\nc\n\nc0=c(\n1203, 1671, 1766, 959, 1586, 1715, 1803, 1205, 1260,1988, 2348, 1917, 3506, 2045, 1340,\n1093, 2694, 2757,2233, 1085, 2322, 1793, 1569, 1263, 1709, 1747, 1512,1308, 1778, 1354,\n1184, 1097, 2487, 2730, 2112, 1100,2435, 2033, 1572, 1093, 1369, 1722, 1462, 1015, 1228,\n1419, 1776, 1852, 1009, 1097, 1179, 1323, 1595, 1316,1477, 2628, 889, 1178, 1981, 4000, \n35, 840, 2091, 4467,0, 27405)\nset.seed(75013)\nlon=30\ndimension=3\nsssize=4\nc0==corc0(rnorm(lon*dimension),lon,dimension,sssize,1e5,75014)\n\n\n"} {"package":"subrank","topic":"desscop","snippet":"### Name: desscop\n### Title: Discrete copula graph, a two-dimensional projection\n### Aliases: desscop\n\n### ** Examples\n\nlon <- 31\na <- 2.85\nx <- rnorm(lon)\ny = a*x^2+rnorm(lon)\ntablo = as.data.frame(cbind(x,y))\nc=corc(tablo,c(\"x\",\"y\"),8)\ndesscop(c,\"x\",\"y\")\n\ntablo = as.data.frame(cbind(x=rep(0,each=lon),y=rep(0,each=lon)))\nc=corc(tablo,c(\"x\",\"y\"),8,mixties=TRUE)\ndesscop(c,\"x\",\"y\")\n\n\n\n"} {"package":"subrank","topic":"desscoptous","snippet":"### Name: desscoptous\n### Title: Discrete copula graph, ALL two-dimensional projections\n### Aliases: desscoptous\n\n### ** Examples\n\nlon <- 31\na <- 2.85\nx <- rnorm(lon)\ny = a*x^2+rnorm(lon)\nz = rnorm(lon)\ntablo = as.data.frame(cbind(x,y,z))\nc=corc(tablo,c(\"x\",\"y\",\"z\"),8)\ndesscoptous(c)\n\n\n"} {"package":"subrank","topic":"estimdep","snippet":"### Name: estimdep\n### Title: Dependence estimation\n### Aliases: estimdep\n\n### ** Examples\n\nlon=3000\nplon=3000\nsubsampsize=20\n\n##############\nx=(runif(lon)-1/2)*3\ny=x^2+rnorm(lon)\nz=rnorm(lon)\ndonori=as.data.frame(cbind(x,y,z))\ndepori=estimdep(donori,c(\"x\",\"y\",\"z\"),subsampsize)\n\nknownvalues=data.frame(z=rnorm(plon))\nprev <- 
predictdep(knownvalues,depori)\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\nknownvalues=data.frame(x=(runif(lon)-1/2)*3)\nprev <- predictdep(knownvalues,depori)\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\nknownvalues=data.frame(y=runif(plon,min=-2,max=4))\nprev <- predictdep(knownvalues,depori)\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\n\n"} {"package":"subrank","topic":"predictdep","snippet":"### Name: predictdep\n### Title: Probability forecasting\n### Aliases: predictdep\n\n### ** Examples\n\nlon=100\nplon=100\nsubsampsize=10\n\nshift=0\nnoise=0\nknowndims=1\n\nx=rnorm(lon)\ny=2*x+noise*rnorm(lon)\ndonori=as.data.frame(cbind(x,y))\ndepori=estimdep(donori,c(\"x\",\"y\"),subsampsize)\n##\nknownvalues=data.frame(x=rnorm(plon)+shift)\nprev <- predictdep(knownvalues,depori)\n##\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n##\nknownvalues=data.frame(x=rnorm(plon)+shift)\nprev <- predictdep(knownvalues,depori,smoothing=\"Beta\")\n##\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\n# an issue here is expected (normal) if |shift|>>1\n\nknownvalues=data.frame(z=rnorm(plon)+shift)\nprev <- predictdep(knownvalues,depori)\n##\nplot(prev$x,prev$y,xlim=c(-2,2),ylim=c(-2,5),pch=20,cex=0.5)\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\n\n"} {"package":"subrank","topic":"predonfly","snippet":"### Name: predonfly\n### Title: Probability forecasting\n### Aliases: predonfly\n\n### ** Examples\n\nlon=100\nplon=30\nsubsampsize=10\n\nx=rnorm(lon)\ny=2*x+rnorm(lon)*0\ndonori=as.data.frame(cbind(x,y))\n##\nknownvalues=data.frame(x=rnorm(plon))\nprev <- predonfly(donori,knownvalues,c(\"x\",\"y\"),subsampsize,100)\n\n##\nplot(prev$x,prev$y,pch=20,cex=0.5,\n ylim=range(c(prev$y,donori$y),na.rm=TRUE),xlim=range(c(prev$x,donori$x)))\npoints(donori[,1:2],col='red',pch=20,cex=.5)\n\nlon=3000\nmg=20\ndimtot=4\nrayon=6\n\ngenboules <- function(lon,a,d)\n{\n ss <- function(vec)\n {return(sum(vec*vec))}\n surface=matrix(nrow=lon,ncol=d,data=rnorm(lon*d))\n rayons=sqrt(apply(surface,1,ss))\n surface=surface/rayons\n return(matrix(nrow=lon,ncol=d,data=rnorm(lon*d))+a*surface)\n}\n\n##############\n\ndonori=genboules(lon,rayon,dimtot)\ndonori=as.data.frame(donori)\n\ndimconnues=3:dimtot\nvalconnues=matrix(nrow=1,ncol=length(dimconnues),data=0)\nvalconnues=as.data.frame(valconnues)\nnames(valconnues)=names(donori)[3:dimtot]\nprev <- predonfly(donori,valconnues,names(donori),subsampsize,100)\n\nboule2=genboules(plon,rayon,2)\n\nplot(boule2[,1:2],xlab='X1',ylab='X2',pch=20,cex=.5)\nplot(prev$V1,prev$V2,xlab='X1',ylab='X2',pch=20,cex=.5)\n\n\n"} {"package":"subrank","topic":"simany","snippet":"### Name: simany\n### Title: Test statistic distribution under any hypothesis\n### Aliases: simany\n\n### ** Examples\n\ndepquad <- function(lon,dd,a)\n{\n x <- rnorm(lon)\n y0 <- a*x^2\n y <- y0 + rnorm(lon)\n reste=rnorm((dd-2)*lon)\n return(c(x,y,reste))\n}\nsims0=simany(101,3,8,50,nbsafe=1)\nseuils=apply(sims0$lrs,3,quantile,0.95)\nseuils=matrix(ncol=4,nrow=50,seuils,byrow=TRUE)\nsims1=simany(101,3,8,50,nbsafe=1,fun=depquad,a=0.5)\napply(sims1$lrs[,1,]>seuils,2,mean)\n\n\n"} {"package":"subrank","topic":"simnul","snippet":"### Name: simnul\n### Title: Test statistic distribution under independence hypothesis\n### Aliases: 
simnul\n\n### ** Examples\n\nlibrary(datasets)\n# plot(swiss)\nc=corc(swiss,1:3,8)\nc\nRV=sum(c$cop*log(c$cop),na.rm=TRUE)+3*log(8)\nsims=simnul(47,3,8,100)\npvalue=mean(RV<sims)\npvalue\n\n\n"} {"package":"qgcomp","topic":"mice.impute.leftcenslognorm","snippet":"### Name: mice.impute.leftcenslognorm\n### Title: Imputation for limits of detection problems\n### Aliases: mice.impute.leftcenslognorm mice.impute.tobit\n\n### ** Examples\n\nN = 100\nset.seed(123)\ndat <- data.frame(y=runif(N), x1=runif(N), x2=runif(N), z=runif(N))\ntrue = qgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'),\n data=dat, q=2, family=gaussian())\nmdat <- dat\nmdat$x1 = ifelse(mdat$x1>0.5, mdat$x1, NA)\nmdat$x2 = ifelse(mdat$x2>0.75, mdat$x2, NA)\ncc <- qgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'),\n data=mdat[complete.cases(mdat),], q=2, family=gaussian())\n\n## Not run: \n##D # note the following example imputes from the wrong parametric model and is expected to be biased\n##D # as a result (but it demonstrates how to use qgcomp and mice together)\n##D library(\"mice\")\n##D library(\"survival\")\n##D set.seed(1231)\n##D impdat = mice(data = mdat,\n##D method = c(\"\", \"leftcenslognorm\", \"leftcenslognorm\", \"\"),\n##D lod=c(NA, 0.5, 0.75, NA), debug=FALSE, m=10)\n##D qc.fit.imp <- list(\n##D call = call(\"qgcomp.glm.noboot(y~., expnms = c('x1', 'x2'), family=gaussian())\"),\n##D call1 = impdat$call,\n##D nmis = impdat$nmis,\n##D analyses = lapply(1:10, function(x) qgcomp.glm.noboot(y~., expnms = c(\"x1\", \"x2\"),\n##D data=complete(impdat, x), family=gaussian(), bayes=TRUE))\n##D )\n##D # alternative way to specify limits of detection (useful if not all observations have same limit)\n##D lodlist = list(rep(NA, N), rep(0.5, N), rep(0.75, N), rep(NA, N))\n##D #lodlist = data.frame(rep(NA, N), rep(0.5, N), rep(0.75, N), rep(NA, N)) # also works\n##D set.seed(1231)\n##D impdat_alt = mice(data = mdat,\n##D method = c(\"\", \"leftcenslognorm\", \"leftcenslognorm\", \"\"),\n##D lod=lodlist, debug=FALSE, m=10)\n##D qc.fit.imp_alt <- list(\n##D call = call(\"qgcomp.glm.noboot(y~., expnms = c('x1', 'x2'), family=gaussian())\"),\n##D call1 = impdat_alt$call,\n##D nmis = impdat_alt$nmis,\n##D analyses = lapply(1:10, function(x) qgcomp.glm.noboot(y~., expnms = c(\"x1\", \"x2\"),\n##D data=complete(impdat_alt, x), family=gaussian(), bayes=TRUE))\n##D )\n##D obj = pool(as.mira(qc.fit.imp))\n##D obj_alt = pool(as.mira(qc.fit.imp_alt))\n##D # true values\n##D true\n##D # complete case analysis\n##D cc\n##D # MI based analysis (identical answers for different ways to specify limits of detection)\n##D summary(obj)\n##D summary(obj_alt)\n##D \n##D # summarizing weights (note that the weights should *not* be pooled \n##D # because they mean different things depending on their direction)\n##D expnms = c(\"x1\", \"x2\")\n##D wts = as.data.frame(t(sapply(qc.fit.imp$analyses, \n##D function(x) c(-x$neg.weights, x$pos.weights)[expnms])))\n##D eachwt = do.call(c, wts)\n##D expwts = data.frame(Exposure = rep(expnms, each=nrow(wts)), Weight=eachwt)\n##D library(ggplot2)\n##D ggplot(data=expwts)+ theme_classic() +\n##D geom_point(aes(x=Exposure, y=Weight)) +\n##D geom_hline(aes(yintercept=0))\n##D \n##D # this function can be used to impute from an intercept-only model\n##D # but you need to \"trick\" mice to bypass checks for collinearity by including\n##D # a variable that does not need to have values imputed (here, y).\n##D # The internal collinearity checks by the mice package remove collinear variables\n##D # and then throw an error if no predictor variables are retained. Here, the 
Here, the \n##D # trick is to use the \"predictorMatrix\" parameter to \"impute\" the non-missing\n##D # variable y using x1 (which does nothing), and remove all predictors from the model for x1.\n##D # This function only imputes x1 from a log normal distribution.\n##D \n##D impdat2 = mice(data = mdat[,c(\"y\",\"x1\")],\n##D method = c(\"\", \"tobit\"), remove.collinear=FALSE,\n##D lod=c(NA, 0.5), debug=FALSE, m=1, \n##D maxit=1, # maxit=1 because there is only 1 variable to impute\n##D predictorMatrix = as.matrix(rbind(c(0,1), c(0,0))))\n##D plot(density(complete(impdat2, 1)$x1))\n##D \n##D # now with survival data (very similar)\n##D impdat = mice(data = mdat,\n##D method = c(\"\", \"tobit\", \"tobit\", \"\"),\n##D lod=c(NA, 0.5, 0.75, NA), debug=FALSE)\n##D qc.fit.imp <- list(\n##D call = call(\"qgcomp.cox.noboot(Surv(y)~., expnms = c('x1', 'x2'))\"),\n##D call1 = impdat$call,\n##D nmis = impdat$nmis,\n##D analyses = lapply(1:5, function(x) qgcomp.cox.noboot(Surv(y)~., expnms = c(\"x1\", \"x2\"),\n##D data=complete(impdat, x)))\n##D )\n##D obj = pool(as.mira(qc.fit.imp))\n##D # MI based analysis\n##D summary(obj)\n##D \n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"modelbound.boot","snippet":"### Name: modelbound.boot\n### Title: Estimating qgcomp regression line confidence bounds\n### Aliases: modelbound.boot\n\n### ** Examples\n\nset.seed(12)\n## Not run: \n##D dat <- data.frame(x1=(x1 <- runif(50)), x2=runif(50), x3=runif(50), z=runif(50),\n##D y=runif(50)+x1+x1^2)\n##D ft <- qgcomp.glm.boot(y ~ z + x1 + x2 + x3, expnms=c('x1','x2','x3'), data=dat, q=5)\n##D modelbound.boot(ft, 0.05)\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"msm.predict","snippet":"### Name: msm.predict\n### Title: Secondary prediction method for the (non-survival) qgcomp MSM.\n### Aliases: msm.predict\n\n### ** Examples\n\nset.seed(50)\ndat <- data.frame(y=runif(50), x1=runif(50), x2=runif(50), z=runif(50))\nobj <- qgcomp.glm.boot(y ~ z + x1 + x2 + I(z*x1), expnms = c('x1', 'x2'), \n data=dat, q=4, B=10, seed=125)\ndat2 <- data.frame(psi=seq(1,4, by=0.1))\nsummary(msm.predict(obj))\nsummary(msm.predict(obj, newdata=dat2))\n\n\n"} {"package":"qgcomp","topic":"msm_fit","snippet":"### Name: msm_fit\n### Title: Fitting marginal structural model (MSM) within quantile\n### g-computation\n### Aliases: msm_fit\n\n### ** Examples\n\nset.seed(50)\ndat <- data.frame(y=runif(200), x1=runif(200), x2=runif(200), z=runif(200))\nX <- c('x1', 'x2')\nqdat <- quantize(dat, X, q=4)$data\nmod <- msm_fit(f=y ~ z + x1 + x2 + I(x1*x2),\n expnms = c('x1', 'x2'), qdata=qdat, intvals=1:4, bayes=FALSE)\nsummary(mod$fit) # outcome regression model\nsummary(mod$msmfit) # msm fit (variance not valid - must be obtained via bootstrap)\n\n\n"} {"package":"qgcomp","topic":"msm_multinomial_fit","snippet":"### Name: msm_multinomial_fit\n### Title: Fitting marginal structural model (MSM) within quantile\n### g-computation\n### Aliases: msm_multinomial_fit\n\n### ** Examples\n\ndata(\"metals\") # from qgcomp package\n# create categorical outcome from the existing continuous outcome (usually, one will already exist)\nmetals$ycat = factor(quantize(metals, \"y\",q=4)$data$y, levels=c(\"0\", \"1\", \"2\", \"3\"), \n labels=c(\"cct\", \"ccg\", \"aat\", \"aag\")) \n# restrict to smaller dataset for simplicity\nsmallmetals = metals[,c(\"ycat\", \"arsenic\", \"lead\", \"cadmium\", \"mage35\")]\n\n### 1: Define mixture and underlying model ####\nmixture = c(\"arsenic\", \"lead\", \"cadmium\")\nf0 = ycat ~ arsenic + lead + cadmium # the 
multinomial model \n# (be sure that factor variables are properly coded ahead of time in the dataset)\nqdat <- quantize(smallmetals, mixture, q=4)$data\nmod <- msm_multinomial_fit(f0,\n expnms = mixture, qdata=qdat, intvals=1:4, bayes=FALSE)\nsummary(mod$fit) # outcome regression model\nsummary(mod$msmfit) # msm fit (variance not valid - must be obtained via bootstrap)\n\n\n"} {"package":"qgcomp","topic":"plot.qgcompfit","snippet":"### Name: plot.qgcompfit\n### Title: Default plotting method for a qgcompfit object\n### Aliases: plot.qgcompfit plot.qgcompmultfit\n\n### ** Examples\n\nset.seed(12)\ndat <- data.frame(x1=(x1 <- runif(100)), x2=runif(100), x3=runif(100), z=runif(100),\n y=runif(100)+x1+x1^2)\nft <- qgcomp.glm.noboot(y ~ z + x1 + x2 + x3, expnms=c('x1','x2','x3'), data=dat, q=4)\nft\n# display weights\nplot(ft)\n# examining fit\nplot(ft$fit, which=1) # residual vs. fitted is not a straight line!\n## Not run: \n##D \n##D # using non-linear outcome model\n##D ft2 <- qgcomp.glm.boot(y ~ z + x1 + x2 + x3 + I(x1*x1), expnms=c('x1','x2','x3'), \n##D data=dat, q=4, B=10)\n##D ft2\n##D plot(ft2$fit, which=1) # much better looking fit diagnostics suggest\n##D # it is better to include an interaction term for x\n##D plot(ft2) # the msm predictions don't match up with a smooth estimate\n##D # of the expected outcome, so we should consider a non-linear MSM\n##D \n##D # using non-linear marginal structural model\n##D ft3 <- qgcomp.glm.boot(y ~ z + x1 + x2 + x3 + I(x1*x1), expnms=c('x1','x2','x3'), \n##D data=dat, q=4, B=10, degree=2)\n##D # plot(ft3$fit, which=1) - not run - this is identical to ft2 fit\n##D plot(ft3) # the MSM estimates look much closer to the smoothed estimates\n##D # suggesting the non-linear MSM fits the data better and should be used\n##D # for inference about the effect of the exposure\n##D \n##D # binary outcomes, logistic model with or without a log-binomial marginal \n##D # structural model\n##D dat <- data.frame(y=rbinom(100,1,0.5), x1=runif(100), x2=runif(100), z=runif(100))\n##D fit1 <- qgcomp.glm.boot(y ~ z + x1 + x2, family=\"binomial\", expnms = c('x1', 'x2'), \n##D data=dat, q=9, B=100, rr=FALSE)\n##D fit2 <- qgcomp.glm.boot(y ~ z + x1 + x2, family=\"binomial\", expnms = c('x1', 'x2'), \n##D data=dat, q=9, B=100, rr=TRUE)\n##D plot(fit1)\n##D plot(fit2)\n##D # Using survival data\n##D set.seed(50)\n##D N=200\n##D dat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))), \n##D d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\n##D expnms=paste0(\"x\", 1:2)\n##D f = survival::Surv(time, d)~x1 + x2\n##D (fit1 <- survival::coxph(f, data = dat))\n##D # non-bootstrap method to get a plot of weights\n##D (obj <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n##D plot(obj)\n##D \n##D # bootstrap method to get a survival curve\n##D # this plots the expected survival curve for the underlying (conditional) model\n##D # as well as the expected survival curve for the MSM under the following scenarios:\n##D # 1) highest joint exposure category\n##D # 2) lowest joint exposure category\n##D # 3) average across all exposure categories \n##D # differences between the MSM and conditional fit suggest that the MSM is not flexible\n##D # enough to accommodate non-linearities in the underlying fit (or they may simply signal that\n##D # MCsize should be higher). 
Note that if linearity\n##D # is assumed in the conditional model, the MSM will typically also appear linear and\n##D # will certainly appear linear if no non-exposure covariates are included in the model\n##D # not run (slow when using boot version to proper precision)\n##D (obj2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, B=10, MCsize=2000))\n##D plot(obj2)\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"pointwisebound.boot","snippet":"### Name: pointwisebound.boot\n### Title: Estimating pointwise comparisons for qgcomp.glm.boot objects\n### Aliases: pointwisebound.boot\n\n### ** Examples\n\nset.seed(12)\n## Not run: \n##D n=100\n##D # non-linear model for continuous outcome\n##D dat <- data.frame(x1=(x1 <- runif(100)), x2=runif(100), x3=runif(100), z=runif(100),\n##D y=runif(100)+x1+x1^2)\n##D ft <- qgcomp.glm.boot(y ~ z + x1 + x2 + x3, expnms=c('x1','x2','x3'), data=dat, q=10)\n##D pointwisebound.boot(ft, alpha=0.05, pointwiseref=3)\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"pointwisebound.noboot","snippet":"### Name: pointwisebound.noboot\n### Title: Estimating pointwise comparisons for qgcomp.glm.noboot objects\n### Aliases: pointwisebound.noboot\n\n### ** Examples\n\nset.seed(12)\n## Not run: \n##D n = 100\n##D dat <- data.frame(x1=(x1 <- runif(n)), x2=(x2 <- runif(n)), \n##D x3=(x3 <- runif(n)), z=(z <- runif(n)),\n##D y=rnorm(n)+x1 + x2 - x3 +z)\n##D # linear model for continuous outcome\n##D ft <- qgcomp.glm.noboot(y ~ z + x1 + x2 + x3, \n##D expnms=c('x1','x2','x3'), data=dat, q=10)\n##D ft2 <- qgcomp.glm.boot(y ~ z + x1 + x2 + x3, \n##D expnms=c('x1','x2','x3'), data=dat, q=10)\n##D pointwisebound.noboot(ft, alpha=0.05, pointwiseref=3)\n##D pointwisebound.boot(ft2, alpha=0.05, pointwiseref=3)\n##D dat <- data.frame(x1=(x1 <- runif(n)), x2=(x2 <- runif(n)), \n##D x3=(x3 <- runif(n)), z=(z <- runif(n)),\n##D y=rbinom(n, 1, 1/(1+exp(-(x1 + x2 - x3 +z)))))\n##D # glms for binary outcome, centering covariate to a potentially more meaningful value\n##D dat$zcen = dat$z - mean(dat$z)\n##D ft <- qgcomp.glm.noboot(y ~ zcen + x1 + x2 + x3, \n##D expnms=c('x1','x2','x3'), data=dat, q=10, family=binomial())\n##D ft2 <- qgcomp.glm.boot(y ~ zcen + x1 + x2 + x3, \n##D expnms=c('x1','x2','x3'), data=dat, q=10, family=binomial())\n##D pointwisebound.noboot(ft, alpha=0.05, pointwiseref=3)\n##D pointwisebound.boot(ft2, alpha=0.05, pointwiseref=3)\n##D dat$z = as.factor(sample(1:3, n, replace=TRUE))\n##D ftf <- qgcomp.glm.noboot(y ~ zcen + x1 + x2 + x3, \n##D expnms=c('x1','x2','x3'), data=dat, q=10, family=binomial())\n##D pointwisebound.noboot(ftf, alpha=0.05, pointwiseref=3)\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"predict.qgcompfit","snippet":"### Name: predict.qgcompfit\n### Title: Default prediction method for a qgcompfit object (non-survival\n### outcomes only)\n### Aliases: predict.qgcompfit\n\n### ** Examples\n\nset.seed(50)\ndat <- data.frame(y=runif(50), x1=runif(50), x2=runif(50), z=runif(50))\nobj1 <- qgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2)\nobj2 <- qgcomp.glm.boot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, B=10, seed=125)\nset.seed(52)\ndat2 <- data.frame(y=runif(50), x1=runif(50), x2=runif(50), z=runif(50))\nsummary(predict(obj1, expnms = c('x1', 'x2'), newdata=dat2))\nsummary(predict(obj2, expnms = c('x1', 'x2'), newdata=dat2))\n\n\n"} {"package":"qgcomp","topic":"print.qgcompfit","snippet":"### Name: print.qgcompfit\n### Title: Default printing method for a qgcompfit object\n### Aliases: 
print.qgcompfit\n\n### ** Examples\n\nset.seed(50)\ndat <- data.frame(y=runif(50), x1=runif(50), x2=runif(50), z=runif(50))\nobj1 <- qgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2)\nobj2 <- qgcomp.glm.boot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, B=10, seed=125)\n# does not need to be explicitly called, but included here for clarity\nprint(obj1)\nprint(obj2)\n\n\n"} {"package":"qgcomp","topic":"qgcomp","snippet":"### Name: qgcomp\n### Title: Quantile g-computation for continuous, binary, count, and\n### censored survival outcomes\n### Aliases: qgcomp\n\n### ** Examples\n\nset.seed(50)\ndat <- data.frame(y=runif(50), x1=runif(50), x2=runif(50), z=runif(50))\nqgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2)\nqgcomp.glm.boot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, B=10, seed=125)\n# automatically selects appropriate method\nqgcomp(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2)\n# note for binary outcome this will choose the risk ratio (and bootstrap methods) by default\ndat <- data.frame(y=rbinom(100, 1, 0.5), x1=runif(100), x2=runif(100), z=runif(100))\n## Not run: \n##D qgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=binomial())\n##D set.seed(1231)\n##D qgcomp.glm.boot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=binomial())\n##D set.seed(1231)\n##D qgcomp(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=binomial())\n##D \n##D # automatically selects appropriate method when specifying rr or degree explicitly\n##D qgcomp(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=binomial(), rr=FALSE)\n##D qgcomp(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=binomial(), rr=TRUE)\n##D qgcomp(y ~ z + factor(x1) + factor(x2), degree=2, expnms = c('x1', 'x2'), data=dat, q=4,\n##D family=binomial())\n##D \n##D # survival objects\n##D set.seed(50)\n##D N=200\n##D dat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))),\n##D d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\n##D expnms=paste0(\"x\", 1:2)\n##D f = survival::Surv(time, d)~x1 + x2\n##D qgcomp(f, expnms = expnms, data = dat)\n##D # note if B or MCsize are set but the model is linear, an error will result\n##D try(qgcomp(f, expnms = expnms, data = dat, B=10, MCsize=100))\n##D # note that in the survival models, MCsize should be set to a large number\n##D # such that results are repeatable (within an error tolerance such as 2 significant digits)\n##D # if you run them under different seed values\n##D f = survival::Surv(time, d)~x1 + x2 + x1:x2\n##D qgcomp(f, expnms = expnms, data = dat, B=10, MCsize=100)\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.cch.noboot","snippet":"### Name: qgcomp.cch.noboot\n### Title: Quantile g-computation for survival outcomes in a case-cohort\n### design under linearity/additivity\n### Aliases: qgcomp.cch.noboot\n\n### ** Examples\n\nset.seed(50)\nN=200\ndat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))), \n d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\nexpnms=paste0(\"x\", 1:2)\nf = survival::Surv(time, d)~x1 + x2\n(fit1 <- survival::coxph(f, data = dat))\n(obj <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n## Not run: \n##D \n##D # weighted analysis\n##D dat$w = runif(N)\n##D qdata = quantize(dat, expnms=expnms)\n##D (obj2 <- qgcomp.cox.noboot(f, expnms = expnms, data = dat, weight=w))\n##D obj2$fit\n##D survival::coxph(f, data = qdata$data, weight=w)\n##D \n##D # not run: bootstrapped 
version is much slower\n##D (obj2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, B=200, MCsize=20000))\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.cox.boot","snippet":"### Name: qgcomp.cox.boot\n### Title: Quantile g-computation for survival outcomes\n### Aliases: qgcomp.cox.boot\n\n### ** Examples\n\nset.seed(50)\nN=200\ndat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))), \n d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\nexpnms=paste0(\"x\", 1:2)\nf = survival::Surv(time, d)~x1 + x2\n(fit1 <- survival::coxph(f, data = dat))\n(obj <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n## Not run: \n##D # not run (slow when using boot version to proper precision)\n##D (obj2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, B=10, MCsize=20000))\n##D \n##D # using future package, marginalizing over confounder z\n##D (obj3 <- qgcomp.cox.boot(survival::Surv(time, d)~x1 + x2 + z, expnms = expnms, data = dat, \n##D B=1000, MCsize=20000, parallel=TRUE, parplan=TRUE))\n##D # non-constant hazard ratio, non-linear terms\n##D (obj4 <- qgcomp.cox.boot(survival::Surv(time, d)~factor(x1) + splines::bs(x2) + z, \n##D expnms = expnms, data = dat, \n##D B=1000, MCsize=20000, parallel=FALSE, degree=1))\n##D \n##D # weighted analysis\n##D dat$w = runif(N)\n##D (objw1 <- qgcomp.cox.noboot(f, expnms = expnms, data = dat, weights=w))\n##D (objw2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, weights=w, B=5, MCsize=20000))\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.cox.noboot","snippet":"### Name: qgcomp.cox.noboot\n### Title: Quantile g-computation for survival outcomes under\n### linearity/additivity\n### Aliases: qgcomp.cox.noboot\n\n### ** Examples\n\nset.seed(50)\nN=200\ndat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))), \n d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\nexpnms=paste0(\"x\", 1:2)\nf = survival::Surv(time, d)~x1 + x2\n(fit1 <- survival::coxph(f, data = dat))\n(obj <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n## Not run: \n##D \n##D # weighted analysis\n##D dat$w = runif(N)\n##D qdata = quantize(dat, expnms=expnms)\n##D (obj2 <- qgcomp.cox.noboot(f, expnms = expnms, data = dat, weight=w))\n##D obj2$fit\n##D survival::coxph(f, data = qdata$data, weight=w)\n##D \n##D # not run: bootstrapped version is much slower\n##D (obj2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, B=200, MCsize=20000))\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.glm.boot","snippet":"### Name: qgcomp.glm.boot\n### Title: Quantile g-computation for continuous and binary outcomes\n### Aliases: qgcomp.glm.boot gcomp.boot qgcomp.boot\n\n### ** Examples\n\nset.seed(30)\n# continuous outcome\ndat <- data.frame(y=rnorm(100), x1=runif(100), x2=runif(100), z=runif(100))\n# Conditional linear slope\nqgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=4, family=gaussian())\n# Marginal linear slope (population average slope, for a purely linear,\n# additive model this will equal the conditional)\n ## Not run: \n##D qgcomp.glm.boot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=4,\n##D family=gaussian(), B=200) # B should be at least 200 in actual examples\n##D # no intercept model\n##D qgcomp.glm.boot(f=y ~ -1+z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=4,\n##D family=gaussian(), B=200) # B should be at least 200 in actual examples\n##D \n##D # Note that these give different answers! 
In the first, the estimate is conditional on Z,\n##D # but in the second, Z is marginalized over via standardization. The estimates\n##D # can be made approximately the same by centering Z (for linear models), but\n##D # the conditional estimate will typically have lower standard errors.\n##D dat$z = dat$z - mean(dat$z)\n##D \n##D # Conditional linear slope\n##D qgcomp.glm.noboot(y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=4, family=gaussian())\n##D # Marginal linear slope (population average slope, for a purely linear,\n##D # additive model this will equal the conditional)\n##D \n##D qgcomp.glm.boot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=4,\n##D family=gaussian(), B=200) # B should be at least 200 in actual examples\n##D \n##D # Population average mixture slope which accounts for non-linearity and interactions\n##D qgcomp.glm.boot(y ~ z + x1 + x2 + I(x1^2) + I(x2*x1), family=\"gaussian\",\n##D expnms = c('x1', 'x2'), data=dat, q=4, B=200)\n##D \n##D # generally non-linear/non-additive underlying models lead to non-linear mixture slopes\n##D qgcomp.glm.boot(y ~ z + x1 + x2 + I(x1^2) + I(x2*x1), family=\"gaussian\",\n##D expnms = c('x1', 'x2'), data=dat, q=4, B=200, deg=2)\n##D \n##D # binary outcome\n##D dat <- data.frame(y=rbinom(50,1,0.5), x1=runif(50), x2=runif(50), z=runif(50))\n##D \n##D # Conditional mixture OR\n##D qgcomp.glm.noboot(y ~ z + x1 + x2, family=\"binomial\", expnms = c('x1', 'x2'),\n##D data=dat, q=2)\n##D \n##D # Marginal mixture OR (population average OR - in general, this will not equal the\n##D # conditional mixture OR due to non-collapsibility of the OR)\n##D qgcomp.glm.boot(y ~ z + x1 + x2, family=\"binomial\", expnms = c('x1', 'x2'),\n##D data=dat, q=2, B=3, rr=FALSE)\n##D \n##D # Population average mixture RR\n##D qgcomp.glm.boot(y ~ z + x1 + x2, family=\"binomial\", expnms = c('x1', 'x2'),\n##D data=dat, q=2, rr=TRUE, B=3)\n##D \n##D # Population average mixture RR, indicator variable representation of x2\n##D # note that I(x==...) operates on the quantile-based category of x,\n##D # rather than the raw value\n##D res = qgcomp.glm.boot(y ~ z + x1 + I(x2==1) + I(x2==2) + I(x2==3),\n##D family=\"binomial\", expnms = c('x1', 'x2'), data=dat, q=4, rr=TRUE, B=200)\n##D res$fit\n##D plot(res)\n##D \n##D # now add in a non-linear MSM\n##D res2 = qgcomp.glm.boot(y ~ z + x1 + I(x2==1) + I(x2==2) + I(x2==3),\n##D family=\"binomial\", expnms = c('x1', 'x2'), data=dat, q=4, rr=TRUE, B=200,\n##D degree=2)\n##D res2$fit\n##D res2$msmfit # correct point estimates, incorrect standard errors\n##D res2 # correct point estimates, correct standard errors\n##D plot(res2)\n##D # Log risk ratio per one IQR change in all exposures (not on quantile basis)\n##D dat$x1iqr <- dat$x1/with(dat, diff(quantile(x1, c(.25, .75))))\n##D dat$x2iqr <- dat$x2/with(dat, diff(quantile(x2, c(.25, .75))))\n##D # note that I(x>...) 
now operates on the untransformed value of x,\n##D # rather than the quantized value\n##D res2 = qgcomp.glm.boot(y ~ z + x1iqr + I(x2iqr>0.1) + I(x2iqr>0.4) + I(x2iqr>0.9),\n##D family=\"binomial\", expnms = c('x1iqr', 'x2iqr'), data=dat, q=NULL, rr=TRUE, B=200,\n##D degree=2)\n##D res2\n##D # using parallel processing\n##D \n##D qgcomp.glm.boot(y ~ z + x1iqr + I(x2iqr>0.1) + I(x2iqr>0.4) + I(x2iqr>0.9),\n##D family=\"binomial\", expnms = c('x1iqr', 'x2iqr'), data=dat, q=NULL, rr=TRUE, B=200,\n##D degree=2, parallel=TRUE, parplan=TRUE)\n##D \n##D \n##D # weighted model\n##D N=5000\n##D dat4 <- data.frame(id=seq_len(N), x1=runif(N), x2=runif(N), z=runif(N))\n##D dat4$y <- with(dat4, rnorm(N, x1*z + z, 1))\n##D dat4$w=runif(N) + dat4$z*5\n##D qdata = quantize(dat4, expnms = c(\"x1\", \"x2\"), q=4)$data\n##D # first equivalent models with no covariates\n##D qgcomp.glm.noboot(f=y ~ x1 + x2, expnms = c('x1', 'x2'), data=dat4, q=4, family=gaussian())\n##D qgcomp.glm.noboot(f=y ~ x1 + x2, expnms = c('x1', 'x2'), data=dat4, q=4, family=gaussian(),\n##D weights=w)\n##D \n##D set.seed(13)\n##D qgcomp.glm.boot(f=y ~ x1 + x2, expnms = c('x1', 'x2'), data=dat4, q=4, family=gaussian(),\n##D weights=w)\n##D # using the correct model\n##D set.seed(13)\n##D qgcomp.glm.boot(f=y ~ x1*z + x2, expnms = c('x1', 'x2'), data=dat4, q=4, family=gaussian(),\n##D weights=w, id=\"id\")\n##D (qgcfit <- qgcomp.glm.boot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat4, q=4,\n##D family=gaussian(), weights=w))\n##D qgcfit$fit\n##D summary(glm(y ~ z + x1 + x2, data = qdata, weights=w))\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.glm.noboot","snippet":"### Name: qgcomp.glm.noboot\n### Title: Quantile g-computation for continuous, binary, and count\n### outcomes under linearity/additivity\n### Aliases: qgcomp.glm.noboot gcomp.noboot qgcomp.noboot\n\n### ** Examples\n\nset.seed(50)\n# linear model\ndat <- data.frame(y=runif(50,-1,1), x1=runif(50), x2=runif(50), z=runif(50))\nqgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=gaussian())\n# no intercept model\nqgcomp.glm.noboot(f=y ~-1+ z + x1 + x2, expnms = c('x1', 'x2'), data=dat, q=2, family=gaussian())\n# logistic model\ndat2 <- data.frame(y=rbinom(50, 1,0.5), x1=runif(50), x2=runif(50), z=runif(50))\nqgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat2, q=2, family=binomial())\n# poisson model\ndat3 <- data.frame(y=rpois(50, .5), x1=runif(50), x2=runif(50), z=runif(50))\nqgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat3, q=2, family=poisson())\n# weighted model\nN=5000\ndat4 <- data.frame(y=runif(N), x1=runif(N), x2=runif(N), z=runif(N))\ndat4$w=runif(N)*2\nqdata = quantize(dat4, expnms = c(\"x1\", \"x2\"))$data\n(qgcfit <- qgcomp.glm.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), data=dat4, q=4,\n family=gaussian(), weights=w))\nqgcfit$fit\nglm(y ~ z + x1 + x2, data = qdata, weights=w)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.hurdle.boot","snippet":"### Name: qgcomp.hurdle.boot\n### Title: Quantile g-computation for hurdle count outcomes\n### Aliases: qgcomp.hurdle.boot\n\n### ** Examples\n\nset.seed(50)\nn=500\ndat <- data.frame(y=rbinom(n, 1, 0.5)*rpois(n, 1.2), x1=runif(n), x2=runif(n), z=runif(n))\n# poisson count model, mixture in both portions\n## Not run: \n##D # warning: the examples below can take a long time to run\n##D res = qgcomp.hurdle.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=1000, MCsize=10000, parallel=TRUE, 
parplan=TRUE)\n##D qgcomp.hurdle.noboot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\")\n##D res\n##D \n##D # accuracy for small MCsize is suspect (compare coefficients between boot/noboot versions), \n##D # so re-check with MCsize set to larger value (this takes a long time to run)\n##D res2 = qgcomp.hurdle.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=1000, MCsize=50000, parallel=TRUE, parplan=TRUE)\n##D res2\n##D plot(density(res2$bootsamps[4,]))\n##D \n##D # negative binomial count model, mixture and covariate in both portions\n##D qgcomp.hurdle.boot(f=y ~ z + x1 + x2 | z + x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"negbin\", B=10, MCsize=10000) \n##D \n##D # weighted analysis (NOTE THIS DOES NOT WORK WITH parallel=TRUE!)\n##D dat$w = runif(n)*5\n##D qgcomp.hurdle.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", weights=w)\n##D # You may see this: \n##D # Warning message:\n##D # In eval(family$initialize) : non-integer #successes in a binomial glm!\n##D qgcomp.hurdle.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=5, MCsize=50000, parallel=FALSE, weights=w)\n##D # Log rr per one IQR change in all exposures (not on quantile basis)\n##D dat$x1iqr <- dat$x1/with(dat, diff(quantile(x1, c(.25, .75))))\n##D dat$x2iqr <- dat$x2/with(dat, diff(quantile(x2, c(.25, .75))))\n##D # note that I(x>...) now operates on the untransformed value of x,\n##D # rather than the quantized value\n##D res2 = qgcomp.hurdle.boot(f=y ~ z + x1iqr + x2iqr + I(x2iqr>0.1) + \n##D I(x2iqr>0.4) + I(x2iqr>0.9) | x1iqr + x2iqr, \n##D expnms = c('x1iqr', 'x2iqr'), \n##D data=dat, q=NULL, B=2, degree=2, MCsize=2000, dist=\"poisson\")\n##D res2\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.hurdle.noboot","snippet":"### Name: qgcomp.hurdle.noboot\n### Title: Quantile g-computation for hurdle count outcomes under\n### linearity/additivity\n### Aliases: qgcomp.hurdle.noboot\n\n### ** Examples\n\nset.seed(50)\nn=100\ndat <- data.frame(y=rbinom(n, 1, 0.5)*rpois(n, 1.2), x1=runif(n), x2=runif(n), z=runif(n))\n\n# poisson count model, mixture in both portions\nqgcomp.hurdle.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"poisson\")\n \n# negative binomial count model, mixture and covariate in both portions\nqgcomp.hurdle.noboot(f=y ~ z + x1 + x2 | z + x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"negbin\") \nqgcomp.hurdle.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"negbin\") # equivalent\n \n# negative binomial count model, mixture only in the 'count' portion of the model\nqgcomp.hurdle.noboot(f=y ~ z + x1 + x2 | z, expnms = c('x1', 'x2'), data=dat, q=2, dist=\"negbin\")\n\n# weighted analysis\ndat$w = runif(n)*5\nqgcomp.hurdle.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"poisson\", weights=w)\n# Expect this: \n# Warning message:\n# In eval(family$initialize) : non-integer #successes in a binomial glm!\n\n\n\n"} {"package":"qgcomp","topic":"qgcomp.multinomial.boot","snippet":"### Name: qgcomp.multinomial.boot\n### Title: Quantile g-computation for multinomial outcomes\n### Aliases: qgcomp.multinomial.boot\n\n### ** Examples\n\ndata(\"metals\") # from qgcomp package\n# create categorical outcome from the existing continuous outcome (usually, one will already exist)\nmetals$ycat = 
factor(quantize(metals, \"y\",q=4)$data$y, levels=c(\"0\", \"1\", \"2\", \"3\"), \n labels=c(\"cct\", \"ccg\", \"aat\", \"aag\")) \n# restrict to smaller dataset for simplicity\nsmallmetals = metals[,c(\"ycat\", \"arsenic\", \"lead\", \"cadmium\", \"mage35\")]\n\n### 1: Define mixture and underlying model ####\nmixture = c(\"arsenic\", \"lead\", \"cadmium\")\nf0 = ycat ~ arsenic + lead + cadmium # the multinomial model \n# (be sure that factor variables are properly coded ahead of time in the dataset)\nrr = qgcomp.multinomial.boot(\n f0, \n expnms = mixture,\n q=4, \n data = smallmetals, \n B = 5, # set to higher values in real examples\n MCsize = 100, # set to higher values in small samples\n )\n\nrr2 = qgcomp.multinomial.noboot(\n f0, \n expnms = mixture,\n q=4, \n data = smallmetals\n )\n \n ### 5: Create summary qgcomp object for nice printing ####\n \n summary(rr, tests=c(\"H\")) # include homogeneity test\n \n # 95% confidence intervals\n #confint(rr, level=0.95)\n #rr$breaks # quantile cutpoints for exposures\n # homogeneity_test(rr)\n #joint_test(rr)\n\nqdat = simdata_quantized(\n outcometype=\"multinomial\", \n n=10000, corr=c(-.9), coef=cbind(c(.2,-.2,0,0), c(.1,.1,.1,.1)), \n q = 4\n)\n\n rr_sim = qgcomp.multinomial.noboot(\n y~x1+x2+x3+x4, \n expnms = c(\"x1\", \"x2\", \"x3\", \"x4\"),\n q=4, \n data = qdat\n )\n \n rr_sim2 = qgcomp.multinomial.boot(\n y~x1+x2+x3+x4, \n expnms = c(\"x1\", \"x2\", \"x3\", \"x4\"),\n q=4, \n data = qdat,\n B=1\n )\n\n\n\n"} {"package":"qgcomp","topic":"qgcomp.multinomial.noboot","snippet":"### Name: qgcomp.multinomial.noboot\n### Title: Quantile g-computation for multinomial outcomes\n### Aliases: qgcomp.multinomial.noboot\n\n### ** Examples\n\ndata(\"metals\") # from qgcomp package\n# create categorical outcome from the existing continuous outcome (usually, one will already exist)\nmetals$ycat = factor(quantize(metals, \"y\",q=4)$data$y, levels=c(\"0\", \"1\", \"2\", \"3\"), \n labels=c(\"cct\", \"ccg\", \"aat\", \"aag\")) \n# restrict to smaller dataset for simplicity\nsmallmetals = metals[,c(\"ycat\", \"arsenic\", \"lead\", \"cadmium\", \"mage35\")]\n\n### 1: Define mixture and underlying model ####\nmixture = c(\"arsenic\", \"lead\", \"cadmium\")\nf0 = ycat ~ arsenic + lead + cadmium # the multinomial model \n# (be sure that factor variables are properly coded ahead of time in the dataset)\n\nrr = qgcomp.multinomial.noboot(\n f0, \n expnms = mixture,\n q=4, \n data = smallmetals, \n )\n \n ### 5: Create summary qgcomp object for nice printing ####\n \n summary(rr, tests=c(\"H\")) # include homogeneity test\n \n # 95% confidence intervals\n confint(rr, level=0.95)\n rr$breaks # quantile cutpoints for exposures\n # homogeneity_test(rr)\n joint_test(rr)\n\n\n\n"} {"package":"qgcomp","topic":"qgcomp.partials","snippet":"### Name: qgcomp.partials\n### Title: Partial effect sizes, confidence intervals, hypothesis tests\n### Aliases: qgcomp.partials\n\n### ** Examples\n\nset.seed(123223)\ndat = qgcomp::simdata_quantized(n=1000, outcometype=\"continuous\", corr=c(.75, 0), \n b0=0, coef=c(0.25,-0.25,0,0), q=4)\ncor(dat)\n# overall fit (more or less null due to counteracting exposures)\n(overall <- qgcomp.glm.noboot(f=y~., q=NULL, expnms=c(\"x1\", \"x2\", \"x3\", \"x4\"), data=dat))\n\n# partial effects using 40% training/60% validation split\ntrainidx <- sample(1:nrow(dat), round(nrow(dat)*0.4))\nvalididx <- setdiff(1:nrow(dat),trainidx)\ntraindata = dat[trainidx,]\nvaliddata = dat[valididx,]\nsplitres <- qgcomp.partials(fun=\"qgcomp.glm.noboot\", 
f=y~., q=NULL, \n traindata=traindata,validdata=validdata, expnms=c(\"x1\", \"x2\", \"x3\", \"x4\"))\nsplitres\n## Not run: \n##D # under the null, both should give null results\n##D set.seed(123223)\n##D dat = simdata_quantized(n=1000, outcometype=\"continuous\", corr=c(.75, 0), \n##D b0=0, coef=c(0,0,0,0), q=4)\n##D # 40% training/60% validation\n##D trainidx2 <- sample(1:nrow(dat), round(nrow(dat)*0.4))\n##D valididx2 <- setdiff(1:nrow(dat),trainidx2)\n##D traindata2 <- dat[trainidx2,]\n##D validdata2 <- dat[valididx2,]\n##D splitres2 <- qgcomp.partials(fun=\"qgcomp.glm.noboot\", f=y~., \n##D q=NULL, traindata=traindata2,validdata=validdata2, expnms=c(\"x1\", \"x2\", \"x3\", \"x4\"))\n##D splitres2\n##D \n##D # 60% training/40% validation\n##D trainidx3 <- sample(1:nrow(dat), round(nrow(dat)*0.6))\n##D valididx3 <- setdiff(1:nrow(dat),trainidx3)\n##D traindata3 <- dat[trainidx3,]\n##D validdata3 <- dat[valididx3,]\n##D splitres3 <- qgcomp.partials(fun=\"qgcomp.glm.noboot\", f=y~., q=NULL, \n##D traindata=traindata3,validdata=validdata3, expnms=c(\"x1\", \"x2\", \"x3\", \"x4\"))\n##D splitres3\n##D \n##D # survival outcome\n##D set.seed(50)\n##D N=1000\n##D dat = simdata_quantized(n=1000, outcometype=\"survival\", corr=c(.75, 0, 0, 0, 1), \n##D b0=0, coef=c(1,0,0,0,0,1), q=4)\n##D names(dat)[which(names(dat)==\"x5\")] = \"z\"\n##D trainidx4 <- sample(1:nrow(dat), round(nrow(dat)*0.6))\n##D valididx4 <- setdiff(1:nrow(dat),trainidx4)\n##D traindata4 <- dat[trainidx4,]\n##D validdata4 <- dat[valididx4,]\n##D expnms=paste0(\"x\", 1:5)\n##D f = survival::Surv(time, d)~x1 + x2 + x3 + x4 + x5 + z\n##D (fit1 <- survival::coxph(f, data = dat))\n##D (overall <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n##D (splitres4 <- qgcomp.partials(fun=\"qgcomp.cox.noboot\", f=f, q=4,\n##D traindata=traindata4,validdata=validdata4,\n##D expnms=expnms))\n##D \n##D # zero inflated count outcome\n##D set.seed(50)\n##D n=1000\n##D dat <- data.frame(y= (yany <- rbinom(n, 1, 0.5))*(ycnt <- rpois(n, 1.2)), x1=runif(n)+ycnt*0.2, \n##D x2=runif(n)-ycnt*0.2, x3=runif(n),\n##D x4=runif(n), z=runif(n))\n##D # poisson count model, mixture in both portions, but note that the qgcomp.partials\n##D # function defines the \"positive\" variables only by the count portion of the model\n##D (overall5 <- qgcomp.zi.noboot(f=y ~ z + x1 + x2 + x3 + x4 | x1 + x2 + x3 + x4 + z, \n##D expnms = c(\"x1\", \"x2\", \"x3\", \"x4\"), \n##D data=dat, q=4, dist=\"poisson\"))\n##D \n##D trainidx5 <- sample(1:nrow(dat), round(nrow(dat)*0.6))\n##D valididx5 <- setdiff(1:nrow(dat),trainidx5)\n##D traindata5 <- dat[trainidx5,]\n##D validdata5 <- dat[valididx5,]\n##D splitres5 <- qgcomp.partials(fun=\"qgcomp.zi.noboot\", \n##D f=y ~ x1 + x2 + x3 + x4 + z | x1 + x2 + x3 + x4 + z, q=4, \n##D traindata=traindata5, validdata=validdata5, \n##D expnms=c(\"x1\", \"x2\", \"x3\", \"x4\"))\n##D splitres5\n##D \n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.survcurve.boot","snippet":"### Name: qgcomp.survcurve.boot\n### Title: Survival curve data from a qgcomp survival fit\n### Aliases: qgcomp.survcurve.boot\n\n### ** Examples\n\nset.seed(50)\nN=200\ndat <- data.frame(time=(tmg <- pmin(.1,rweibull(N, 10, 0.1))), \n d=1.0*(tmg<0.1), x1=runif(N), x2=runif(N), z=runif(N))\nexpnms=paste0(\"x\", 1:2)\nf = survival::Surv(time, d)~x1 + x2\n(fit1 <- survival::coxph(f, data = dat))\n(obj <- qgcomp.cox.noboot(f, expnms = expnms, data = dat))\n## Not run: \n##D (obj2 <- qgcomp.cox.boot(f, expnms = expnms, data = dat, B=10, 
MCsize=20000))\n##D curves = qgcomp.survcurve.boot(obj2)\n##D rbind(head(curves$mdfq),tail(curves$mdfq))\n## End(Not run)\n\n\n\n\n"} {"package":"qgcomp","topic":"qgcomp.zi.boot","snippet":"### Name: qgcomp.zi.boot\n### Title: Quantile g-computation for zero-inflated count outcomes\n### Aliases: qgcomp.zi.boot\n\n### ** Examples\n\nset.seed(50)\nn=100\ndat <- data.frame(y=rbinom(n, 1, 0.5)*rpois(n, 1.2), x1=runif(n), x2=runif(n), z=runif(n))\n# poisson count model, mixture in both portions\n## Not run: \n##D # warning: the examples below can take a long time to run\n##D res = qgcomp.zi.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=1000, MCsize=10000, parallel=TRUE, parplan=TRUE)\n##D qgcomp.zi.noboot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\")\n##D res\n##D \n##D # accuracy for small MCsize is suspect (compare coefficients between boot/noboot versions), \n##D # so re-check with MCsize set to larger value (this takes a long time to run)\n##D res2 = qgcomp.zi.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=1000, MCsize=50000, parallel=TRUE, parplan=TRUE)\n##D res2\n##D plot(density(res2$bootsamps[4,]))\n##D \n##D # negative binomial count model, mixture and covariate in both portions\n##D qgcomp.zi.boot(f=y ~ z + x1 + x2 | z + x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"negbin\", B=10, MCsize=10000) \n##D \n##D # weighted analysis (NOTE THIS DOES NOT WORK WITH parallel=TRUE!)\n##D dat$w = runif(n)*5\n##D qgcomp.zi.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", weights=w)\n##D # Expect this: \n##D # Warning message:\n##D # In eval(family$initialize) : non-integer #successes in a binomial glm!\n##D qgcomp.zi.boot(f=y ~ x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n##D data=dat, q=4, dist=\"poisson\", B=5, MCsize=50000, parallel=FALSE, weights=w)\n##D # Log rr per one IQR change in all exposures (not on quantile basis)\n##D dat$x1iqr <- dat$x1/with(dat, diff(quantile(x1, c(.25, .75))))\n##D dat$x2iqr <- dat$x2/with(dat, diff(quantile(x2, c(.25, .75))))\n##D # note that I(x>...) 
now operates on the untransformed value of x,\n##D # rather than the quantized value\n##D res2 = qgcomp.zi.boot(y ~ z + x1iqr + x2iqr + I(x2iqr>0.1) + I(x2iqr>0.4) + I(x2iqr>0.9) | x1iqr + x2iqr, \n##D expnms = c('x1iqr', 'x2iqr'), data=dat, q=NULL, B=2, \n##D degree=2, MCsize=200, dist=\"poisson\")\n##D res2\n## End(Not run)\n\n\n"} {"package":"qgcomp","topic":"qgcomp.zi.noboot","snippet":"### Name: qgcomp.zi.noboot\n### Title: Quantile g-computation for zero-inflated count outcomes under\n### linearity/additivity\n### Aliases: qgcomp.zi.noboot\n\n### ** Examples\n\nset.seed(50)\nn=100\ndat <- data.frame(y=rbinom(n, 1, 0.5)*rpois(n, 1.2), x1=runif(n), x2=runif(n), z=runif(n))\n\n# poisson count model, mixture in both portions\nqgcomp.zi.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"poisson\")\n \n# negative binomial count model, mixture and covariate in both portions\nqgcomp.zi.noboot(f=y ~ z + x1 + x2 | z + x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"negbin\") \nqgcomp.zi.noboot(f=y ~ z + x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"negbin\") # equivalent\n \n# negative binomial count model, mixture only in the 'count' portion of the model\nqgcomp.zi.noboot(f=y ~ z + x1 + x2 | z, expnms = c('x1', 'x2'), data=dat, q=2, dist=\"negbin\")\n\n# weighted analysis\ndat$w = runif(n)*5\nqgcomp.zi.noboot(f=y ~ z + x1 + x2 | x1 + x2, expnms = c('x1', 'x2'), \n data=dat, q=2, dist=\"poisson\", weights=w)\n# Expect this: \n# Warning message:\n# In eval(family$initialize) : non-integer #successes in a binomial glm!\n\n\n\n"} {"package":"qgcomp","topic":"quantize","snippet":"### Name: quantize\n### Title: Quantizing exposure data\n### Aliases: quantize\n\n### ** Examples\n\nset.seed(1232)\ndat = data.frame(y=runif(100), x1=runif(100), x2=runif(100), z=runif(100))\nqdata = quantize(data=dat, expnms=c(\"x1\", \"x2\"), q=4)\ntable(qdata$data$x1)\ntable(qdata$data$x2)\nsummary(dat[c(\"y\", \"z\")]);summary(qdata$data[c(\"y\", \"z\")]) # not touched\ndat = data.frame(y=runif(100), x1=runif(100), x2=runif(100), z=runif(100))\n# using 'breaks' requires specifying min and max (the qth quantile)\n# example with theoretical quartiles (could be other relevant values)\nqdata2 = quantize(data=dat, expnms=c(\"x1\", \"x2\"),\n breaks=list(c(-1e64, .25, .5, .75, 1e64),\n c(-1e64, .25, .5, .75, 1e64)\n ))\ntable(qdata2$data$x1)\ntable(qdata2$data$x2)\n\n\n"} {"package":"qgcomp","topic":"se_comb","snippet":"### Name: se_comb\n### Title: Calculate standard error of weighted linear combination of\n### random variables\n### Aliases: se_comb\n\n### ** Examples\n\nvcov = rbind(c(1.2, .9),c(.9, 2.0))\ncolnames(vcov) <- rownames(vcov) <- expnms <- c(\"x1\", \"x2\")\nse_comb(expnms, vcov, c(1, 0))^2 # returns the given variance\nse_comb(expnms, vcov, c(1, 1)) # default linear MSM fit: all exposures\n# have equal weight\nse_comb(expnms, vcov, c(.3, .1)) # used when one exposure contributes\n # to the overall fit more than others = d(msmeffect)/dx\n\n\n"} {"package":"qgcomp","topic":"simdata_quantized","snippet":"### Name: simdata_quantized\n### Title: Simulate quantized exposures for testing methods\n### Aliases: simdata_quantized\n\n### ** Examples\n\nset.seed(50)\nqdat = simdata_quantized(\n outcometype=\"continuous\", \n n=10000, corr=c(.9,.3), coef=c(1,1,0,0), \n q = 8\n)\ncor(qdat)\nqdat = simdata_quantized(\n outcometype=\"continuous\", \n n=10000, corr=c(-.9,.3), coef=c(1,2,0,0), \n q = 
4\n)\ncor(qdat)\ntable(qdat$x1)\nqgcomp.glm.noboot(y~.,data=qdat)\n\nqdat = simdata_quantized(\n outcometype=\"multinomial\", \n n=10000, corr=c(-.9), coef=cbind(c(1,-1,0,0), c(1,.2,0,0)), \n q = 4\n)\n\n\n\n"} {"package":"qgcomp","topic":"split_data","snippet":"### Name: split_data\n### Title: Perform sample splitting\n### Aliases: split_data\n\n### ** Examples\n\ndata(metals)\nset.seed(1231124)\nspl = split_data(metals)\nXnm <- c(\n 'arsenic','barium','cadmium','calcium','chromium','copper',\n 'iron','lead','magnesium','manganese','mercury','selenium','silver',\n 'sodium','zinc'\n)\ndim(spl$traindata) # 181 observations = 40% of total\ndim(spl$validdata) # 271 observations = 60% of total\nsplitres <- qgcomp.partials(fun=\"qgcomp.glm.noboot\", f=y~., q=4, \n traindata=spl$traindata,validdata=spl$validdata, expnms=Xnm)\nsplitres\n\n# also used to compare linear vs. non-linear fits (useful if you have enough data)\nset.seed(1231)\nspl = split_data(metals, prop.train=.5)\nlin = qgcomp.glm.boot(f=y~., q=4, expnms=Xnm, B=5, data=spl$traindata)\nnlin1 = qgcomp.glm.boot(f=y~. + I(manganese^2) + I(calcium^2), expnms=Xnm, deg=2, \n q=4, B=5, data=spl$traindata)\nnlin2 = qgcomp.glm.boot(f=y~. + I(arsenic^2) + I(cadmium^2), expnms=Xnm, deg=2, \n q=4, B=5, data=spl$traindata)\nAIC(lin);AIC(nlin1);AIC(nlin2)\n# linear has lowest training AIC, so base final fit off that (and bootstrap not needed)\nqgcomp.glm.noboot(f=y~., q=4, expnms=Xnm, data=spl$validdata)\n\n\n"} {"package":"qgcomp","topic":"vc_comb","snippet":"### Name: vc_comb\n### Title: Calculate covariance matrix between one random variable and a\n### linear combination of random variables\n### Aliases: vc_comb\n\n### ** Examples\n\nvcov = rbind(c(0.010051348, -0.0039332248, -0.0036965571),\n c(-0.003933225, 0.0051807876, 0.0007706792),\n c(-0.003696557, 0.0007706792, 0.0050996587))\ncolnames(vcov) <- rownames(vcov) <- c(\"(Intercept)\", \"x1\", \"x2\")\nexpnms <- rownames(vcov)[2:3]\naname = rownames(vcov)[1]\nvc_comb(aname, expnms, vcov) # returns the given covariance matrix\n\n\n"} {"package":"qgcomp","topic":"zimsm_fit","snippet":"### Name: zimsm_fit\n### Title: Secondary prediction method for the (zero-inflated) qgcomp MSM.\n### Aliases: zimsm_fit\n\n### ** Examples\n\nset.seed(50)\nn=100\n## Not run: \n##D dat <- data.frame(y=rbinom(n, 1, 0.5)*rpois(n, 1.2), x1=runif(n), x2=runif(n), z=runif(n))\n##D expnms = c(\"x1\", \"x2\")\n##D q = 4\n##D qdata = quantize(dat, q=q, expnms=expnms)$data\n##D f = y ~ x1 + x2 + z | 1\n##D msmfit <- zimsm_fit(f, qdata, intvals=(1:q)-1, expnms, main=TRUE,\n##D degree=1, id=NULL, MCsize=10000, containmix=list(count=TRUE, zero=FALSE), \n##D x=FALSE)\n##D msmfit$msmfit\n## End(Not run)\n\n\n"} {"package":"ipwErrorY","topic":"Est2Replicates","snippet":"### Name: Est2Replicates\n### Title: Estimation of ATE with Two Replicates\n### Aliases: Est2Replicates\n\n### ** Examples\n\n#create a dataset with sensitivity=0.95 and specificity=0.85\nset.seed(100)\nX1=rnorm(2000) \nA=rbinom(2000,1,1/(1+exp(-0.2-X1)))\nY=rbinom(2000,1,1/(1+exp(-0.2-A-X1)))\ny1=which(Y==1)\ny0=which(Y==0) \nYast1=Y\nYast1[y1]=rbinom(length(y1),1,0.95)\nYast1[y0]=rbinom(length(y0),1,0.15)\nYast2=Y\nYast2[y1]=rbinom(length(y1),1,0.95) \nYast2[y0]=rbinom(length(y0),1,0.15)\nda=data.frame(A=A,X1=X1,Yast1=Yast1,Yast2=Yast2)\nhead(da)\n#apply the correction method assuming specificity=0.85\nEst2Replicates(da,\"A\",c(\"Yast1\",\"Yast2\"),\"X1\",\"known specificity\",NULL,0.85,NULL,0.95)\n\n\n\n"} 
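# The quantize() example above shows the input/output behavior but not the idea
# behind it. A minimal base-R sketch of quantile scoring (an illustration only,
# not qgcomp's actual implementation): each exposure is cut at its sample
# quantiles and re-coded as integer scores 0..q-1, which the fitted models use.
quantize_sketch <- function(x, q = 4) {
  br <- quantile(x, probs = seq(0, 1, length.out = q + 1))
  br[1] <- -Inf       # open the ends so every value falls in a bin
  br[q + 1] <- Inf
  cut(x, breaks = br, labels = FALSE) - 1L
}
set.seed(1232)
x <- runif(100)
table(quantize_sketch(x, q = 4))  # roughly 25 observations per score 0..3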
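# se_comb() above returns the standard error of a weighted linear combination of
# coefficients. As a worked check of its first example, this is just sqrt(w' V w)
# for weight vector w and covariance matrix V (a hand calculation, not qgcomp
# internals): with w = c(1, 0), w' V w = 1.2, the given variance.
V <- rbind(c(1.2, 0.9), c(0.9, 2.0))
w <- c(1, 1)
sqrt(drop(t(w) %*% V %*% w))  # sqrt(1.2 + 2.0 + 2*0.9) = sqrt(5) ~ 2.236,
                              # matching se_comb(expnms, vcov, c(1, 1))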
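# The ipwErrorY examples (Est2Replicates above, EstValidation/KnownError below)
# all build the error-prone outcome the same way. A standalone sketch of that
# pattern; the helper name misclassify() is illustrative, not part of ipwErrorY:
# P(Yast=1 | Y=1) = sensitivity and P(Yast=1 | Y=0) = 1 - specificity.
misclassify <- function(Y, sensitivity = 0.95, specificity = 0.85) {
  Yast <- Y
  y1 <- which(Y == 1)
  y0 <- which(Y == 0)
  Yast[y1] <- rbinom(length(y1), 1, sensitivity)      # true positives retained
  Yast[y0] <- rbinom(length(y0), 1, 1 - specificity)  # false positives introduced
  Yast
}
set.seed(100)
Y <- rbinom(2000, 1, 0.5)
Yast <- misclassify(Y)
mean(Yast[Y == 1])  # close to the sensitivity, 0.95
mean(Yast[Y == 0])  # close to 1 - specificity, 0.15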
{"package":"ipwErrorY","topic":"EstValidation","snippet":"### Name: EstValidation\n### Title: Estimation of ATE with Validation Data\n### Aliases: EstValidation\n\n### ** Examples\n\n#create main data and validation data with sensitivity=0.95 and specificity=0.85\nset.seed(100)\nX1=rnorm(1200) \nA=rbinom(1200,1,1/(1+exp(-0.2-X1)))\nY=rbinom(1200,1,1/(1+exp(-0.2-A-X1)))\ny1=which(Y==1)\ny0=which(Y==0) \nYast=Y\nYast[y1]=rbinom(length(y1),1,0.95)\nYast[y0]=rbinom(length(y0),1,0.15)\nmainda=data.frame(A=A,X1=X1,Yast=Yast)\nX1=rnorm(800) \nA=rbinom(800,1,1/(1+exp(-0.2-X1)))\nY=rbinom(800,1,1/(1+exp(-0.2-A-X1)))\ny1=which(Y==1)\ny0=which(Y==0) \nYast=Y\nYast[y1]=rbinom(length(y1),1,0.95)\nYast[y0]=rbinom(length(y0),1,0.15)\nvalidationda=data.frame(A=A,X1=X1,Y=Y,Yast=Yast)\nhead(mainda)\nhead(validationda)\n#apply the optimal linear combination correction method\nEstValidation(mainda,validationda,\"A\",\"Yast\",\"X1\",\"Y\",0.95)\n\n\n\n"} {"package":"ipwErrorY","topic":"KnownError","snippet":"### Name: KnownError\n### Title: Estimation of ATE with Known Error\n### Aliases: KnownError\n\n### ** Examples\n\n#create a dataset with sensitivity=0.95 and specificity=0.85\nset.seed(100)\nX1=rnorm(2000) \nA=rbinom(2000,1,1/(1+exp(-0.2-X1)))\nY=rbinom(2000,1,1/(1+exp(-0.2-A-X1)))\ny1=which(Y==1)\ny0=which(Y==0) \nYast=Y\nYast[y1]=rbinom(length(y1),1,0.95)\nYast[y0]=rbinom(length(y0),1,0.15)\nda=data.frame(X1=X1,A=A,Yast=Yast)\nhead(da)\n#apply the correction method with sensitivity=0.95 and specificity=0.85\nKnownError(da,\"A\",\"Yast\",\"X1\",0.95,0.85,0.95)\n\n\n\n"} {"package":"ipwErrorY","topic":"KnownErrorDR","snippet":"### Name: KnownErrorDR\n### Title: Doubly Robust Estimation of ATE with Known Error\n### Aliases: KnownErrorDR\n\n### ** Examples\n\n#create a dataset with sensitivity=0.95 and specificity=0.85\nset.seed(100)\nX=rnorm(2000) \nxx=X^2\nA=rbinom(2000,1,1/(1+exp(-0.1-X-0.2*xx)))\nY=rbinom(2000,1,1/(1+exp(1-A-0.5*X-xx)))\ny1=which(Y==1)\ny0=which(Y==0) \nY[y1]=rbinom(length(y1),1,0.95)\nY[y0]=rbinom(length(y0),1,0.15)\nYast=Y\nda=data.frame(A=A,X=X,xx=xx,Yast=Yast)\nhead(da)\n#apply the doubly robust correction method with sensitivity=0.95 and specificity=0.85\nKnownErrorDR(da,\"A\",\"Yast\",c(\"X\",\"xx\"),c(\"X\",\"xx\"),0.95,0.85,FALSE,0.95)\n\n\n\n"} {"package":"profile","topic":"read_rprof","snippet":"### Name: read_rprof\n### Title: File I/O for profiler data\n### Aliases: read_rprof read_pprof write_rprof write_pprof\n\n### ** Examples\n\nrprof_file <- system.file(\"samples/rprof/1.out\", package = \"profile\")\nds <- read_rprof(rprof_file)\nds\nif (requireNamespace(\"RProtoBuf\", quietly = TRUE)) {\n pprof_file <- tempfile(\"profile\", fileext = \".pb.gz\")\n write_pprof(ds, pprof_file)\n}\n\n\n"} {"package":"profile","topic":"validate_profile","snippet":"### Name: validate_profile\n### Title: Definition of the profile data format\n### Aliases: validate_profile dm_from_profile\n\n### ** Examples\n\nrprof_file <- system.file(\"samples/rprof/1.out\", package = \"profile\")\nds <- read_rprof(rprof_file)\nvalidate_profile(ds)\n\nbad_ds <- ds\nbad_ds$samples <- NULL\ntry(validate_profile(bad_ds))\n\nif (rlang::is_installed(\"dm\")) {\n dm <- dm_from_profile(ds)\n print(dm)\n dm::dm_draw(dm)\n}\n\n\n"} {"package":"rmarkdown","topic":"available_templates","snippet":"### Name: available_templates\n### Title: List available R Markdown template in a package\n### Aliases: available_templates\n\n### ** Examples\n\n# List rmarkdown templates & create a draft\navailable_templates()\n\n# List 
rticles templates\navailable_templates(\"rticles\")\n\n\n"} {"package":"rmarkdown","topic":"beamer_presentation","snippet":"### Name: beamer_presentation\n### Title: Convert to a Beamer presentation\n### Aliases: beamer_presentation\n\n### ** Examples\n\n## Not run: \n##D \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"pres.Rmd\", beamer_presentation())\n##D \n##D # specify an option for incremental rendering\n##D render(\"pres.Rmd\", beamer_presentation(incremental = TRUE))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"context_document","snippet":"### Name: context_document\n### Title: Convert to a ConTeXt document\n### Aliases: context_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"input.Rmd\", context_document())\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"convert_ipynb","snippet":"### Name: convert_ipynb\n### Title: Convert a Jupyter/IPython notebook to an R Markdown document\n### Aliases: convert_ipynb\n\n### ** Examples\n\n# this is not a real ipynb file, but illustrates what convert_ipynb() does\nnb_data <- list(\n cells = list(\n list(cell_type = 'markdown', source = 'Hi **Markdown**!'),\n list(cell_type = 'code', source = 'print(\"Hi R Markdown!\")')\n ),\n metadata = list(\n kernelspec = list(language = 'python')\n )\n)\nnb_file = tempfile(fileext = '.ipynb')\njsonlite::write_json(nb_data, nb_file, auto_unbox = TRUE, pretty = TRUE)\nxfun::file_string(nb_file) # show file content\n\n# convert to R Markdown\nnb_rmd = rmarkdown:::convert_ipynb(nb_file)\nxfun::file_string(nb_rmd)\n\n\n"} {"package":"rmarkdown","topic":"draft","snippet":"### Name: draft\n### Title: Create a new document based on a template\n### Aliases: draft\n\n### ** Examples\n\n## Not run: \n##D rmarkdown::draft(\"Q4Report.Rmd\",\n##D template=\"/opt/rmd/templates/quarterly_report\")\n##D \n##D rmarkdown::draft(\"Q4Report.Rmd\",\n##D template=\"quarterly_report\", package=\"pubtools\")\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"find_pandoc","snippet":"### Name: find_pandoc\n### Title: Find the 'pandoc' executable\n### Aliases: find_pandoc\n\n### ** Examples\n\nrmarkdown::find_pandoc()\nrmarkdown::find_pandoc(dir = '~/Downloads/Pandoc')\nrmarkdown::find_pandoc(version = '2.7.3')\n\n\n"} {"package":"rmarkdown","topic":"html_document","snippet":"### Name: html_document\n### Title: Convert to an HTML document\n### Aliases: html_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D render(\"input.Rmd\", html_document())\n##D \n##D render(\"input.Rmd\", html_document(toc = TRUE))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"includes","snippet":"### Name: includes\n### Title: Include content within output\n### Aliases: includes includes_to_pandoc_args\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D html_document(includes = includes(before_body = \"header.htm\"))\n##D \n##D pdf_document(includes = includes(after_body = \"footer.tex\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"md_document","snippet":"### Name: md_document\n### Title: Convert to a markdown document\n### Aliases: md_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D render(\"input.Rmd\", md_document())\n##D \n##D render(\"input.Rmd\", md_document(variant = \"markdown_github\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"metadata","snippet":"### Name: metadata\n### Title: The YAML metadata of the current R Markdown 
document\n### Aliases: metadata\n### Keywords: datasets\n\n### ** Examples\n\nrmarkdown::metadata\n\n\n"} {"package":"rmarkdown","topic":"odt_document","snippet":"### Name: odt_document\n### Title: Convert to an OpenDocument Text (ODT) document\n### Aliases: odt_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"input.Rmd\", odt_document())\n##D \n##D # specify an option for syntax highlighting\n##D render(\"input.Rmd\", odt_document(highlight = \"zenburn\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"output_format","snippet":"### Name: output_format\n### Title: Define an R Markdown output format\n### Aliases: output_format\n\n### ** Examples\n\n## Not run: \n##D output_format(knitr = knitr_options(opts_chunk = list(dev = 'png')),\n##D pandoc = pandoc_options(to = \"html\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"output_format_dependency","snippet":"### Name: output_format_dependency\n### Title: Define and merge an R Markdown's output format dependency\n### Aliases: output_format_dependency\n\n### ** Examples\n\n# Implicitly add lua filters from within a chunk\n# This relies on (implicit) printing of the dependency in a chunk via\n# knitr::knit_print()\noutput_format_dependency(\n \"lua_filter1\",\n pandoc = list(lua_filters = \"example1.lua\")\n)\n\n# Explicitly add lua filters from within a chunk\nknitr::knit_meta_add(list(output_format_dependency(\n \"lua_filter2\",\n pandoc = list(lua_filters = \"example2.lua\")\n)))\n\n# List the available dependencies\n# Note that the list may include dependencies with duplicated names. In that\n# case, the first one is merged to the output format and the others are\n# discarded.\nstr(knitr::knit_meta(\"output_format_dependency\", clean = FALSE))\n\n\n\n"} {"package":"rmarkdown","topic":"pandoc_args","snippet":"### Name: pandoc_args\n### Title: Functions for generating pandoc command line arguments\n### Aliases: pandoc_args pandoc_variable_arg pandoc_metadata_arg\n### pandoc_metadata_file_arg pandoc_include_args pandoc_highlight_args\n### pandoc_latex_engine_args pandoc_toc_args pandoc_citeproc_args\n### pandoc_lua_filter_args\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D pandoc_include_args(before_body = \"header.htm\")\n##D pandoc_include_args(before_body = \"header.tex\")\n##D \n##D pandoc_highlight_args(\"kate\")\n##D \n##D pandoc_latex_engine_args(\"pdflatex\")\n##D \n##D pandoc_toc_args(toc = TRUE, toc_depth = 2)\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"pandoc_available","snippet":"### Name: pandoc_available\n### Title: Check pandoc availability and version\n### Aliases: pandoc_available pandoc_version\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D if (pandoc_available())\n##D cat(\"pandoc\", as.character(pandoc_version()), \"is available!\\n\")\n##D \n##D if (pandoc_available(\"1.12.3\"))\n##D cat(\"required version of pandoc is available!\\n\")\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"pandoc_convert","snippet":"### Name: pandoc_convert\n### Title: Convert a document with pandoc\n### Aliases: pandoc_convert\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # convert markdown to various formats\n##D pandoc_convert(\"input.md\", to = \"html\")\n##D pandoc_convert(\"input.md\", to = \"latex\")\n##D \n##D # process citations\n##D pandoc_convert(\"input.md\", to = \"html\", citeproc = TRUE)\n##D \n##D # add some pandoc options\n##D 
pandoc_convert(\"input.md\", to = \"latex\", options = c(\"--listings\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"pdf_document","snippet":"### Name: pdf_document\n### Title: Convert to a PDF/LaTeX document\n### Aliases: pdf_document latex_document latex_fragment\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"input.Rmd\", pdf_document())\n##D \n##D # specify an option for latex engine\n##D render(\"input.Rmd\", pdf_document(latex_engine = \"lualatex\"))\n##D \n##D # add a table of contents and pass an option to pandoc\n##D render(\"input.Rmd\", pdf_document(toc = TRUE, \"--listings\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"pkg_file_lua","snippet":"### Name: pkg_file_lua\n### Title: Get the full paths of Lua filters in an R package\n### Aliases: pkg_file_lua\n\n### ** Examples\n\n# list all Lua filters stored in the rmarkdown package\npkg_file_lua()\n# get a specific filter\npkg_file_lua(c(\"pagebreak.lua\", \"latex_div.lua\"))\n\n\n"} {"package":"rmarkdown","topic":"publish_site","snippet":"### Name: publish_site\n### Title: Publish an R Markdown Website\n### Aliases: publish_site\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D publish_site()\n## End(Not run)\n\n\n\n"} {"package":"rmarkdown","topic":"render","snippet":"### Name: render\n### Title: Render R Markdown\n### Aliases: render\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # Render the default (first) format defined in the file\n##D render(\"input.Rmd\")\n##D \n##D # Render all formats defined in the file\n##D render(\"input.Rmd\", \"all\")\n##D \n##D # Render a single format, using parameters for \\code{html_document} from\n##D # the YAML header parameters.\n##D render(\"input.Rmd\", \"html_document\")\n##D \n##D # Render a single format, ignoring parameters for \\code{html_document} in\n##D # the YAML header. 
Any parameters not passed as arguments to\n##D # \\code{html_document()} will be assigned to their default values, regardless\n##D # of anything in the YAML header\n##D render(\"input.Rmd\", html_document(toc = TRUE, toc_depth = 2))\n##D \n##D # Render multiple formats\n##D render(\"input.Rmd\", c(\"html_document\", \"pdf_document\"))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"render_delayed","snippet":"### Name: render_delayed\n### Title: Delay Rendering for an Expression\n### Aliases: render_delayed\n\n### ** Examples\n\n## Not run: \n##D # Add the following code to an R Markdown document\n##D \n##D div(Sys.time())\n##D \n##D render_delayed({\n##D Sys.sleep(3) # simulate an expensive computation\n##D div(Sys.time())\n##D })\n##D \n##D div(Sys.time())\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"rmarkdown_format","snippet":"### Name: rmarkdown_format\n### Title: R Markdown input format definition\n### Aliases: rmarkdown_format from_rmarkdown\n\n### ** Examples\n\n## Not run: \n##D rmarkdown_format(\"-implicit_figures\")\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"rtf_document","snippet":"### Name: rtf_document\n### Title: Convert to an RTF document\n### Aliases: rtf_document\n\n### ** Examples\n\n## Not run: \n##D \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"input.Rmd\", rtf_document())\n##D \n##D # specify table of contents option\n##D render(\"input.Rmd\", rtf_document(toc = TRUE))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"run","snippet":"### Name: run\n### Title: Run a Shiny document\n### Aliases: run\n\n### ** Examples\n\n## Not run: \n##D # Run the Shiny document \"index.Rmd\" in the current directory\n##D rmarkdown::run()\n##D \n##D # Run the Shiny document \"shiny_doc.Rmd\" on port 8241\n##D rmarkdown::run(\"shiny_doc.Rmd\", shiny_args = list(port = 8241))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"slidy_presentation","snippet":"### Name: slidy_presentation\n### Title: Convert to a slidy presentation\n### Aliases: slidy_presentation\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"pres.Rmd\", slidy_presentation())\n##D \n##D # specify an option for incremental rendering\n##D render(\"pres.Rmd\", slidy_presentation(incremental = TRUE))\n## End(Not run)\n\n\n"} {"package":"rmarkdown","topic":"word_document","snippet":"### Name: word_document\n### Title: Convert to an MS Word document\n### Aliases: word_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D \n##D # simple invocation\n##D render(\"input.Rmd\", word_document())\n##D \n##D # specify an option for syntax highlighting\n##D render(\"input.Rmd\", word_document(highlight = \"zenburn\"))\n## End(Not run)\n\n\n"} {"package":"vegdata","topic":"cwm","snippet":"### Name: cwm\n### Title: Indicate site conditions with community weighted mean values of\n### traits or with mode of gradient classes (sum of species amplitudes).\n### Aliases: cwm\n\n### ** Examples\n\n ## Not run: \n##D db <- 'elbaue'\n##D veg <- tv.veg(db, cover.transform='sqrt', check.critical = FALSE)\n##D site <- tv.site(db, verbose = FALSE)\n##D #' Exclude plots with very high water level fluctuation\n##D veg <- veg[site$SDGL < 60,]\n##D veg <- veg[,colSums(veg) > 0]\n##D site <- site[site$SDGL < 60,]\n##D #' Load species trait value database\n##D traits <- tv.traits(db)\n##D \n##D #' Mean indicator values of Ellenberg F values\n##D mEIV_F <- isc(veg, trait.db = traits, ivname = 'OEK_F', method = 
'mean')\n##D plot(site$MGL, mEIV_F, xlab = 'Mean groundwater level')\n##D \n##D #' Mode (most frequent level) of Ellenberg F values\n##D library(reshape)\n##D traitmat <- cast(traits, LETTERCODE ~ OEK_F)\n##D traitmat <- traitmat[,-14]\n##D ilevel <- isc(veg, trait.db = traitmat, ivname = as.character(1:11), method = 'mode')\n##D boxplot(site$MGL ~ ordered(ilevel, levels = levels(ilevel)[c(2,4,3,5,6:10,1)]))\n##D \n## End(Not run)\n\n\n\n"} {"package":"vegdata","topic":"db_download","snippet":"### Name: db_download\n### Title: Download taxonomic databases\n### Aliases: db_download db_download_eurosl db_download_germansl\n\n### ** Examples\n\n## Not run: \n##D # EuroSL\n##D # db_download_eurosl()\n##D # src_eurosl()\n##D \n##D # GermanSL\n##D # db_download_germansl()\n##D # db_download_germansl(overwrite=TRUE) # overwrite - download again\n##D # src_germansl()\n##D \n## End(Not run)\n\n\n"} {"package":"vegdata","topic":"sql_collect","snippet":"### Name: sql_collect\n### Title: Query and get data back into a data.frame\n### Aliases: sql_collect\n\n### ** Examples\n\n## Not run: \n##D src <- src_germansl()\n##D sql_collect(src, \"select * from GermanSL limit 5\")\n##D ## or pipe the src to sql_collect\n##D src %>% sql_collect(\"select * from GermanSL limit 5\")\n## End(Not run)\n\n\n"} {"package":"vegdata","topic":"src_vegdata","snippet":"### Name: src_vegdata\n### Title: src - dplyr src objects\n### Aliases: src_vegdata src_eurosl src_germansl\n\n### ** Examples\n\n## Not run: \n##D # src_eurosl()\n##D # src_germansl()\n## End(Not run)\n\n\n"} {"package":"vegdata","topic":"syntab","snippet":"### Name: syntab\n### Title: Syntaxonomic frequency tables\n### Aliases: syntab print.syntab\n\n### ** Examples\n\n## Not run: \n##D elbaue <- tv.veg('elbaue')\n##D elbaue.env <- tv.site('elbaue')\n##D clust <- vector('integer', nrow(elbaue.env))\n##D clust[elbaue.env$MGL < -50 & elbaue.env$SDGL < 50] <- 1\n##D clust[elbaue.env$MGL < -50 & elbaue.env$SDGL >= 50] <- 2\n##D clust[elbaue.env$MGL >= -50 & elbaue.env$SDGL >= 50] <- 3\n##D clust[elbaue.env$MGL >= -50 & elbaue.env$SDGL < 50] <- 4\n##D levels(clust) <- c('dry.ld','dry.hd', 'wet.hd','wet.ld')\n##D traits <- tv.traits()\n##D st <- syntab(elbaue, clust, mupa=TRUE)\n##D m <- match(rownames(st$syntab), traits$LETTERCODE, nomatch = 0)\n##D trait <- traits[m, c(\"OEK_F\",\"OEK_N\")]\n##D rownames(trait) <- traits$LETTERCODE[m]\n##D print(st, limit=30, trait=trait)\n##D #' Manipulation of the syntaxonomic table\n##D sttable <- st$syntab\n##D sttable <- sttable[sttable$p.value < 0.05 & !is.na(sttable$p.value),\n##D !names(sttable) %in% c('stat')]\n##D taxa <- tax(rownames(sttable))\n##D rownames(sttable) <- taxa[match(rownames(sttable), taxa$LETTERCODE, nomatch = 0),'TaxonName']\n##D write.csv(sttable, 'sttable.csv')\n## End(Not run)\n\n\n\n"} {"package":"vegdata","topic":"taxval","snippet":"### Name: taxval\n### Title: Handling of taxonomy in vegetation data.\n### Aliases: taxval\n\n### ** Examples\n\n## Not run: \n##D # Turboveg installation needed\n##D obs <- taxval(db='taxatest')\n##D # For explanations see vignette('vegdata').\n##D \n##D veg <- tv.veg('taxatest')\n##D veg <- comb.species(veg, c('ARMEM-E','ARMEM-H'))\n## End(Not run)\n\n\n\n"} {"package":"vegdata","topic":"tdb_cache","snippet":"### Name: tdb_cache\n### Title: Caching\n### Aliases: tdb_cache\n\n### ** Examples\n\n## Not run: \n##D tdb_cache\n##D \n##D # list files in cache\n##D tdb_cache$list()\n##D \n##D # delete certain database files\n##D # tdb_cache$delete(\"file path\")\n##D # 
tdb_cache$list()\n##D \n##D # delete all files in cache\n##D # tdb_cache$delete_all()\n##D # tdb_cache$list()\n## End(Not run)\n\n\n"} {"package":"vegdata","topic":"tv.obs","snippet":"### Name: tv.obs\n### Title: Dataframe of plot-species observations directly from Turboveg\n### Aliases: tv.obs\n### Keywords: Turboveg\n\n### ** Examples\n\n ## Not run: \n##D # Turboveg installation needed\n##D obs <- tv.obs('taxatest')\n##D head(obs)\n##D \n## End(Not run)\n\n\n\n"} {"package":"vegdata","topic":"tv.veg","snippet":"### Name: tv.veg\n### Title: Tabulates vegetation tables from Turboveg database\n### Aliases: tv.veg\n### Keywords: manip misc\n\n### ** Examples\n\n ## Not run: \n##D vignette(\"vegdata\")\n##D #' If you have Turboveg installed on your computer, try\n##D #' tv.veg('databasename', tax=FALSE) to begin with.\n##D args(tv.veg)\n##D help('taxval')\n##D \n##D veg <- tv.veg('taxatest')\n##D names(veg)\n##D tv.veg('taxatest', uncertain=list('DET_CERT', data.frame(0:2,c('pres','agg','agg'))),\n##D pseudo=list(lc.0,'LAYER'), genus = 'delete')\n##D \n## End(Not run)\n\n\n\n"} {"package":"iNEXT","topic":"ChaoRichness","snippet":"### Name: ChaoRichness\n### Title: Estimation of species richness\n### Aliases: ChaoRichness\n\n### ** Examples\n\ndata(spider)\nChaoRichness(spider$Girdled, datatype=\"abundance\")\n\n\n"} {"package":"iNEXT","topic":"ChaoShannon","snippet":"### Name: ChaoShannon\n### Title: Estimation of Shannon entropy/diversity\n### Aliases: ChaoShannon\n\n### ** Examples\n\ndata(spider)\nChaoShannon(spider$Girdled, datatype=\"abundance\")\n\n\n"} {"package":"iNEXT","topic":"ChaoSimpson","snippet":"### Name: ChaoSimpson\n### Title: Estimation of Gini-Simpson index or Simpson diversity\n### Aliases: ChaoSimpson\n\n### ** Examples\n\ndata(spider)\nChaoSimpson(spider$Girdled, datatype=\"abundance\")\n\n\n"} {"package":"iNEXT","topic":"DataInfo","snippet":"### Name: DataInfo\n### Title: Exhibit basic data information\n### Aliases: DataInfo\n\n### ** Examples\n\ndata(spider)\nDataInfo(spider, datatype=\"abundance\")\n\n\n"} {"package":"iNEXT","topic":"as.abucount","snippet":"### Name: as.abucount\n### Title: Transform abundance raw data to abundance row-sum counts (iNEXT\n### input format)\n### Aliases: as.abucount\n\n### ** Examples\n\ndata(ciliates)\nlapply(ciliates, as.abucount)\n\n\n\n"} {"package":"iNEXT","topic":"as.incfreq","snippet":"### Name: as.incfreq\n### Title: Transform incidence raw data to incidence frequencies (iNEXT\n### input format)\n### Aliases: as.incfreq\n\n### ** Examples\n\ndata(ciliates)\nlapply(ciliates, as.incfreq)\n\n\n\n"} {"package":"iNEXT","topic":"bird","snippet":"### Name: bird\n### Title: Bird data (datatype = \"abundance\")\n### Aliases: bird\n### Keywords: datasets\n\n### ** Examples\n\ndata(bird)\n## Not run: \n##D out <- iNEXT(bird, datatype=\"abundance\")\n##D ggiNEXT(out)\n## End(Not run)\n\n\n"} {"package":"iNEXT","topic":"ciliates","snippet":"### Name: ciliates\n### Title: Ciliates data (datatype = \"incidence_raw\")\n### Aliases: ciliates\n### Keywords: datasets\n\n### ** Examples\n\ndata(ciliates)\n## Not run: \n##D out <- iNEXT(ciliates, datatype = \"incidence_raw\")\n##D ggiNEXT(out)\n## End(Not run)\n\n\n"} {"package":"iNEXT","topic":"estimateD","snippet":"### Name: estimateD\n### Title: Compute species diversity with a particular level of sample\n### size/coverage\n### Aliases: estimateD\n\n### ** Examples\n\ndata(spider)\nout1 <- estimateD(spider, q = c(0,1,2), datatype = \"abundance\", base=\"size\")\nout1\n## Not run: \n##D out2 <- 
estimateD(spider, q = c(0,1,2), datatype = \"abundance\", base=\"coverage\")\n##D out2\n##D \n##D data(ant)\n##D out <- estimateD(ant, q = c(0,1,2), datatype = \"incidence_freq\", base=\"coverage\", \n##D level=0.985, conf=0.95)\n##D out\n## End(Not run)\n\n\n"} {"package":"iNEXT","topic":"fortify.iNEXT","snippet":"### Name: fortify.iNEXT\n### Title: Fortify method for classes from the iNEXT package.\n### Aliases: fortify.iNEXT\n\n### ** Examples\n\ndata(spider)\n# single-assemblage abundance data\nout1 <- iNEXT(spider$Girdled, q=0, datatype=\"abundance\")\nggplot2::fortify(out1, type=1)\n\n\n"} {"package":"iNEXT","topic":"ggiNEXT","snippet":"### Name: ggiNEXT\n### Title: ggplot2 extension for an iNEXT object\n### Aliases: ggiNEXT ggiNEXT.iNEXT ggiNEXT.default\n\n### ** Examples\n\n# single-assemblage abundance data\ndata(spider)\nout1 <- iNEXT(spider$Girdled, q=0, datatype=\"abundance\")\nggiNEXT(x=out1, type=1)\nggiNEXT(x=out1, type=2)\nggiNEXT(x=out1, type=3)\n\n## Not run: \n##D # single-assemblage incidence data with three orders q\n##D data(ant)\n##D size <- round(seq(10, 500, length.out=20))\n##D y <- iNEXT(ant$h500m, q=c(0,1,2), datatype=\"incidence_freq\", size=size, se=FALSE)\n##D ggiNEXT(y, se=FALSE, color.var=\"Order.q\")\n##D \n##D # multiple-assemblage abundance data with three orders q\n##D z <- iNEXT(spider, q=c(0,1,2), datatype=\"abundance\")\n##D ggiNEXT(z, facet.var=\"Assemblage\", color.var=\"Order.q\")\n##D ggiNEXT(z, facet.var=\"Both\", color.var=\"Both\")\n## End(Not run)\n\n\n"} {"package":"iNEXT","topic":"iNEXT","snippet":"### Name: iNEXT\n### Title: iNterpolation and EXTrapolation of Hill numbers\n### Aliases: iNEXT\n\n### ** Examples\n\n## Not run: \n##D ## example for abundance based data (list of vector)\n##D data(spider)\n##D out1 <- iNEXT(spider, q=c(0,1,2), datatype=\"abundance\")\n##D out1$DataInfo # showing basic data information.\n##D out1$AsyEst # showing asymptotic diversity estimates.\n##D out1$iNextEst$size_based \n##D # showing diversity estimates with rarefied and extrapolated samples; \n##D # confidence limits are obtained for fixed sample size.\n##D \n##D out1$iNextEst$coverage_based \n##D # showing diversity estimates with rarefied and extrapolated samples;\n##D # confidence limits are obtained for fixed sample coverage.\n## End(Not run)\n## example for abundance based data (data.frame)\ndata(bird)\nout2 <- iNEXT(bird, q=0, datatype=\"abundance\")\nout2\n\n## Not run: \n##D ## example for incidence frequencies based data (list of data.frame)\n##D data(ant)\n##D t <- round(seq(10, 500, length.out=20))\n##D out3 <- iNEXT(ant$h500m, q=1, datatype=\"incidence_freq\", size=t, se=FALSE)\n##D out3$iNextEst\n## End(Not run)\n\n\n"} {"package":"iNEXT","topic":"plot.iNEXT","snippet":"### Name: plot.iNEXT\n### Title: Plotting iNEXT object\n### Aliases: plot.iNEXT\n\n### ** Examples\n\ndata(spider)\n# single-assemblage abundance data\nout1 <- iNEXT(spider$Girdled, q=0, datatype=\"abundance\")\nplot(x=out1, type=1)\nplot(x=out1, type=2)\nplot(x=out1, type=3)\n\n\n\n"} {"package":"LCAvarsel","topic":"LCAvarsel","snippet":"### Name: LCAvarsel\n### Title: Variable selection for latent class analysis\n### Aliases: LCAvarsel print.LCAvarsel\n\n### ** Examples\n\n## Not run: \n##D # few simple examples\n##D data(carcinoma, package = \"poLCA\")\n##D sel1 <- LCAvarsel(carcinoma) # Fop et al. (2017) method with no swap step\n##D sel2 <- LCAvarsel(carcinoma, swap = TRUE) # Fop et al. 
(2017) method with swap step\n##D sel3 <- LCAvarsel(carcinoma, search = \"forward\", \n##D independence = TRUE) # Dean and Raftery(2010) method\n##D sel4 <- LCAvarsel(carcinoma, search = \"ga\") # stochastic evolutionary search\n##D \n##D # an example with a concomitant covariate \n##D data(election, package = \"poLCA\")\n##D elec <- election[, cbind(\"MORALG\", \"CARESG\", \"KNOWG\", \"LEADG\", \"DISHONG\", \"INTELG\",\n##D \"MORALB\", \"CARESB\", \"KNOWB\", \"LEADB\", \"DISHONB\", \"INTELB\")]\n##D party <- election$PARTY\n##D fit <- fitLCA(elec, G = 3, X = party)\n##D sel <- LCAvarsel(elec, G = 3, X = party, parallel = TRUE)\n##D pidmat <- cbind(1, 1:7)\n##D exb1 <- exp(pidmat %*% fit$coeff)\n##D exb2 <- exp(pidmat %*% sel$model$coeff)\n##D par(mfrow = c(1,2))\n##D matplot(1:7, ( cbind(1, exb1)/(1 + rowSums(exb1)) ),\n##D ylim = c(0,1), type = \"l\",\n##D main = \"Party ID as a predictor of candidate affinity class\",\n##D xlab = \"Party ID: strong Democratic (1) to strong Republican (7)\",\n##D ylab = \"Probability of latent class membership\", \n##D lwd = 2 , col = 1)\n##D matplot(1:7, ( cbind(1, exb2)/(1 + rowSums(exb2)) ),\n##D ylim = c(0,1), type = \"l\",\n##D main = \"Party ID as a predictor of candidate affinity class\",\n##D xlab = \"Party ID: strong Democratic (1) to strong Republican (7)\",\n##D ylab = \"Probability of latent class membership\", \n##D lwd = 2 , col = 1)\n##D # compare\n##D compareCluster(fit$class, sel$model$class)\n## End(Not run)\n\n\n"} {"package":"LCAvarsel","topic":"compareCluster","snippet":"### Name: compareCluster\n### Title: Clustering comparison criteria\n### Aliases: compareCluster\n\n### ** Examples\n\ncl1 <- sample(1:3, 100, replace = TRUE)\ncl2 <- sample(letters[1:4], 100, replace = TRUE)\ncompareCluster(cl1, cl2)\ncompareCluster(cl1, cl1) # perfect matching\n\n\n"} {"package":"LCAvarsel","topic":"controlLCA","snippet":"### Name: control-parameters\n### Title: Set control parameters for various purposes\n### Aliases: controlLCA controlReg controlGA\n\n### ** Examples\n\ndata(carcinoma, package = \"poLCA\")\n# increase number of replicates and decrease tolerance value\nfit <- fitLCA(carcinoma, ctrlLCA = controlLCA(nrep = 10, tol = 1e-07))\n\n\n"} {"package":"LCAvarsel","topic":"fitLCA","snippet":"### Name: fitLCA\n### Title: Latent class analysis model\n### Aliases: fitLCA print.fitLCA\n\n### ** Examples\n\ndata(gss82, package = \"poLCA\")\nmaxG(gss82, 1:7) # not all latent class models can be fitted\nfit <- fitLCA(gss82, G = 1:4)\n\n## Not run: \n##D # diminish tolerance and increase number of replicates\n##D fit2 <- fitLCA(gss82, G = 1:4, ctrlLCA = controlLCA(tol = 1e-06, nrep = 10))\n## End(Not run)\n\n# the example with a single covariate as in ?poLCA\ndata(election, package = \"poLCA\")\nelec <- election[, cbind(\"MORALG\", \"CARESG\", \"KNOWG\", \"LEADG\", \"DISHONG\", \"INTELG\",\n \"MORALB\", \"CARESB\", \"KNOWB\", \"LEADB\", \"DISHONB\", \"INTELB\")]\nparty <- election$PARTY\nfit <- fitLCA(elec, G = 3, X = party)\npidmat <- cbind(1, 1:7)\nexb <- exp(pidmat %*% fit$coeff)\nmatplot(1:7, ( cbind(1, exb)/(1 + rowSums(exb)) ),\n ylim = c(0,1), type = \"l\",\n main = \"Party ID as a predictor of candidate affinity class\",\n xlab = \"Party ID: strong Democratic (1) to strong Republican (7)\",\n ylab = \"Probability of latent class membership\", \n lwd = 2 , col = 1)\n\n\n"} {"package":"LCAvarsel","topic":"maxG","snippet":"### Name: maxG\n### Title: Maximum number of latent classes\n### Aliases: maxG\n\n### ** Examples\n\ndata(carcinoma, package 
= \"poLCA\")\nmaxG(carcinoma, 1:4)\nmaxG(carcinoma, 2:3)\nmaxG(carcinoma, 5) # the model is not identifiable\n\n\n"} {"package":"LLM","topic":"llm","snippet":"### Name: llm\n### Title: Create Logit Leaf Model\n### Aliases: llm\n\n### ** Examples\n\n## Load PimaIndiansDiabetes dataset from mlbench package\nif (requireNamespace(\"mlbench\", quietly = TRUE)) {\n library(\"mlbench\")\n}\ndata(\"PimaIndiansDiabetes\")\n## Split in training and test (2/3 - 1/3)\nidtrain <- c(sample(1:768,512))\nPimaTrain <- PimaIndiansDiabetes[idtrain,]\nPimatest <- PimaIndiansDiabetes[-idtrain,]\n## Create the LLM\nPima.llm <- llm(X = PimaTrain[,-c(9)],Y = PimaTrain$diabetes,\n threshold_pruning = 0.25,nbr_obs_leaf = 100)\n\n\n\n"} {"package":"LLM","topic":"llm.cv","snippet":"### Name: llm.cv\n### Title: Runs v-fold cross validation with LLM\n### Aliases: llm.cv\n\n### ** Examples\n\n## Load PimaIndiansDiabetes dataset from mlbench package\nif (requireNamespace(\"mlbench\", quietly = TRUE)) {\n library(\"mlbench\")\n}\ndata(\"PimaIndiansDiabetes\")\n## Create the LLM with 5-cv\nPima.llm <- llm.cv(X = PimaIndiansDiabetes[,-c(9)],Y = PimaIndiansDiabetes$diabetes, cv=5,\n threshold_pruning = 0.25,nbr_obs_leaf = 100)\n\n\n"} {"package":"LLM","topic":"predict.llm","snippet":"### Name: predict.llm\n### Title: Create Logit Leaf Model Prediction\n### Aliases: predict.llm\n\n### ** Examples\n\n## Load PimaIndiansDiabetes dataset from mlbench package\nif (requireNamespace(\"mlbench\", quietly = TRUE)) {\n library(\"mlbench\")\n}\ndata(\"PimaIndiansDiabetes\")\n## Split in training and test (2/3 - 1/3)\nidtrain <- c(sample(1:768,512))\nPimaTrain <- PimaIndiansDiabetes[idtrain,]\nPimatest <- PimaIndiansDiabetes[-idtrain,]\n## Create the LLM\nPima.llm <- llm(X = PimaTrain[,-c(9)],Y = PimaTrain$diabetes,\n threshold_pruning = 0.25,nbr_obs_leaf = 100)\n## Use the model on the test dataset to make a prediction\nPimaPrediction <- predict.llm(object = Pima.llm, X = Pimatest[,-c(9)])\n## Optionally add the dependent to calculate performance statistics such as AUC\n# PimaPrediction <- cbind(PimaPrediction, \"diabetes\" = Pimatest[,\"diabetes\"])\n\n\n"} {"package":"LLM","topic":"table.cat.llm.html","snippet":"### Name: table.cat.llm.html\n### Title: Create the HTML code for Logit Leaf Model visualization\n### Aliases: table.cat.llm.html\n\n### ** Examples\n\n## Load PimaIndiansDiabetes dataset from mlbench package\nif (requireNamespace(\"mlbench\", quietly = TRUE)) {\n library(\"mlbench\")\n}\ndata(\"PimaIndiansDiabetes\")\n## Split in training and test (2/3 - 1/3)\nidtrain <- c(sample(1:768,512))\nPimaTrain <- PimaIndiansDiabetes[idtrain,]\nPimatest <- PimaIndiansDiabetes[-idtrain,]\n## Create the LLM\nPima.llm <- llm(X = PimaTrain[,-c(9)],Y = PimaTrain$diabetes,\n threshold_pruning = 0.25,nbr_obs_leaf = 100)\n## Define the variable categories (note: the categories are only created for demonstration)\nvar_cat_df <- as.data.frame(cbind(names(PimaTrain[,-c(9)]),\nc(\"cat_a\",\"cat_a\",\"cat_a\",\"cat_a\",\"cat_b\",\"cat_b\",\"cat_b\",\"cat_b\")), stringsAsFactors = FALSE)\nnames(var_cat_df) <- c(\"iv\", \"cat\")\n## Save the output of the model to a html file\nPima.Viz <- table.cat.llm.html(object = Pima.llm,category_var_df= var_cat_df,\n headertext = \"This is an example of the LLM model\",\nfootertext = \"Enjoy the package!\")\n## Optionally write it to your working directory\n# write(Pima.Viz, \"Visualization_LLM_on_PimaIndiansDiabetes.html\")\n\n\n"} {"package":"LLM","topic":"table.llm.html","snippet":"### Name: table.llm.html\n### 
Title: Create the HTML code for Logit Leaf Model visualization\n### Aliases: table.llm.html\n\n### ** Examples\n\n## Load PimaIndiansDiabetes dataset from mlbench package\nif (requireNamespace(\"mlbench\", quietly = TRUE)) {\n library(\"mlbench\")\n}\ndata(\"PimaIndiansDiabetes\")\n## Split in training and test (2/3 - 1/3)\nidtrain <- c(sample(1:768,512))\nPimaTrain <- PimaIndiansDiabetes[idtrain,]\nPimatest <- PimaIndiansDiabetes[-idtrain,]\n## Create the LLM\nPima.llm <- llm(X = PimaTrain[,-c(9)],Y = PimaTrain$diabetes,\n threshold_pruning = 0.25,nbr_obs_leaf = 100)\n## Save the output of the model to a html file\nPima.Viz <- table.llm.html(object = Pima.llm, headertext = \"This is an example of the LLM model\",\nfootertext = \"Enjoy the package!\")\n## Optionally write it to your working directory\n# write(Pima.Viz, \"Visualization_LLM_on_PimaIndiansDiabetes.html\")\n\n\n"} {"package":"remotePARTS","topic":"print.remoteTS","snippet":"### Name: print.remoteTS\n### Title: S3 print method for remoteTS class\n### Aliases: print.remoteTS summary.remoteTS print.mapTS summary.mapTS\n### smry_funM smry_funV\n\n### ** Examples\n\n# simulate dummy data\n time.points = 9 # time series length\n map.width = 5 # square map width\n coords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\n ## create empty spatiotemporal variables:\n X <- matrix(NA, nrow = nrow(coords), ncol = time.points) # response\n Z <- matrix(NA, nrow = nrow(coords), ncol = time.points) # predictor\n # setup first time point:\n Z[, 1] <- .05*coords[,\"x\"] + .2*coords[,\"y\"]\n X[, 1] <- .5*Z[, 1] + rnorm(nrow(coords), 0, .05) #x at time t\n ## project through time:\n for(t in 2:time.points){\n Z[, t] <- Z[, t-1] + rnorm(map.width^2)\n X[, t] <- .2*X[, t-1] + .1*Z[, t] + .05*t + rnorm(nrow(coords), 0 , .25)\n }\n\n ## Pixel CLS\n tmp.df = data.frame(x = X[1, ], t = 1:ncol(X), z = Z[1, ])\n CLS <- fitCLS(x ~ z, data = tmp.df)\n print(CLS)\n summary(CLS)\n residuals(CLS)\n coef(CLS)\n logLik(CLS)\n fitted(CLS)\n # plot(CLS) # doesn't work\n\n ## Pixel AR\n AR <- fitAR(x ~ z, data = tmp.df)\n print(AR)\n summary(AR)\n coef(AR)\n residuals(AR)\n logLik(AR)\n fitted(AR)\n # plot(AR) # doesn't work\n\n ## Map CLS\n CLS.map <- fitCLS_map(X, coords, y ~ Z, X.list = list(Z = Z), lag.x = 0, resids.only = TRUE)\n print(CLS.map)\n summary(CLS.map)\n residuals(CLS.map)\n # plot(CLS.map) # doesn't work\n\n CLS.map <- fitCLS_map(X, coords, y ~ Z, X.list = list(Z = Z), lag.x = 0, resids.only = FALSE)\n print(CLS.map)\n summary(CLS.map)\n coef(CLS.map)\n residuals(CLS.map)\n # logLik(CLS.map) # doesn't work\n fitted(CLS.map)\n # plot(CLS.map) # doesn't work\n\n ## Map AR\n AR.map <- fitAR_map(X, coords, y ~ Z, X.list = list(Z = Z), resids.only = TRUE)\n print(AR.map)\n summary(AR.map)\n residuals(AR.map)\n # plot(AR.map) # doesn't work\n\n AR.map <- fitAR_map(X, coords, y ~ Z, X.list = list(Z = Z), resids.only = FALSE)\n print(AR.map)\n summary(AR.map)\n coef(AR.map)\n residuals(AR.map)\n # logLik(AR.map) # doesn't work\n fitted(AR.map)\n # plot(AR.map) # doesn't work\n\n\n\n"} {"package":"remotePARTS","topic":"check_posdef","snippet":"### Name: check_posdef\n### Title: Check if a matrix is positive definite\n### Aliases: check_posdef\n\n### ** Examples\n\n\n# distance matrix\nM = distm_scaled(expand.grid(x = 1:3, y = 1:3))\n\n# check if it is positive definite\ncheck_posdef(M)\n\n# check if the covariance matrix is positive definite\ncheck_posdef(covar_exp(M, .1))\n\n# non-symmetric matrix\ncheck_posdef(matrix(1:9, 3, 3))\n\n# 
non-square matrix\ncheck_posdef(matrix(1:6, 3, 2))\n\n\n\n"} {"package":"remotePARTS","topic":"covar_taper","snippet":"### Name: covar_taper\n### Title: Tapered-spherical distance-based covariance function\n### Aliases: covar_taper covar_exp covar_exppow\n\n### ** Examples\n\n\n# simulate dummy data\nmap.width = 5 # square map width\ncoords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\n\n# calculate distance\nD = geosphere::distm(coords) # distance matrix\n\n# visualize covariance matrix\nimage(covar_taper(D, theta = .5*max(D)))\n\n# plot tapered covariance function\ncurve(covar_taper(x, theta = .5), from = 0, to = 1);abline(v = 0.5, lty = 2, col = \"grey80\")\n\n\n# visualize covariance matrix\nimage(covar_exp(D, range = .2*max(D)))\n\n# plot exponential function with different ranges\ncurve(covar_exp(x, range = .2), from = 0, to = 1)\ncurve(covar_exp(x, range = .1), from = 0, to = 1, col = \"blue\", add = TRUE)\nlegend(\"topright\", legend = c(\"range = 0.2\", \"range = 0.1\"), col = c(\"black\", \"blue\"), lty = 1)\n\n\n# visualize Exponential covariance matrix\nimage(covar_exppow(D, range = .2*max(D), shape = 1))\n\n# visualize Exponential-power covariance matrix\nimage(covar_exppow(D, range = .2*max(D), shape = .5))\n\n# plot exponential power function with different shapes\ncurve(covar_exppow(x, range = .2, shape = 1), from = 0, to = 1)\ncurve(covar_exppow(x, range = .2, shape = .5), from = 0, to = 1, col = \"blue\", add = TRUE)\nlegend(\"topright\", legend = c(\"shape = 1.0\", \"shape = 0.5\"), col = c(\"black\", \"blue\"), lty = 1)\n\n\n\n"} {"package":"remotePARTS","topic":"distm_km","snippet":"### Name: distm_km\n### Title: Calculate a distance matrix from coordinates\n### Aliases: distm_km distm_scaled\n\n### ** Examples\n\nmap.width = 3 # square map width\ncoords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\ndistm_scaled(coords) # calculate relative distance matrix\n\n\n\n"} {"package":"remotePARTS","topic":"fitAR","snippet":"### Name: fitAR\n### Title: AR regressions by REML\n### Aliases: fitAR AR_fun\n\n### ** Examples\n\n\n# simulate dummy data\nt = 1:30 # times series\nZ = rnorm(30) # random independent variable\nx = .2*Z + (.05*t) # generate dependent effects\nx[2:30] = x[2:30] + .2*x[1:29] # add autocorrelation\n\n# fit the AR model, using Z as a covariate\n(AR = fitAR(x ~ Z))\n\n# get specific components\nAR$residuals\nAR$coefficients\nAR$pval\n\n# now using time as a covariate\n(AR.time <- fitAR(x ~ t))\n\n# source variable from a dataframe\ndf = data.frame(y = x, t.scaled = t/30, Z = Z)\nfitAR(y ~ t.scaled + Z, data = df)\n\n## Methods\nsummary(AR)\nresiduals(AR)\ncoefficients(AR)\n\n\n\n"} {"package":"remotePARTS","topic":"fitAR_map","snippet":"### Name: fitAR_map\n### Title: Map-level AR REML\n### Aliases: fitAR_map\n\n### ** Examples\n\n# simulate dummy data\n time.points = 9 # time series length\n map.width = 5 # square map width\n coords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\n ## create empty spatiotemporal variables:\n X <- matrix(NA, nrow = nrow(coords), ncol = time.points) # response\n Z <- matrix(NA, nrow = nrow(coords), ncol = time.points) # predictor\n# setup first time point:\n Z[, 1] <- .05*coords[,\"x\"] + .2*coords[,\"y\"]\n X[, 1] <- .5*Z[, 1] + rnorm(nrow(coords), 0, .05) #x at time t\n ## project through time:\n for(t in 2:time.points){\n Z[, t] <- Z[, t-1] + rnorm(map.width^2)\n X[, t] <- .2*X[, t-1] + .1*Z[, t] + .05*t + rnorm(nrow(coords), 0 , .25)\n }\n\n# visualize dummy 
data (NOT RUN)\n# library(ggplot2);library(dplyr)\n# data.frame(coords, X) %>%\n# reshape2::melt(id.vars = c(\"x\", \"y\")) %>%\n# ggplot(aes(x = x, y = y, fill = value)) +\n# geom_tile() +\n# facet_wrap(~variable)\n\n# fit AR, returning residuals only\nfitAR_map(X, coords, formula = y ~ t, resids.only = TRUE)\n\n# fit AR with temporal and spatiotemporal predictors\n(AR.map <- fitAR_map(X, coords, formula = y ~ t + Z, X.list = list(t = 1:ncol(X),\n Z = Z), resids.only = FALSE))\n## extract some values\nAR.map$coefficients # coefficients\nAR.map$logLik # log-likelihoods\n\n## Methods\nsummary(AR.map)\nresiduals(AR.map)\ncoefficients(AR.map)\n\n\n\n"} {"package":"remotePARTS","topic":"fitCLS","snippet":"### Name: fitCLS\n### Title: CLS for time series\n### Aliases: fitCLS\n\n### ** Examples\n\n\n# simulate dummy data\nt = 1:30 # times series\nZ = rnorm(30) # random independent variable\nx = .2*Z + (.05*t) # generate dependent effects\nx[2:30] = x[2:30] + .2*x[1:29] # add autocorrelation\nx = x + rnorm(30, 0, .01)\ndf = data.frame(x, t, Z) # collect in data frame\n\n# fit a CLS model with previous x, t, and Z as predictors\n## note, this model does not follow the underlying process.\n### See below for a better fit.\n(CLS <- fitCLS(x ~ t + Z, data = df))\n\n# extract other values\nCLS$MSE #MSE\nCLS$logLik #log-likelihood\n\n# fit with no lag in independent variables (as simulated):\n(CLS2 <- fitCLS(x ~ t + Z, df, lag.x = 0))\nsummary(CLS2)\n\n# no lag in x\nfitCLS(x ~ t + Z, df, lag.y = 0)\n\n# visualize the lag\n## large lag in x\nfitCLS(x ~ t + Z, df, lag.y = 2, lag.x = 0, debug = TRUE)$lag\n## large lag in Z\nfitCLS(x ~ t + Z, df, lag.y = 0, lag.x = 2, debug = TRUE)$lag\n\n# # throws errors (NOT RUN)\n# fitCLS(x ~ t + Z, df, lag.y = 28) # longer lag than time\n# fitCLS(cbind(x, rnorm(30)) ~ t + Z, df) # matrix response\n\n## Methods\nsummary(CLS)\nresiduals(CLS)\n\n\n\n"} {"package":"remotePARTS","topic":"fitCLS_map","snippet":"### Name: fitCLS_map\n### Title: Map-level CLS for time series\n### Aliases: fitCLS_map\n\n### ** Examples\n\n\n# simulate dummy data\ntime.points = 9 # time series length\nmap.width = 5 # square map width\ncoords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\n## create empty spatiotemporal variables:\nX <- matrix(NA, nrow = nrow(coords), ncol = time.points) # response\nZ <- matrix(NA, nrow = nrow(coords), ncol = time.points) # predictor\n# setup first time point:\nZ[, 1] <- .05*coords[,\"x\"] + .2*coords[,\"y\"]\nX[, 1] <- .5*Z[, 1] + rnorm(nrow(coords), 0, .05) #x at time t\n## project through time:\nfor(t in 2:time.points){\n Z[, t] <- Z[, t-1] + rnorm(map.width^2)\n X[, t] <- .2*X[, t-1] + .1*Z[, t] + .05*t + rnorm(nrow(coords), 0 , .25)\n}\n\n# # visualize dummy data (NOT RUN)\n# library(ggplot2);library(dplyr)\n# data.frame(coords, X) %>%\n# reshape2::melt(id.vars = c(\"x\", \"y\")) %>%\n# ggplot(aes(x = x, y = y, fill = value)) +\n# geom_tile() +\n# facet_wrap(~variable)\n\n# fit CLS, returning residuals only\nfitCLS_map(X, coords, formula = y ~ t, resids.only = TRUE)\n\n# fit CLS with temporal and spatiotemporal predictors\n(CLS.map <- fitCLS_map(X, coords, formula = y ~ t + Z,\n X.list = list(t = 1:ncol(X), Z = Z),\n resids.only = FALSE))\n## extract some values\nCLS.map$coefficients # coefficients\nCLS.map$logLik # log-likelihoods\n\n## Methods\nsummary(CLS.map)\nresiduals(CLS.map)\ncoefficients(CLS.map)\n\n\n\n"} {"package":"remotePARTS","topic":"fitCor","snippet":"### Name: fitCor\n### Title: Estimate spatial parameters from time series residuals\n### 
Aliases: fitCor\n\n### ** Examples\n\n\n# simulate dummy data\nset.seed(19)\ntime.points = 30 # time series length\nmap.width = 8 # square map width\ncoords = expand.grid(x = 1:map.width, y = 1:map.width) # coordinate matrix\n\n## create empty spatiotemporal variables:\nX <- matrix(NA, nrow = nrow(coords), ncol = time.points) # response\nZ <- matrix(NA, nrow = nrow(coords), ncol = time.points) # predictor\n\n## setup first time point:\nZ[, 1] <- .05*coords[,\"x\"] + .2*coords[,\"y\"]\nX[, 1] <- .5*Z[, 1] + rnorm(nrow(coords), 0, .05) #x at time t\n\n## project through time:\nfor(t in 2:time.points){\n Z[, t] <- Z[, t-1] + rnorm(map.width^2)\n X[, t] <- .2*X[, t-1] + .1*Z[, t] + .05*t + rnorm(nrow(coords), 0 , .25)\n}\n\nAR.map = fitAR_map(X, coords, formula = y ~ Z, X.list = list(Z = Z), resids.only = FALSE)\n\n# using pre-defined covariance function\n## exponential covariance\nfitCor(AR.map$residuals, coords, covar_FUN = \"covar_exp\", start = list(range = .1))\n\n## exponential-power covariance\nfitCor(AR.map$residuals, coords, covar_FUN = \"covar_exppow\", start = list(range = .1, shape = .2))\n\n# user-specified covariance function\nfitCor(AR.map$residuals, coords, covar_FUN = function(d, r){d^r}, start = list(r = .1))\n\n# un-scaled distances:\nfitCor(AR.map$residuals, coords, distm_FUN = \"distm_km\", start = list(r = 106))\n\n# specify which pixels to use, for reproducibility\nfitCor(AR.map$residuals, coords, index = 1:64)$spcor #all\nfitCor(AR.map$residuals, coords, index = 1:20)$spcor #first 20\nfitCor(AR.map$residuals, coords, index = 21:64)$spcor # last 43\n# randomly select pixels\nfitCor(AR.map$residuals, coords, fit.n = 20)$spcor #random 20\nfitCor(AR.map$residuals, coords, fit.n = 20)$spcor # different random 20\n\n\n\n"} {"package":"remotePARTS","topic":"fitGLS","snippet":"### Name: fitGLS\n### Title: Fit a PARTS GLS model.\n### Aliases: fitGLS\n\n### ** Examples\n\n\n## read data\ndata(ndvi_AK10000)\ndf = ndvi_AK10000[seq_len(200), ] # first 200 rows\n\n## fit covariance matrix\nV = covar_exp(distm_scaled(cbind(df$lng, df$lat)), range = .01)\n\n## run GLS\n(GLS = fitGLS(CLS_coef ~ 0 + land, data = df, V = V))\n\n## with F-test calculations to compare with the NULL model\n(GLS.F = fitGLS(CLS_coef ~ 0 + land, data = df, V = V, no.F = FALSE))\n\n## find ML nugget\nfitGLS(CLS_coef ~ 0 + land, data = df, V = V, no.F = FALSE, nugget = NA)\n\n## calculate V internally\ncoords = cbind(df$lng, df$lat)\nfitGLS(CLS_coef ~ 0 + land, data = df, logLik.only = FALSE, coords = coords,\n distm_FUN = \"distm_scaled\", covar_FUN = \"covar_exp\", covar.pars = list(range = .01))\n\n## use inverse cholesky\nfitGLS(CLS_coef ~ 0 + land, data = df, invCholV = invert_chol(V))\n\n## save inverse cholesky matrix\ninvchol = fitGLS(CLS_coef ~ 0 + land, data = df, V = V, save.invchol = TRUE)$invcholV\n\n## re-use inverse cholesky instead of V\nfitGLS(CLS_coef ~ 0 + land, data = df, invCholV = invchol)\n\n## Log-likelihood (fast)\nfitGLS(CLS_coef ~ 0 + land, data = df, V = V, logLik.only = TRUE)\n\n\n\n"} {"package":"remotePARTS","topic":"fitGLS_opt","snippet":"### Name: fitGLS_opt\n### Title: Fit a PARTS GLS model, with maximum likelihood spatial\n### parameters\n### Aliases: fitGLS_opt\n\n### ** Examples\n\n## No test: \n## read data\ndata(ndvi_AK10000)\ndf = ndvi_AK10000[seq_len(200), ] # first 200 rows\n\n## estimate nugget and range (very slow)\nfitGLS_opt(formula = CLS_coef ~ 0 + land, data = df,\n coords = df[, c(\"lng\", \"lat\")], start = c(range = .1, nugget = 0),\n opt.only = TRUE)\n\n## 
estimate range only, fixed nugget at 0, and fit full GLS (slow)\nfitGLS_opt(formula = CLS_coef ~ 0 + land, data = df,\n coords = df[, c(\"lng\", \"lat\")],\n start = c(range = .1), fixed = c(\"nugget\" = 0),\n method = \"Brent\", lower = 0, upper = 1)\n\n## constrain nugget to the interval (0, 1)\nlogit <- function(p) {log(p / (1 - p))}\ninv_logit <- function(l) {1 / (1 + exp(-l))}\n\nfitGLS_opt(formula = CLS_coef ~ 0 + land, data = df,\n coords = df[, c(\"lng\", \"lat\")],\n start = c(range = .1, nugget = 1e-10),\n trans = list(nugget = logit), backtrans = list(nugget = inv_logit),\n opt.only = TRUE)\n## End(No test)\n\n\n"} {"package":"remotePARTS","topic":"invert_chol","snippet":"### Name: invert_chol\n### Title: Invert the cholesky decomposition of V\n### Aliases: invert_chol\n\n### ** Examples\n\nM <- crossprod(matrix(1:6, 3))\n\n# without a nugget:\ninvert_chol(M)\n\n# with a nugget:\ninvert_chol(M, nugget = 0.2)\n\n\n"} {"package":"remotePARTS","topic":"MC_GLSpart","snippet":"### Name: MC_GLSpart\n### Title: fit a parallel partitioned GLS\n### Aliases: MC_GLSpart MCGLS_partsummary multicore_fitGLS_partition\n### fitGLS_partition part_data part_csv\n\n### ** Examples\n\n## read data\ndata(ndvi_AK10000)\ndf = ndvi_AK10000[seq_len(1000), ] # first 1000 rows\n\n## create partition matrix\npm = sample_partitions(nrow(df), npart = 3)\n\n## fit GLS with fixed nugget\npartGLS = fitGLS_partition(formula = CLS_coef ~ 0 + land, partmat = pm,\n data = df, nugget = 0, do.t.test = TRUE)\n\n## hypothesis tests\nchisqr(partGLS) # explanatory power of model\nt.test(partGLS) # significance of predictors\n\n## now with a numeric predictor\nfitGLS_partition(formula = CLS_coef ~ lat, partmat = pm, data = df, nugget = 0)\n\n## No test: \n## fit ML nugget for each partition (slow)\n(partGLS.opt = fitGLS_partition(formula = CLS_coef ~ 0 + land, partmat = pm,\n data = df, nugget = NA))\npartGLS.opt$part$nuggets # ML nuggets\n\n# Certain model structures may not be useful:\n## 0 intercept with numeric predictor (produces NAs) and gives a warning in statistical tests\nfitGLS_partition(formula = CLS_coef ~ 0 + lat, partmat = pm, data = df, nugget = 0)\n\n## intercept-only, gives warning\nfitGLS_partition(formula = CLS_coef ~ 1, partmat = pm, data = df, nugget = 0,\n do.chisqr.test = FALSE)\n## End(No test)\n## part_data examples\npart_data(1:20, CLS_coef ~ 0 + land, data = ndvi_AK10000)\n\n## No test: \n## part_csv examples - ## CAUTION: examples for part_csv() include file manipulation side-effects:\n# first, create a .csv file from ndviAK\ndata(ndvi_AK10000)\ncsv.path = file.path(tempdir(), \"ndviAK10000-remotePARTS.csv\")\nwrite.csv(ndvi_AK10000, file = csv.path)\n\n# build a partition from the first 20 pixels in the file\npart_csv(1:20, formula = CLS_coef ~ 0 + land, file = csv.path)\n\n# now with a random sample of 20 pixels\npart_csv(sample(3000, 20), formula = CLS_coef ~ 0 + land, file = csv.path)\n\n# remove the example csv file from disk\nfile.remove(csv.path)\n## End(No test)\n\n\n\n"} {"package":"remotePARTS","topic":"sample_partitions","snippet":"### Name: sample_partitions\n### Title: Randomly sample a partition matrix for partitioned GLS\n### Aliases: sample_partitions\n\n### ** Examples\n\n# dummy data with 100 pixels and 20 time points\ndat.M <- matrix(rnorm(100*20), ncol = 20)\n\n# 4 partitions (exhaustive)\nsample_partitions(npix = nrow(dat.M), npart = 4)\n\n# partitions with 10 pixels each (exhaustive)\nsample_partitions(npix = nrow(dat.M), partsize = 10)\n\n# 4 partitions each with 10 pixels (non-exhaustive, 
produces warning)\nsample_partitions(npix = nrow(dat.M), npart = 4, partsize = 10)\n\n# index of 50 pixels to use as subset\nsub.indx <- c(1:10, 21:25, 30:62, 70:71)\n\n# 5 partitions (exhaustive) from only the specified pixel subset\nsample_partitions(npix = nrow(dat.M), npart = 5, pixels = sub.indx)\n\n\n\n"} {"package":"ddml","topic":"crosspred","snippet":"### Name: crosspred\n### Title: Cross-Predictions using Stacking.\n### Aliases: crosspred\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nX = AE98[, c(\"morekids\", \"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Compute cross-predictions using stacking with base learners ols and lasso.\n# Three ensemble approaches are computed simultaneously: equally\n# weighted (ensemble_type = \"average\"), MSPE-minimizing with weights\n# in the unit simplex (ensemble_type = \"nnls1\"), and the single best\n# learner (ensemble_type = \"singlebest\"). Predictions for each\n# learner are also calculated.\ncrosspred_res <- crosspred(y, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet)),\n ensemble_type = c(\"average\",\n \"nnls1\",\n \"singlebest\"),\n compute_predictions_bylearner = TRUE,\n sample_folds = 2,\n cv_folds = 2,\n silent = TRUE)\ndim(crosspred_res$oos_fitted) # = length(y) by length(ensemble_type)\ndim(crosspred_res$oos_fitted_bylearner) # = length(y) by length(learners)\n\n\n"} {"package":"ddml","topic":"crossval","snippet":"### Name: crossval\n### Title: Estimator of the Mean Squared Prediction Error using\n### Cross-Validation.\n### Aliases: crossval\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nX = AE98[, c(\"morekids\", \"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Compare ols, lasso, and ridge using 4-fold cross-validation\ncv_res <- crossval(y, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet),\n list(fun = mdl_glmnet,\n args = list(alpha = 0))),\n cv_folds = 4,\n silent = TRUE)\ncv_res$mspe\n\n\n"} {"package":"ddml","topic":"ddml_ate","snippet":"### Name: ddml_ate\n### Title: Estimator of the Average Treatment Effect.\n### Aliases: ddml_ate\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the average treatment effect using a single base learner, ridge.\nate_fit <- ddml_ate(y, D, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(ate_fit)\n\n# Estimate the average treatment effect using short-stacking with base\n# learners ols, lasso, and ridge.\nate_fit <- ddml_ate(y, D, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet),\n list(fun = mdl_glmnet,\n args = list(alpha = 0))),\n ensemble_type = 'nnls',\n shortstack = TRUE,\n sample_folds = 2,\n silent = TRUE)\nsummary(ate_fit)\n\n\n"} {"package":"ddml","topic":"ddml_fpliv","snippet":"### Name: ddml_fpliv\n### Title: Estimator for the Flexible Partially Linear IV Model.\n### Aliases: ddml_fpliv\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nZ = AE98[, \"samesex\", drop = FALSE]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the partially linear IV model using a single base learner: Ridge.\nfpliv_fit <- ddml_fpliv(y, D, Z, X,\n learners = list(what = mdl_glmnet,\n args 
= list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(fpliv_fit)\n\n\n"} {"package":"ddml","topic":"ddml_late","snippet":"### Name: ddml_late\n### Title: Estimator of the Local Average Treatment Effect.\n### Aliases: ddml_late\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nZ = AE98[, \"samesex\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the local average treatment effect using a single base learner,\n# ridge.\nlate_fit <- ddml_late(y, D, Z, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(late_fit)\n\n# Estimate the local average treatment effect using short-stacking with base\n# learners ols, lasso, and ridge.\nlate_fit <- ddml_late(y, D, Z, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet),\n list(fun = mdl_glmnet,\n args = list(alpha = 0))),\n ensemble_type = 'nnls',\n shortstack = TRUE,\n sample_folds = 2,\n silent = TRUE)\nsummary(late_fit)\n\n\n"} {"package":"ddml","topic":"ddml_pliv","snippet":"### Name: ddml_pliv\n### Title: Estimator for the Partially Linear IV Model.\n### Aliases: ddml_pliv\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nZ = AE98[, \"samesex\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the partially linear IV model using a single base learner, ridge.\npliv_fit <- ddml_pliv(y, D, Z, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(pliv_fit)\n\n\n"} {"package":"ddml","topic":"ddml_plm","snippet":"### Name: ddml_plm\n### Title: Estimator for the Partially Linear Model.\n### Aliases: ddml_plm\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the partially linear model using a single base learner, ridge.\nplm_fit <- ddml_plm(y, D, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(plm_fit)\n\n# Estimate the partially linear model using short-stacking with base learners\n# ols, lasso, and ridge\nplm_fit <- ddml_plm(y, D, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet),\n list(fun = mdl_glmnet,\n args = list(alpha = 0))),\n ensemble_type = 'nnls',\n shortstack = TRUE,\n sample_folds = 2,\n silent = TRUE)\nsummary(plm_fit)\n\n\n"} {"package":"ddml","topic":"mdl_glmnet","snippet":"### Name: mdl_glmnet\n### Title: Wrapper for 'glmnet::glmnet()'.\n### Aliases: mdl_glmnet\n\n### ** Examples\n\nglmnet_fit <- mdl_glmnet(rnorm(100), matrix(rnorm(1000), 100, 10))\nclass(glmnet_fit)\n\n\n"} {"package":"ddml","topic":"mdl_ranger","snippet":"### Name: mdl_ranger\n### Title: Wrapper for 'ranger::ranger()'.\n### Aliases: mdl_ranger\n\n### ** Examples\n\nranger_fit <- mdl_ranger(rnorm(100), matrix(rnorm(1000), 100, 10))\nclass(ranger_fit)\n\n\n"} {"package":"ddml","topic":"mdl_xgboost","snippet":"### Name: mdl_xgboost\n### Title: Wrapper for 'xgboost::xgboost()'.\n### Aliases: mdl_xgboost\n\n### ** Examples\n\nxgboost_fit <- mdl_xgboost(rnorm(50), matrix(rnorm(150), 50, 3),\n nrounds = 1)\nclass(xgboost_fit)\n\n\n"} {"package":"ddml","topic":"ols","snippet":"### Name: ols\n### Title: Ordinary least 
squares.\n### Aliases: ols\n\n### ** Examples\n\nols_fit <- ols(rnorm(100), cbind(rnorm(100), rnorm(100)), const = TRUE)\nols_fit$coef\n\n\n"} {"package":"ddml","topic":"shortstacking","snippet":"### Name: shortstacking\n### Title: Predictions using Short-Stacking.\n### Aliases: shortstacking\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nX = AE98[, c(\"morekids\", \"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Compute predictions using shortstacking with base learners ols and lasso.\n# Three ensemble approaches are computed simultaneously: equally\n# weighted (ensemble_type = \"average\"), MSPE-minimizing with weights\n# in the unit simplex (ensemble_type = \"nnls1\"), and the single best\n# learner (ensemble_type = \"singlebest\"). Predictions for each\n# learner are also calculated.\nshortstack_res <- shortstacking(y, X,\n learners = list(list(fun = ols),\n list(fun = mdl_glmnet)),\n ensemble_type = c(\"average\",\n \"nnls1\",\n \"singlebest\"),\n sample_folds = 2,\n silent = TRUE)\ndim(shortstack_res$oos_fitted) # = length(y) by length(ensemble_type)\ndim(shortstack_res$oos_fitted_bylearner) # = length(y) by length(learners)\n\n\n"} {"package":"ddml","topic":"summary.ddml_ate","snippet":"### Name: summary.ddml_ate\n### Title: Inference Methods for Treatment Effect Estimators.\n### Aliases: summary.ddml_ate summary.ddml_late\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the average treatment effect using a single base learner, ridge.\nate_fit <- ddml_ate(y, D, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(ate_fit)\n\n\n"} {"package":"ddml","topic":"summary.ddml_fpliv","snippet":"### Name: summary.ddml_fpliv\n### Title: Inference Methods for Partially Linear Estimators.\n### Aliases: summary.ddml_fpliv summary.ddml_pliv summary.ddml_plm\n\n### ** Examples\n\n# Construct variables from the included Angrist & Evans (1998) data\ny = AE98[, \"worked\"]\nD = AE98[, \"morekids\"]\nX = AE98[, c(\"age\",\"agefst\",\"black\",\"hisp\",\"othrace\",\"educ\")]\n\n# Estimate the partially linear model using a single base learner, ridge.\nplm_fit <- ddml_plm(y, D, X,\n learners = list(what = mdl_glmnet,\n args = list(alpha = 0)),\n sample_folds = 2,\n silent = TRUE)\nsummary(plm_fit)\n\n\n"} {"package":"quoradsR","topic":"fetch_quorads","snippet":"### Name: fetch_quorads\n### Title: fetch_quorads A function to fetch quora Ads data from the\n### windsor.ai API\n### Aliases: fetch_quorads\n\n### ** Examples\n\n## Not run: \n##D my_quorads_data <- fetch_quorads(api_key = \"your api key\",\n##D date_from = \"2022-10-01\",\n##D date_to = \"2022-10-02\",\n##D fields = c(\"campaign\", \"clicks\",\n##D \"spend\", \"impressions\", \"date\"))\n## End(Not run)\n\n\n"} {"package":"jjb","topic":"acc","snippet":"### Name: acc\n### Title: Accuracy of the Model\n### Aliases: acc\n\n### ** Examples\n\n# Set seed for reproducibility\nset.seed(100)\n\n# Generate data\nn = 1e2\n\ny = round(runif(n))\nyhat = round(runif(n))\n\n# Compute\no = acc(y, yhat)\n\n\n"} {"package":"jjb","topic":"celsius_to_fahrenheit","snippet":"### Name: celsius_to_fahrenheit\n### Title: Celsius to Fahrenheit Conversion\n### Aliases: celsius_to_fahrenheit\n\n### ** Examples\n\n\ncelsius_to_fahrenheit(33)\n\ncelsius_to_fahrenheit(0)\n\n\n"} 
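# Editor's sketch (not part of the jjb examples above): a quick round-trip
# check tying together the temperature converters documented nearby. It
# assumes celsius_to_fahrenheit()/fahrenheit_to_celsius() and
# celsius_to_kelvin()/kelvin_to_celsius() are exact inverses of one another.
stopifnot(isTRUE(all.equal(21.5, fahrenheit_to_celsius(celsius_to_fahrenheit(21.5)))))
stopifnot(isTRUE(all.equal(21.5, kelvin_to_celsius(celsius_to_kelvin(21.5)))))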
{"package":"jjb","topic":"celsius_to_kelvin","snippet":"### Name: celsius_to_kelvin\n### Title: Celsius to Kelvin Conversion\n### Aliases: celsius_to_kelvin\n\n### ** Examples\n\ncelsius_to_kelvin(92)\n\ncelsius_to_kelvin(32)\n\n\n"} {"package":"jjb","topic":"char_at","snippet":"### Name: char_at\n### Title: Character at Position _i_\n### Aliases: char_at\n\n### ** Examples\n\n# Example string\ns = \"statistics\"\n\n# Single character\nchar_at(s, 1)\n\n# Vectorized position\nchar_at(s, c(2, 3))\n\n\n"} {"package":"jjb","topic":"circle_matrix","snippet":"### Name: circle_matrix\n### Title: Create a circle pattern within a matrix\n### Aliases: circle_matrix\n\n### ** Examples\n\n# Generate a basic circle matrix\ncircle_matrix(10, 10, 3, 4, 2)\n\n# Generate two circles within the matrix\ncircle_matrix(10, 20, c(3,6), c(4,6), c(2,2))\n\n# Different fills\ncircle_matrix(10, 20, c(3,6), c(4,6), c(2,2), f = c(1,2))\n\n\n"} {"package":"jjb","topic":"convert_cols","snippet":"### Name: convert_cols\n### Title: Convert Multiple Columns of a 'data.frame' All at once\n### conversion of a 'data.frame' from current column types to alternates.\n### Aliases: convert_cols\n\n### ** Examples\n\n\nn = 100\n\nst = sample(LETTERS, n, replace = TRUE)\nsr = sample(letters, n, replace = TRUE)\nnum = rnorm(n)\n\nd = data.frame(x = st, y = num, z = sr, stringsAsFactors = FALSE)\n\n# Convert all columns\n\no = convert_cols(d, c(\"f\", \"c\", \"f\"))\n\n# Convert a subset\nd[, c(1, 3)] = convert_cols(d[, c(1, 3)], c(\"f\", \"f\"))\n\n\n"} {"package":"jjb","topic":"external_graphs","snippet":"### Name: external_graphs\n### Title: Change Default Graphing Device from RStudio\n### Aliases: external_graphs\n\n### ** Examples\n\n## No test: \n# Turn on external graphs\nexternal_graphs()\n\n# Turn off external graphs\nexternal_graphs(FALSE)\n## End(No test)\n\n\n"} {"package":"jjb","topic":"fahrenheit_to_celsius","snippet":"### Name: fahrenheit_to_celsius\n### Title: Fahrenheit to Celsius Conversion\n### Aliases: fahrenheit_to_celsius\n\n### ** Examples\n\n\nfahrenheit_to_celsius(92)\n\nfahrenheit_to_celsius(32)\n\n\n"} {"package":"jjb","topic":"fahrenheit_to_kelvin","snippet":"### Name: fahrenheit_to_kelvin\n### Title: Fahrenheit to Kelvin Conversion\n### Aliases: fahrenheit_to_kelvin\n\n### ** Examples\n\nfahrenheit_to_kelvin(92)\n\nfahrenheit_to_kelvin(32)\n\n\n"} {"package":"jjb","topic":"feature_scaling","snippet":"### Name: feature_scaling\n### Title: Feature Scaling\n### Aliases: feature_scaling feature_rescale feature_derescale feature_norm\n### feature_denorm feature_standardize feature_destandardize\n\n### ** Examples\n\n\n# Rescaling Features\ntemperatures = c(94.2, 88.1, 32, 0)\n\ntemp_min = min(temperatures)\ntemp_max = max(temperatures)\n\ntemperatures_norm = feature_rescale(temperatures, temp_min, temp_max)\ntemperatures_denorm = feature_derescale(temperatures_norm, temp_min, temp_max)\n\nall.equal(temperatures, temperatures_denorm)\n \n# Norming Features\nx = 1:10\n\nx_norm = sqrt(sum(x^2))\n\nx_norm_std = feature_norm(x, x_norm)\n\nx_recover = feature_denorm(x_norm_std, x_norm)\nall.equal(x, x_recover)\n\n# Standardizing Features\nx = 1:10\n\nx_mean = mean(x)\nx_sd = sd(x)\n\nx_std = feature_standardize(x, x_mean, x_sd)\nx_recovery = feature_destandardize(x_std, x_mean, x_sd)\n\nall.equal(x, x_recovery)\n\n\n"} {"package":"jjb","topic":"floor_and_cap","snippet":"### Name: floor_and_cap\n### Title: Floor and Cap a Numeric Variable\n### Aliases: floor_and_cap\n\n### ** Examples\n\n\n# One case version\nn = 100\n\nx = 
rnorm(n)\n\nx[n - 1] = -99999\nx[n] = 10000\n\ny = floor_and_cap(x)\n\n# Dataset example\n\nd = data.frame(x, y = rnorm(n))\n\no = sapply(d, floor_and_cap)\n\n\n"} {"package":"jjb","topic":"int_to_hex","snippet":"### Name: int_to_hex\n### Title: Convert 0-255 to a Hex number\n### Aliases: int_to_hex\n\n### ** Examples\n\nint_to_hex(22)\n\n\n"} {"package":"jjb","topic":"is_rstudio","snippet":"### Name: is_rstudio\n### Title: Is R Open in RStudio?\n### Aliases: is_rstudio\n\n### ** Examples\n\nis_rstudio()\n\n\n"} {"package":"jjb","topic":"is_whole","snippet":"### Name: is_whole\n### Title: Integer Check\n### Aliases: is_whole\n\n### ** Examples\n\nis_whole(2.3)\nis_whole(4)\nis_whole(c(1,2,3))\nis_whole(c(.4,.5,.6))\nis_whole(c(7,.8,9))\n\n\n"} {"package":"jjb","topic":"kelvin_to_celsius","snippet":"### Name: kelvin_to_celsius\n### Title: Kelvin to Celsius Conversion\n### Aliases: kelvin_to_celsius\n\n### ** Examples\n\nkelvin_to_celsius(92)\n\nkelvin_to_celsius(32)\n\n\n"} {"package":"jjb","topic":"kelvin_to_fahrenheit","snippet":"### Name: kelvin_to_fahrenheit\n### Title: Kelvin to Fahrenheit Conversion\n### Aliases: kelvin_to_fahrenheit\n\n### ** Examples\n\nkelvin_to_fahrenheit(92)\n\nkelvin_to_fahrenheit(32)\n\n\n"} {"package":"jjb","topic":"lagged","snippet":"### Name: lagged\n### Title: Lag Vector Values\n### Aliases: lagged\n\n### ** Examples\n\nx = rnorm(10)\n\nlagged(x, 2)\n\n\n"} {"package":"jjb","topic":"max_n","snippet":"### Name: max_n\n### Title: Maxima and Minima _n_ elements\n### Aliases: max_n min_n\n\n### ** Examples\n\n\nx = 1:10\n\n# Defaults to traditional max\n# This is more costly to compute than using the regular max function.\nmax_n(x) \n\n# Retrieve top two observations (highest first)\nmax_n(x, 2)\n\n# Missing values have no effect on the sorting procedure\nx[9] = NA\nmax_n(x, 3)\n\n# Defaults to traditional min.\n# This is more costly to compute than using the regular min function.\nmin_n(x)\nmin(x)\n\n# Retrieve bottom two observations (lowest first)\nmin_n(x, 2)\n\n# Missing values have no effect on the sorting procedure\nx[2] = NA\nmin_n(x, 3)\n\n\n"} {"package":"jjb","topic":"mkdir","snippet":"### Name: mkdir\n### Title: Make Directory\n### Aliases: mkdir\n\n### ** Examples\n\n## No test: \n# Make directory from working directory\nmkdir(\"toad\")\n\n## This assumes the computer is on Windows and the C drive exists.\n# Make directory from absolute path\nmkdir(\"C:/path/to/dir/toad\")\n## End(No test)\n\n\n"} {"package":"jjb","topic":"mse","snippet":"### Name: mse\n### Title: Mean Squared Error (MSE)\n### Aliases: mse\n\n### ** Examples\n\n# Set seed for reproducibility\nset.seed(100)\n\n# Generate data\nn = 1e2\n\ny = rnorm(n)\nyhat = rnorm(n, 0.5)\n\n# Compute\no = mse(y, yhat)\n\n\n"} {"package":"jjb","topic":"pad_number","snippet":"### Name: pad_number\n### Title: Pad Numeric Numbers\n### Aliases: pad_number\n\n### ** Examples\n\n# Padding applied\npad_number(8:10)\n\n# No padding applied\npad_number(2:3)\n\n# Pads non-negative number with 0.\n# This needs to be improved slightly...\npad_number(-1:1)\n\n\n"} {"package":"jjb","topic":"rgb_to_hex","snippet":"### Name: rgb_to_hex\n### Title: Convert RGB Value to Hexadecimal\n### Aliases: rgb_to_hex\n\n### ** Examples\n\n# Hexadecimal with pound sign\nrgb_to_hex(255,255,255)\n\n# Heaxadecimal without pound sign\nrgb_to_hex(255,255,255,FALSE)\n\n\n"} {"package":"jjb","topic":"rmse","snippet":"### Name: rmse\n### Title: Root Mean Squared Error (RMSE)\n### Aliases: rmse\n\n### ** Examples\n\n# Set seed for 
reproducibility\nset.seed(100)\n\n# Generate data\nn = 1e2\n\ny = rnorm(n)\nyhat = rnorm(n, 0.5)\n\n# Compute\no = rmse(y, yhat)\n\n\n"} {"package":"jjb","topic":"shade","snippet":"### Name: shade\n### Title: Shade an RGB value\n### Aliases: shade\n\n### ** Examples\n\nshade(c(22, 150, 230), shade_factor = 0.5)\n\n\n"} {"package":"jjb","topic":"system_graphic_driver","snippet":"### Name: system_graphic_driver\n### Title: Natural Graphics Driver for Operating System\n### Aliases: system_graphic_driver\n\n### ** Examples\n\n# Returns a string depending on test platform\nsystem_graphic_driver()\n\n\n"} {"package":"jjb","topic":"tint","snippet":"### Name: tint\n### Title: Tint an RGB value\n### Aliases: tint\n\n### ** Examples\n\ntint(c(22, 150, 230), tint_factor = 0.5)\n\n\n"} {"package":"jjb","topic":"tr","snippet":"### Name: tr\n### Title: Obtain the Trace of a Square Matrix\n### Aliases: tr\n\n### ** Examples\n\n# I_2 matrix\ntr(diag(2))\n\n\n"} {"package":"jjb","topic":"url_title","snippet":"### Name: url_title\n### Title: Create a \"safe\" url title\n### Aliases: url_title\n\n### ** Examples\n\nurl_title(\"My Name is Jaime!\")\n\n\n"} {"package":"minidown","topic":"download_rmd_button","snippet":"### Name: download_rmd_button\n### Title: Generate an HTML widget to download input Rmd file\n### Aliases: download_rmd_button\n\n### ** Examples\n\nset.seed(1L)\n\ninput <- tempfile()\nwriteLines(\"\", input)\ndownload_rmd_button(input)\n\n# Requires zip command\nif (interactive()) {\n input <- tempdir()\n download_rmd_button(input, embed = xfun::embed_dir)\n}\n\n\n"} {"package":"minidown","topic":"mini_document","snippet":"### Name: mini_document\n### Title: Convert to an HTML document powered by the lightweight CSS\n### framework.\n### Aliases: mini_document\n\n### ** Examples\n\n## Not run: \n##D library(rmarkdown)\n##D library(minidown)\n##D render(\"input.Rmd\", mini_document)\n## End(Not run)\n\n\n"} {"package":"deseats","topic":"BV4.1","snippet":"### Name: BV4.1\n### Title: Trend and Seasonality Estimation Using the Berlin Procedure 4.1\n### Aliases: BV4.1\n\n### ** Examples\n\n\nXt <- log(EXPENDITURES)\nest <- BV4.1(Xt)\nest\n\n\n\n"} {"package":"deseats","topic":"animate,deseats-method","snippet":"### Name: animate,deseats-method\n### Title: Animate Locally Weighted Regression Results\n### Aliases: animate,deseats-method\n\n### ** Examples\n\n## No test: \n### Creating the animation might take a while\nXt <- log(EXPENDITURES)\nsmoothing_options <- set_options(order_poly = 3)\nest <- deseats(Xt, smoothing_options = smoothing_options)\nanimate(est)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"arma_to_ar","snippet":"### Name: arma_to_ar\n### Title: AR Representation of an ARMA Model\n### Aliases: arma_to_ar\n\n### ** Examples\n\nar <- c(1.2, -0.4)\nma <- c(0.5)\narma_to_ar(ar = ar, ma = ma, max_i = 100)\n\n\n\n"} {"package":"deseats","topic":"arma_to_ma","snippet":"### Name: arma_to_ma\n### Title: MA Representation of an ARMA Model\n### Aliases: arma_to_ma\n\n### ** Examples\n\nar <- c(1.2, -0.4)\nma <- c(0.5)\narma_to_ma(ar = ar, ma = ma, max_i = 100)\n\n\n\n"} {"package":"deseats","topic":"autoplot,decomp-method","snippet":"### Name: autoplot,decomp-method\n### Title: Plot Method for Decomposition Results in the Style of ggplot2\n### Aliases: autoplot,decomp-method\n\n### ** Examples\n\n## No test: \nXt <- log(EXPENDITURES)\nest <- deseats(Xt)\nautoplot(est, which = 3)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"autoplot,deseats_fc-method","snippet":"### Name: 
autoplot,deseats_fc-method\n### Title: 'ggplot2' Plot Method for Class '\"deseats_fc\"'\n### Aliases: autoplot,deseats_fc-method\n\n### ** Examples\n\n## No test: \nest <- s_semiarma(log(EXPENDITURES))\nfc <- predict(est, n.ahead = 4)\nfc_e <- expo(fc)\nautoplot(fc_e)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"autoplot,hfilter-method","snippet":"### Name: autoplot,hfilter-method\n### Title: 'ggplot2' Plot Method for the Results of a Hamilton Filter\n### Aliases: autoplot,hfilter-method\n\n### ** Examples\n\nest <- hamilton_filter(log(EXPENDITURES))\nautoplot(est, which = 3, col = c(1, 6))\nautoplot(est, which = 4)\n\n\n\n"} {"package":"deseats","topic":"bwidth,deseats-method","snippet":"### Name: bwidth,deseats-method\n### Title: Retrieve the Used Bandwidth from an Estimation Object\n### Aliases: bwidth,deseats-method bwidth,s_semiarma-method\n\n### ** Examples\n\n## No test: \nXt <- log(EXPENDITURES)\nsmoothing_options <- set_options(order_poly = 3)\nest <- deseats(Xt, smoothing_options = smoothing_options)\nbwidth(est)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"bwidth_confint","snippet":"### Name: bwidth_confint\n### Title: Bootstrapping Confidence Intervals for Locally Weighted\n### Regression Bandwidths\n### Aliases: bwidth_confint\n\n### ** Examples\n\n## No test: \nxt <- log(EXPENDITURES)\nest <- deseats(xt, set_options(order_poly = 3))\nconf <- bwidth_confint(est, npaths = 200, num_cores = 2)\nconf\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"create.gain","snippet":"### Name: create.gain\n### Title: Create Gain Function from a Linear Time Series Filter\n### Aliases: create.gain\n\n### ** Examples\n\n\n# Moving average with smoothing over three values\na <- 1 / 3\ngain_ma <- create.gain(rep(a, 3))\nlambda <- seq(0, 0.5, 0.001)\nGF <- gain_ma(lambda)\nplot(lambda, GF, type = \"l\")\n\n\n\n# First differences filter\nb <- c(1, -1)\ngain_diff <- create.gain(b)\nlambda <- seq(0, 0.5, 0.001)\nGF2 <- gain_diff(lambda)\nplot(lambda, GF2, type = \"l\")\n\n## No test: \n# For a fully data-driven local linear trend + \n# trigonometric polynomial seasonality\n# (Note: we get various filters for different observation time points)\n\nxt <- EXPENDITURES\nest <- deseats(log(xt), set_options(order_poly = 3))\nws <- est@weights[, , \"Combined\"]\nl <- (length(ws[, 1]) - 1) / 2\n\nlambda <- seq(0, 0.5, 0.001)\nmat <- matrix(0, ncol = length(lambda), nrow = l + 1)\ncolF <- colorRampPalette(c(\"deepskyblue4\", \"deepskyblue\"))\ncols <- colF(l)\n\nfor (j in 1:(l + 1)) {\n\n gainF <- create.gain(ws[j, ], zero.at = j)\n mat[j, ] <- gainF(lambda)\n\n}\n\nmatplot(lambda, t(mat), type = paste0(rep(\"l\", l + 1), collapse = \"\"),\n lty = rep(1, l + 1), col = cols)\ntitle(\n main = paste0(\n \"Gain functions for the applied data-driven locally weighted \",\n \"regression\\napproach at boundary points and the first interior \",\n \"point\"\n )\n)\n\n# Same example as before but not for the trend but for the detrending filters\n# (Note: we get various filters for different observation time points)\n\nll <- l * 2 + 1\nmat2 <- mat\n\nfor (j in 1:(l + 1)) {\n\n zero.vec <- rep(0, ll)\n zero.vec[[j]] <- 1\n gainF <- create.gain(zero.vec - ws[j, ], zero.at = j)\n mat2[j, ] <- gainF(lambda)\n\n}\n\nmatplot(lambda, t(mat2), type = paste0(rep(\"l\", l + 1), collapse = \"\"),\n lty = rep(1, l + 1), col = cols)\ntitle(\n main = paste0(\n \"Gain functions for the applied data-driven detrending filter\\n\",\n \"at boundary points and the first interior \",\n \"point\"\n )\n)\n## 
End(No test)\n\n\n\n"} {"package":"deseats","topic":"deseats","snippet":"### Name: deseats\n### Title: Locally Weighted Regression for Trend and Seasonality in\n### Equidistant Time Series under Short Memory\n### Aliases: deseats\n\n### ** Examples\n\n## No test: \nXt <- log(EXPENDITURES)\nsmoothing_options <- set_options(order_poly = 3)\nest <- deseats(Xt, smoothing_options = smoothing_options)\nest\nplot(est, which = 1)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"expo,deseats_fc-method","snippet":"### Name: expo,deseats_fc-method\n### Title: Exponentiate 'deseats' Forecasts\n### Aliases: expo,deseats_fc-method\n\n### ** Examples\n\n## No test: \nest <- s_semiarma(log(EXPENDITURES), set_options(order_poly = 3))\nfc <- predict(est, n.ahead = 8)\nfc2 <- expo(fc)\nfc2\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"fitted,hfilter-method","snippet":"### Name: fitted,hfilter-method\n### Title: Fitted Components of the Hamilton Filter\n### Aliases: fitted,hfilter-method residuals,hfilter-method\n\n### ** Examples\n\nest <- hamilton_filter(log(EXPENDITURES))\nresiduals(est)\nfitted(est)\n\n\n\n"} {"package":"deseats","topic":"gain,deseats-method","snippet":"### Name: gain,deseats-method\n### Title: Obtain gain function values for DeSeaTS Trend and Detrend\n### Filters\n### Aliases: gain,deseats-method\n\n### ** Examples\n\n## No test: \nxt <- log(EXPENDITURES)\nest <- deseats(xt)\n\nlambda <- seq(0, 0.5, 0.01)\ngain_values <- gain(est, lambda = lambda)\nm <- length(gain_values$gain_trend[, 1])\nk <- (m - 1) / 2\ncolF <- colorRampPalette(c(\"deepskyblue4\", \"deepskyblue\"))\ncols <- colF(m)\n\nmatplot(lambda, t(gain_values$gain_decomb[1:(k + 1), ]), \n type = paste0(rep(\"l\", k + 1), collapse = \"\"),\n col = cols, lty = rep(1, k + 1))\ntitle(\"Gain functions of the combined detrend and deseasonalization filters\")\n\nmatplot(lambda, t(gain_values$gain_trend[1:(k + 1), ]), \n type = paste0(rep(\"l\", k + 1), collapse = \"\"),\n col = cols, lty = rep(1, k + 1))\ntitle(\"Gain functions of the trend filters\")\n\nmatplot(lambda, t(gain_values$gain_deseason[1:(k + 1), ]), \n type = paste0(rep(\"l\", k + 1), collapse = \"\"),\n col = cols, lty = rep(1, k + 1))\ntitle(\"Gain functions of the seasonal adjustment filters\")\n## End(No test) \n\n\n\n"} {"package":"deseats","topic":"hA_calc","snippet":"### Name: hA_calc\n### Title: Calculation of Theoretically Optimal Bandwidth and Its\n### Components\n### Aliases: hA_calc\n\n### ** Examples\n\n## No test: \narma <- list(ar = 0.8, sd_e = 0.01)\nm_f <- expression(13.1 + 3.1 * x + (dnorm(x / 0.15 - 0.5 / 0.15) / 0.15) / 4)\nn <- 500\np <- 1\nmu <- 1\nfrequ <- 4\ncb <- 0.05\n\nhA_calc(\n m = m_f,\n arma = arma, \n p = p,\n mu = mu,\n frequ = frequ,\n n = n,\n cb = cb\n)\n\nt <- 1:n\nxt <- t / n\nmxt <- 13.1 + 3.1 * xt + dnorm(xt, mean = 0.5, sd = 0.15) / 4\n\nS2 <- rep(c(0, 1, 0, 0), length.out = n)\nS3 <- rep(c(0, 0, 1, 0), length.out = n)\nS4 <- rep(c(0, 0, 0, 1), length.out = n)\nsxt <- -0.5 + 0.25 * S2 + 0.5 * S3 + 1.25 * S4\n\nset.seed(123)\net <- arima.sim(model = list(ar = 0.8), sd = 0.01, n = n)\nyt <- ts(mxt + sxt + et, frequency = frequ)\nplot(yt)\n\nest <- deseats(yt)\nest@bwidth\nest@sum_autocov\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"hamilton_filter","snippet":"### Name: hamilton_filter\n### Title: Time Series Filtering Using the Hamilton Filter\n### Aliases: hamilton_filter\n\n### ** Examples\n\nest <- hamilton_filter(log(EXPENDITURES))\nest\n\n\n\n"} 
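# Editor's sketch (not from the deseats examples): a hedged side-by-side of
# residual spread from the Hamilton filter documented above and the local
# linear decomposition documented next. It assumes EXPENDITURES ships with
# deseats and that both fitted objects expose a residuals() method, as the
# fitted,hfilter-method and trend,decomp-method pages suggest.
est_h <- hamilton_filter(log(EXPENDITURES))
est_l <- llin_decomp(log(EXPENDITURES), bwidth_trend = 4, bwidth_season = 28)
c(hamilton = var(residuals(est_h), na.rm = TRUE),
  llin = var(residuals(est_l), na.rm = TRUE))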
{"package":"deseats","topic":"llin_decomp","snippet":"### Name: llin_decomp\n### Title: Decomposition of Time Series Using Local Linear Regression\n### Aliases: llin_decomp\n\n### ** Examples\n\nest <- llin_decomp(log(EXPENDITURES), bwidth_trend = 4, bwidth_season = 28)\nest\n\n\n\n"} {"package":"deseats","topic":"lm_decomp","snippet":"### Name: lm_decomp\n### Title: Decomposition of Time Series Using Linear Regression\n### Aliases: lm_decomp\n\n### ** Examples\n\nest <- lm_decomp(log(EXPENDITURES), order_poly = 3, order_poly_s = 2)\nest\n\n\n\n"} {"package":"deseats","topic":"ma_decomp","snippet":"### Name: ma_decomp\n### Title: Decomposition of Time Series Using Moving Averages\n### Aliases: ma_decomp\n\n### ** Examples\n\nest <- ma_decomp(log(EXPENDITURES), k_trend = 6, k_season = 7)\nest\n\n\n\n"} {"package":"deseats","topic":"measures","snippet":"### Name: measures\n### Title: Forecasting Accuracy Measure Calculation\n### Aliases: measures\n\n### ** Examples\n\n## No test: \nxt <- EXPENDITURES\nxt_in <- window(xt, end = c(2017, 4))\nyt <- log(xt_in)\nest <- s_semiarma(yt, set_options(order_poly = 3), inflation_rate = \"optimal\")\nfc_results <- predict(est, n.ahead = 8, expo = TRUE)\npoint_fc <- fc_results@pred\nmeasures(point_fc, xt)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"order_poly,smoothing_options-method","snippet":"### Name: order_poly,smoothing_options-method\n### Title: Retrieve or Set Smoothing Options\n### Aliases: order_poly,smoothing_options-method\n### order_poly<-,smoothing_options-method season,smoothing_options-method\n### season<-,smoothing_options-method kernel_fun,smoothing_options-method\n### kernel_fun<-,smoothing_options-method bwidth,smoothing_options-method\n### bwidth<-,smoothing_options-method\n### boundary_method,smoothing_options-method\n### boundary_method<-,smoothing_options-method\n\n### ** Examples\n\nopts <- set_options()\nopts\norder_poly(opts)\norder_poly(opts) <- 3\nopts\n\n\n\n"} {"package":"deseats","topic":"plot,decomp-method","snippet":"### Name: plot,decomp-method\n### Title: Plot Method for Decomposition Results in the Style of Base R\n### Plots\n### Aliases: plot,decomp-method\n\n### ** Examples\n\nXt <- log(EXPENDITURES)\nest <- deseats(Xt)\nplot(est, which = 3)\n\n\n\n"} {"package":"deseats","topic":"plot,deseats_fc-method","snippet":"### Name: plot,deseats_fc-method\n### Title: Plot Method for Class '\"deseats_fc\"'\n### Aliases: plot,deseats_fc-method\n\n### ** Examples\n\n## No test: \nest <- s_semiarma(log(EXPENDITURES))\nfc <- predict(est, n.ahead = 4)\nfc_e <- expo(fc)\nplot(fc_e)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"plot,hfilter-method","snippet":"### Name: plot,hfilter-method\n### Title: Plot Method for the Results of a Hamilton Filter\n### Aliases: plot,hfilter-method\n\n### ** Examples\n\nest <- hamilton_filter(log(EXPENDITURES))\nplot(est, which = 3, col = c(1, 6))\nplot(est, which = 4)\n\n\n\n"} {"package":"deseats","topic":"predict,s_semiarma-method","snippet":"### Name: predict,s_semiarma-method\n### Title: Point and Interval Forecasts for Seasonal Semi-ARMA Models\n### Aliases: predict,s_semiarma-method\n\n### ** Examples\n\n## No test: \nxt <- log(EXPENDITURES)\nest <- s_semiarma(xt)\npredict(est, n.ahead = 10)\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"read_ts","snippet":"### Name: read_ts\n### Title: Read in a Dataset Directly as an Object of Class '\"ts\"' or\n### '\"mts\"'\n### Aliases: read_ts\n\n### ** Examples\n\n## No test: \n### Create an example data file\na <- 
1:12\nb <- 21:32\ntp <- seq(from = as.Date(\"2020-01-01\"), to = as.Date(\"2020-12-01\"), by = \"month\")\ndf <- data.frame(\n Time = tp,\n a = a,\n b = b\n)\n\nfile <- file.path(tempdir(), \"ExampleFile.csv\")\n\nwrite.table(df, file = file, quote = FALSE, sep = \",\",\n row.names = FALSE, col.names = TRUE)\n \n### Use the function to read in the data\nxt <- read_ts(file)\nxt\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"s_semiarma","snippet":"### Name: s_semiarma\n### Title: Fitting of a Seasonal Semiparametric ARMA Model\n### Aliases: s_semiarma\n\n### ** Examples\n\n## No test: \nXt <- log(EXPENDITURES)\nest <- s_semiarma(Xt)\nest\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"select_bwidth","snippet":"### Name: select_bwidth\n### Title: Optimal Bandwidth Estimation for Locally Weighted Regression in\n### Equidistant Time Series under Short Memory\n### Aliases: select_bwidth\n\n### ** Examples\n\nXt <- log(EXPENDITURES)\nselect_bwidth(Xt)\n\n\n\n"} {"package":"deseats","topic":"show,smoothing_options-method","snippet":"### Name: show,smoothing_options-method\n### Title: Show Method for Smoothing Options\n### Aliases: show,smoothing_options-method\n\n### ** Examples\n\nopts <- set_options()\nopts\n\n\n\n"} {"package":"deseats","topic":"trend,decomp-method","snippet":"### Name: trend,decomp-method\n### Title: Obtain Individual Components of a Decomposed Time Series\n### Aliases: trend,decomp-method season,decomp-method fitted,decomp-method\n### residuals,decomp-method deseasonalize,decomp-method\n### detrend,decomp-method\n\n### ** Examples\n\n## No test: \nXt <- log(EXPENDITURES)\nsmoothing_options <- set_options(order_poly = 3)\nest <- deseats(Xt, smoothing_options = smoothing_options)\ntrend_e <- trend(est) # Trend estimates\nseason_e <- season(est) # Seasonality estimates\ntrend_season_e <- fitted(est) # Trend + seasonality estimates\nresid_e <- residuals(est) # Residuals (observ. 
- trend - seasonality)\nts_adj <- deseasonalize(est) # Seasonally adjusted series\nts_notrend <- detrend(est) # Detrended series\n## End(No test)\n\n\n\n"} {"package":"deseats","topic":"zoo_to_ts","snippet":"### Name: zoo_to_ts\n### Title: Time Series Object Conversion from '\"zoo\"' to '\"ts\"'\n### Aliases: zoo_to_ts\n\n### ** Examples\n\n# Create example zoo-object\ntp <- seq(from = as.Date(\"2020-01-01\"), to = as.Date(\"2020-10-01\"), by = \"month\")\nxt <- zoo::zoo(1:10, order.by = tp)\nxt\n\n# Transform into ts-object\nyt <- zoo_to_ts(xt)\nyt\n\n\n\n"} {"package":"multgee","topic":"LORgee_control","snippet":"### Name: LORgee_control\n### Title: Control For The GEE Solver\n### Aliases: LORgee_control\n\n### ** Examples\n\ndata(arthritis)\nfitmod <- ordLORgee(y ~ factor(trt) + factor(baseline) + factor(time),\n data = arthritis, id = id, repeated = time)\n\n## A one-step GEE estimator\nfitmod1 <- update(fitmod, control = LORgee_control(maxiter = 1))\ncoef(fitmod)\ncoef(fitmod1)\n\n\n\n\n"} {"package":"multgee","topic":"arthritis","snippet":"### Name: arthritis\n### Title: Rheumatoid Arthritis Clinical Trial\n### Aliases: arthritis\n### Keywords: datasets\n\n### ** Examples\n\ndata(arthritis)\nstr(arthritis)\n\n\n"} {"package":"multgee","topic":"confint.LORgee","snippet":"### Name: confint.LORgee\n### Title: Confidence Intervals for Model Parameters\n### Aliases: confint.LORgee confint\n\n### ** Examples\n\nfitmod <- ordLORgee(formula = y ~ factor(time) + factor(trt) + factor(baseline),\n data = arthritis, id = id, LORstr = \"uniform\", repeated = time)\nconfint(fitmod)\n\n\n\n"} {"package":"multgee","topic":"gee_criteria","snippet":"### Name: gee_criteria\n### Title: Variable and Covariance Selection Criteria\n### Aliases: gee_criteria\n\n### ** Examples\n\ndata(arthritis)\nfitmod <- ordLORgee(formula = y ~ factor(time) + factor(trt) + factor(baseline),\ndata = arthritis, id = id, repeated = time, LORstr = \"uniform\")\nfitmod1 <- update(fitmod, formula = .~. + age + factor(sex))\ngee_criteria(fitmod, fitmod1)\n\n\n"} {"package":"multgee","topic":"housing","snippet":"### Name: housing\n### Title: Homeless Data\n### Aliases: housing\n### Keywords: datasets\n\n### ** Examples\n\ndata(housing)\nstr(housing)\n\n\n"} {"package":"multgee","topic":"intrinsic.pars","snippet":"### Name: intrinsic.pars\n### Title: Intrinsic Parameters Estimation\n### Aliases: intrinsic.pars\n\n### ** Examples\n\ndata(arthritis)\nintrinsic.pars(y, arthritis, id, time, rscale = \"ordinal\")\n## The intrinsic parameters do not vary much. The 'uniform' local odds ratios\n## structure might be a good approximation for the association pattern.\n\nset.seed(1)\ndata(housing)\nintrinsic.pars(y, housing, id, time, rscale = \"nominal\")\n## The intrinsic parameters vary. The 'RC' local odds ratios structure\n## might be a good approximation for the association pattern.\n\n\n\n"} {"package":"multgee","topic":"matrixLOR","snippet":"### Name: matrixLOR\n### Title: Creating A Probability Matrix With Specified Local Odds Ratios\n### Aliases: matrixLOR\n\n### ** Examples\n\n## Illustrating the construction of a \"fixed\" local odds ratios structure\n## using the arthritis dataset. 
Here, we assume a uniform local odds ratios\n## structure equal to 2 for each time pair.\n\n## Create the uniform local odds ratios structure.\nlorterm <- matrixLOR(matrix(2, 4, 4))\n\n## Create the LORterm argument.\nlorterm <- c(lorterm)\nlorterm <- matrix(c(lorterm), 3, 25, TRUE)\n\n## Fit the marginal model.\ndata(arthritis)\nfitmod <- ordLORgee(y ~ factor(trt) + factor(time) + factor(baseline),\n data = arthritis, id = id, repeated = time, LORstr = \"fixed\",\n LORterm = lorterm)\nfitmod\n\n\n\n"} {"package":"multgee","topic":"nomLORgee","snippet":"### Name: nomLORgee\n### Title: Marginal Models For Correlated Nominal Multinomial Responses\n### Aliases: nomLORgee\n\n### ** Examples\n\n## See the interpretation in Touloumis (2011).\ndata(housing)\nfitmod <- nomLORgee(y ~ factor(time) * sec, data = housing, id = id,\n repeated = time)\nsummary(fitmod)\n\n\n"} {"package":"multgee","topic":"ordLORgee","snippet":"### Name: ordLORgee\n### Title: Marginal Models For Correlated Ordinal Multinomial Responses\n### Aliases: ordLORgee\n\n### ** Examples\n\ndata(arthritis)\nintrinsic.pars(y, arthritis, id, time)\nfitmod <- ordLORgee(formula = y ~ factor(time) + factor(trt) + factor(baseline),\n data = arthritis, id = id, repeated = time, LORstr = \"uniform\")\nsummary(fitmod)\n\n\n"} {"package":"multgee","topic":"vcov.LORgee","snippet":"### Name: vcov.LORgee\n### Title: Calculate Variance-Covariance Matrix for a Fitted LORgee Object.\n### Aliases: vcov.LORgee vcov\n\n### ** Examples\n\nfitmod <- ordLORgee(formula = y ~ factor(time) + factor(trt) + factor(baseline),\n data = arthritis, id = id, repeated = time, LORstr = \"uniform\")\nvcov(fitmod, method = \"robust\")\nvcov(fitmod, method = \"naive\")\n\n\n\n"} {"package":"multgee","topic":"waldts","snippet":"### Name: waldts\n### Title: Wald Test of Nested GEE Models\n### Aliases: waldts\n\n### ** Examples\n\ndata(housing)\nset.seed(1)\nfitmod1 <- nomLORgee(y ~ factor(time) * sec, data = housing, id = id,\n repeated = time)\nset.seed(1)\nfitmod0 <- update(fitmod1, formula = y ~ factor(time) + sec)\nwaldts(fitmod0, fitmod1)\n\n\n\n"} {"package":"epca","topic":"cpve","snippet":"### Name: cpve\n### Title: Cumulative Proportion of Variance Explained (CPVE)\n### Aliases: cpve\n\n### ** Examples\n\n## use the \"swiss\" data\n## find two sparse PCs\ns.sca <- sca(swiss, 2, gamma = sqrt(ncol(swiss)))\nld <- loadings(s.sca)\ncpve(as.matrix(swiss), ld)\n\n\n\n"} {"package":"epca","topic":"dist.matrix","snippet":"### Name: dist.matrix\n### Title: Matrix Column Distance\n### Aliases: dist.matrix\n\n### ** Examples\n\nx <- diag(4)\ny <- x + rnorm(16, sd = 0.05) # add some noise\ny <- t(t(y) / sqrt(colSums(y ^ 2))) ## normalize the columns\n## Euclidean distance between column pairs, with minimal matching\ndist.matrix(x, y, \"euclidean\")\n\n\n\n"} {"package":"epca","topic":"inner","snippet":"### Name: inner\n### Title: Matrix Inner Product\n### Aliases: inner\n\n### ** Examples\n\nx <- matrix(1:6, 2, 3)\ny <- matrix(7:12, 2, 3)\n## The default is equivalent to `crossprod(x, y)`\ninner(x, y)\n## A custom function can also be supplied, e.g., squared inner products of columns.\nSquaredInner <- function(x, y) crossprod(x, y)^2\ninner(x, y, SquaredInner)\n\n\n\n"} {"package":"epca","topic":"misClustRate","snippet":"### Name: misClustRate\n### Title: Mis-Classification Rate (MCR)\n### Aliases: misClustRate\n\n### ** Examples\n\ntruth <- rep(1:3, each = 30)\ncluster <- rep(3:1, times = c(25, 32, 33))\nmisClustRate(cluster, truth)\n\n\n"} 
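For intuition about misClustRate() above: a mis-classification rate only makes sense once the arbitrary cluster labels are matched to the true labels, so a natural definition is the smallest error rate over all one-to-one relabellings. The sketch below assumes that convention rather than reproducing epca's implementation (perms and mcr_manual are hypothetical helpers; it also assumes the cluster and truth label sets have the same size).

perms <- function(v) {  # all permutations of a short vector
  if (length(v) <= 1) return(list(v))
  out <- list()
  for (i in seq_along(v)) {
    for (p in perms(v[-i])) out[[length(out) + 1]] <- c(v[i], p)
  }
  out
}

mcr_manual <- function(cluster, truth) {
  labs <- sort(unique(cluster))
  errs <- vapply(perms(sort(unique(truth))), function(p) {
    mean(p[match(cluster, labs)] != truth)  # error rate under one relabelling
  }, numeric(1))
  min(errs)  # keep the best relabelling
}

truth <- rep(1:3, each = 30)
cluster <- rep(3:1, times = c(25, 32, 33))
mcr_manual(cluster, truth)  # compare with misClustRate(cluster, truth)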
{"package":"epca","topic":"pitprops","snippet":"### Name: pitprops\n### Title: Pitprops correlation data\n### Aliases: pitprops\n### Keywords: datasets\n\n### ** Examples\n\n## No test: \n## NOT TEST\ndata(pitprops)\nggcorrplot::ggcorrplot(pitprops)\n## End(No test)\n\n\n\n"} {"package":"epca","topic":"polar","snippet":"### Name: polar\n### Title: Polar Decomposition\n### Aliases: polar\n\n### ** Examples\n\nx <- matrix(1:6, nrow = 3)\npolar_x <- polar(x)\n\n\n\n"} {"package":"epca","topic":"pve","snippet":"### Name: pve\n### Title: Proportion of Variance Explained (PVE)\n### Aliases: pve\n\n### ** Examples\n\n## use the \"swiss\" data\n## find two sparse PCs\ns.sca <- sca(swiss, 2, gamma = sqrt(ncol(swiss)))\nld <- loadings(s.sca)\npve(as.matrix(swiss), ld)\n\n\n"} {"package":"epca","topic":"rotation","snippet":"### Name: rotation\n### Title: Varimax Rotation\n### Aliases: rotation\n\n### ** Examples\n\n## use the \"swiss\" data\nfa <- factanal( ~., 2, data = swiss, rotation = \"none\")\nrotation(loadings(fa))\n\n\n"} {"package":"epca","topic":"sca","snippet":"### Name: sca\n### Title: Sparse Component Analysis\n### Aliases: sca\n\n### ** Examples\n\n## ------ example 1 ------\n## simulate a low-rank data matrix with some additive Gaussian noise\nn <- 300\np <- 50\nk <- 5 ## rank\nz <- shrinkage(polar(matrix(runif(n * k), n, k)), sqrt(n))\nb <- diag(5) * 3\ny <- shrinkage(polar(matrix(runif(p * k), p, k)), sqrt(p))\ne <- matrix(rnorm(n * p, sd = .01), n, p)\nx <- scale(z %*% b %*% t(y) + e)\n\n## perform sparse PCA\ns.sca <- sca(x, k)\ns.sca\n\n## ------ example 2 ------\n## use the `pitprops` data from the `elasticnet` package\ndata(pitprops)\n\n## find 6 sparse PCs\ns.sca <- sca(pitprops, 6, gamma = 6, is.cov = TRUE)\nprint(s.sca, verbose = TRUE)\n\n\n\n"} {"package":"epca","topic":"shrinkage","snippet":"### Name: shrinkage\n### Title: Shrinkage\n### Aliases: shrinkage\n\n### ** Examples\n\nx <- matrix(1:6, nrow = 3)\nshrink_x <- shrinkage(x, 1)\n\n\n\n"} {"package":"epca","topic":"sma","snippet":"### Name: sma\n### Title: Sparse Matrix Approximation\n### Aliases: sma\n\n### ** Examples\n\n## simulate a rank-5 data matrix with some additive Gaussian noise\nn <- 300\np <- 50\nk <- 5 ## rank\nz <- shrinkage(polar(matrix(runif(n * k), n, k)), sqrt(n))\nb <- diag(5) * 3\ny <- shrinkage(polar(matrix(runif(p * k), p, k)), sqrt(p))\ne <- matrix(rnorm(n * p, sd = .01), n, p)\nx <- scale(z %*% b %*% t(y) + e)\n\n## perform sparse matrix approximation\ns.sma <- sma(x, k)\ns.sma\n\n\n\n"} {"package":"epca","topic":"varimax.criteria","snippet":"### Name: varimax.criteria\n### Title: The varimax criterion\n### Aliases: varimax.criteria\n\n### ** Examples\n\n## use the \"swiss\" data\nfa <- factanal( ~., 2, data = swiss, rotation = \"none\")\nlds <- loadings(fa)\n\n## compute varimax criterion:\nvarimax.criteria(lds)\n\n## compute varimax criterion (after the varimax rotation):\nrlds <- rotation(lds, rotate = \"varimax\")\nvarimax.criteria(rlds)\n\n\n\n"} {"package":"epca","topic":"vgQ.absmin","snippet":"### Name: vgQ.absmin\n### Title: Gradient of Absmin Criterion\n### Aliases: vgQ.absmin\n\n### ** Examples\n\n## Not run: \n##D ## NOT RUN\n##D ## NOT for users to call.\n## End(Not run)\n\n\n"} {"package":"cropcircles","topic":"crop_circle","snippet":"### Name: crop_circle\n### Title: Cropping functions\n### Aliases: crop_circle crop_square crop_hex crop_heart crop_parallelogram\n### circle_crop hex_crop\n\n### ** Examples\n\nlibrary(cropcircles)\nlibrary(magick)\n\nimg_path <- 
file.path(system.file(package = \"cropcircles\"), \"images\", \"walter-jesse.png\")\nimg_cropped <- crop_circle(img_path, border_size = 6)\nimage_read(img_cropped)\n\n# justification example\n\n# center (default)\nimage_read(crop_circle(img_path, border_size = 6))\n\n# left\nimage_read(crop_circle(img_path, border_size = 6, just = \"left\"))\n\n# right\nimage_read(crop_circle(img_path, border_size = 6, just = \"right\"))\n\n\n"} {"package":"DFIT","topic":"AseIrt","snippet":"### Name: AseIrt\n### Title: Calculates the asymptotic covariance matrices for item\n### parameters according to the IRT model.\n### Aliases: AseIrt\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 15000,\n# # irtModel = \"3pl\")\n\n\n\n"} {"package":"DFIT","topic":"Bound3PlIpr","snippet":"### Name: Bound3PlIpr\n### Title: Takes item parameters from Ipr and forces guessing to lie\n### between 0 and 1\n### Aliases: Bound3PlIpr\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 15000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = 
threePlAse,\n# # nReplicates = 100)\n# # threePlIpr <- Bound3PlIpr(threePlIpr)\n\n\n\n"} {"package":"DFIT","topic":"Bound4PlIpr","snippet":"### Name: Bound4PlIpr\n### Title: Takes item parameters from Ipr and forces guessing to lie\n### between 0 and 1\n### Aliases: Bound4PlIpr\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 15000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n# # threePlIpr <- Bound4PlIpr(threePlIpr)\n\n\n\n"} {"package":"DFIT","topic":"Cdif","snippet":"### Name: Cdif\n### Title: Calculates CDIF index for an item with given item parameters of\n### focal and reference groups.\n### Aliases: Cdif\n\n### ** Examples\n\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# #\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlCdif <- Cdif(itemParameters = threePlParameters, irtModel = '3pl',\n# # focalAbilities = NULL, focalDistribution = \"norm\",\n# # subdivisions = 5000, logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"CutoffIpr","snippet":"### Name: CutoffIpr\n### Title: Cut-off points for Ipr generated estimates\n### Aliases: CutoffIpr\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # 
threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 15000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# #\n# # threePlIprCutoff <- CutoffIpr(itemParameters = threePlParameters,\n# # itemCovariances = threePlAse, nullGroup = 'focal',\n# # nReplicates = 1000, statistic = 'ncdif', irtModel = '3pl')\n\n\n\n"} {"package":"DFIT","topic":"DeltaMhIrt","snippet":"### Name: DeltaMhIrt\n### Title: Obtains the ETS Delta measure for Mantel-Haenszel DIF statistic\n### effect size.\n### Aliases: DeltaMhIrt\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\nthreePlMh <- IrtMh(itemParameters = threePlParameters, irtModel = \"3pl\",\n focalDistribution = \"norm\", referenceDistribution = \"norm\",\n focalDistrExtra = list(mean = 0, sd = 1),\n referenceDistrExtra = list(mean = 0, sd = 1), groupRatio = 1,\n logistic = FALSE)\n\ndelta3pl <- DeltaMhIrt(threePlMh)\n\n\n\n"} {"package":"DFIT","topic":"Dtf","snippet":"### Name: Dtf\n### Title: Calculates DTF index for a set of items with given item\n### parameters of focal and reference groups.\n### Aliases: Dtf\n\n### ** Examples\n\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# #\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # 
threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlCdif <- Cdif(itemParameters = threePlParameters, irtModel = '3pl',\n# # focalAbilities = NULL, focalDistribution = \"norm\",\n# # subdivisions = 5000, logistic = TRUE)\n# # threePlDtf <- Dtf(cdif = threePlCdif)\n\n\n\n"} {"package":"DFIT","topic":"Extract2PLMirt","snippet":"### Name: Extract2PLMirt\n### Title: Extract item discrimination and difficulties and estimate\n### covariance estimates for 2PL items from a fitted mirt object for one\n### or two groups\n### Aliases: Extract2PLMirt\n\n### ** Examples\n\nlibrary(mirt)\ndata <- expand.table(LSAT7)\n(mod1 <- mirt(data, model = 1, itemtype = \"2PL\", SE = TRUE))\n(DFIT:::Extract2PLMirt(mod1))\n\n\n\n"} {"package":"DFIT","topic":"Extract3PLMirt","snippet":"### Name: Extract3PLMirt\n### Title: Extract item discrimination, difficulties, and guessing\n### parameters and estimate covariance estimates for 3PL items from a\n### fitted mirt object for one or two groups\n### Aliases: Extract3PLMirt\n\n### ** Examples\n\nlibrary(mirt)\ndata <- expand.table(LSAT7)\n(mod1 <- mirt(data, model = 1, itemtype = \"3PL\", SE = TRUE))\n(DFIT:::Extract3PLMirt(mod1))\n\n\n\n"} {"package":"DFIT","topic":"Extract4PLMirt","snippet":"### Name: Extract4PLMirt\n### Title: Extract item discrimination, difficulties, guessing, and upper\n### asymptote parameters and estimate covariance estimates for 4PL items\n### from a fitted mirt object for one or two groups\n### Aliases: Extract4PLMirt\n\n### ** Examples\n\nlibrary(mirt)\ndata <- expand.table(LSAT7)\n(mod1 <- mirt(data, model = 1, itemtype = \"4PL\", SE = TRUE))\n(DFIT:::Extract4PLMirt(mod1))\n\n\n\n"} {"package":"DFIT","topic":"ExtractGPCMMirt","snippet":"### Name: ExtractGPCMMirt\n### Title: Extract item discrimination and difficulties and estimate\n### covariance estimates for GPCM items from a fitted mirt object for one\n### or two groups\n### Aliases: ExtractGPCMMirt\n\n### ** Examples\n\nlibrary(mirt)\n(mod1 <- mirt(Science, model = 1, itemtype = \"gpcm\", SE = TRUE))\n(DFIT:::ExtractGPCMMirt(mod1))\n\n\n\n"} {"package":"DFIT","topic":"ExtractGRMMirt","snippet":"### Name: ExtractGRMMirt\n### Title: Extract item discrimination and difficulties and estimate\n### covariance estimates for GRM items from a fitted mirt object for one\n### or two groups\n### Aliases: ExtractGRMMirt\n\n### ** Examples\n\nlibrary(mirt)\n(mod1 <- mirt(Science, model = 1, itemtype = \"graded\", SE = TRUE))\n(DFIT:::ExtractGRMMirt(mod1))\n\n\n\n"} {"package":"DFIT","topic":"ExtractMirtPars","snippet":"### Name: ExtractMirtPars\n### Title: Extracts the item parameters from a unidimensional mirt model\n### Aliases: ExtractMirtPars\n\n### ** Examples\n\nlibrary(mirt)\n(mod1 <- mirt(Science, model = 1, itemtype = c(\"graded\", \"graded\", \"gpcm\", \"gpcm\"), SE = TRUE))\n(ExtractMirtPars(mod1))\n\n\n\n"} {"package":"DFIT","topic":"ExtractRaschMirt","snippet":"### Name: ExtractRaschMirt\n### Title: Extract item difficulties and item difficulty variance estimates\n### for Rasch items from a fitted mirt object for one or two groups\n### Aliases: ExtractRaschMirt\n\n### ** Examples\n\nlibrary(mirt)\ndata <- expand.table(LSAT7)\n(mod1 <- mirt(data, model = 1, itemtype = \"Rasch\", SE = TRUE))\n(DFIT:::ExtractRaschMirt(mod1))\n\n\n\n"} 
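The Ipr() examples that follow all share one idea: replicate item-parameter sets are drawn from a multivariate normal centred on the estimated parameters, with the AseIrt() covariance matrices. A minimal sketch of a single replicate for one group (an illustration of the idea, not DFIT's implementation; ipr_one, pars, and covs are hypothetical names, with pars a matrix of item parameters, one row per item, and covs a list of per-item covariance matrices):

library(MASS)  # for mvrnorm()

ipr_one <- function(pars, covs) {
  t(sapply(seq_len(nrow(pars)), function(i) {
    mvrnorm(1, mu = pars[i, ], Sigma = covs[[i]])  # perturb item i's parameters
  }))
}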
{"package":"DFIT","topic":"Ipr","snippet":"### Name: Ipr\n### Title: Item parameter replication\n### Aliases: Ipr\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 15000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n\n\n\n"} {"package":"DFIT","topic":"IprMh","snippet":"### Name: IprMh\n### Title: Mantel Haenszel for Item parameter replication\n### Aliases: IprMh\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n# #\n# # threePlMhIpr <- IprMh(itemParameterList = threePlIpr, irtModel = '3pl', logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"IprNcdif","snippet":"### Name: IprNcdif\n### Title: NCDIF for Item parameter replication\n### Aliases: IprNcdif\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- 
((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n# #\n# # threePlNcdifIpr <- IprNcdif(itemParameterList = threePlIpr, irtModel = '3pl', logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"IprSam","snippet":"### Name: IprSam\n### Title: Signed Area Measure for Item parameter replication\n### Aliases: IprSam\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n# #\n# # threePlSamIpr <- IprSam(itemParameterList = threePlIpr, irtModel = '3pl', logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"IprUam","snippet":"### Name: IprUam\n### Title: Unsigned Area Measure for Item parameter replication\n### Aliases: IprUam\n\n### ** Examples\n\n# # Not run\n# #\n# # data(dichotomousItemParameters)\n# # threePlParameters <- dichotomousItemParameters\n# # isNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n# # (dichotomousItemParameters[['reference']][, 3] == 0))\n# #\n# # 
threePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\n# # threePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\n# # threePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\n# # threePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\n# # threePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\n# # threePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\n# # threePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n# #\n# # threePlAse <- list()\n# # threePlAse[[\"focal\"]] <- AseIrt(itemParameters = threePlParameters[[\"focal\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# # threePlAse[[\"reference\"]] <- AseIrt(itemParameters = threePlParameters[[\"reference\"]],\n# # logistic = TRUE,\n# # sampleSize = 10000,\n# # irtModel = \"3pl\")\n# #\n# # set.seed(41568)\n# # threePlIpr <- Ipr(itemParameters = threePlParameters, itemCovariances = threePlAse,\n# # nReplicates = 100)\n# #\n# # threePlUamIpr <- IprUam(itemParameterList = threePlIpr, irtModel = '3pl', logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"IrtMh","snippet":"### Name: IrtMh\n### Title: Calculates the Mantel-Haenszel theoretical parameter when a\n### dichotomous IRT model holds\n### Aliases: IrtMh\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\nthreePlMh <- IrtMh(itemParameters = threePlParameters, irtModel = \"3pl\",\n focalDistribution = \"norm\", referenceDistribution = \"norm\",\n focalDistrExtra = list(mean = 0, sd = 1),\n referenceDistrExtra = list(mean = 0, sd = 1), groupRatio = 1,\n logistic = FALSE)\n\n\n\n"} {"package":"DFIT","topic":"Ncdif","snippet":"### Name: Ncdif\n### Title: Calculates NCDIF index for an item with given item parameters of\n### focal and reference groups.\n### Aliases: Ncdif\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\n\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 
1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\nthreePlNcdif <- Ncdif(itemParameters = threePlParameters, irtModel = '3pl',\n focalAbilities = NULL, focalDistribution = \"norm\",\n subdivisions = 5000, logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"PlotNcdif","snippet":"### Name: PlotNcdif\n### Title: Plot the item characteristic (expected score) curve for focal\n### and reference groups for the iiItem along with a representation of\n### the focal group density.\n### Aliases: PlotNcdif\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\n\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\n# # Non Uniform - != guess DIF item\nPlotNcdif(iiItem = 22, itemParameters = threePlParameters, irtModel = \"3pl\",\n plotDensity = FALSE, main = \"Item 22 Non uniform and different guessing DIF. 3PL\")\n\n# # Uniform - != guess DIF item\nPlotNcdif(iiItem = 15, itemParameters = threePlParameters, irtModel = \"3pl\",\n plotDensity = FALSE, main = \"Item 15 Uniform and different guessing DIF. 
3PL\")\n\n\n\n"} {"package":"DFIT","topic":"SignedArea","snippet":"### Name: SignedArea\n### Title: Calculates Raju's Signed Area Measure index for an item with\n### given item parameters of focal and reference groups.\n### Aliases: SignedArea\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\n\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\nsam3pl <- SignedArea(itemParameters = threePlParameters, irtModel = \"3pl\",\n subdivisions = 5000, logistic = TRUE)\n\n\n\n"} {"package":"DFIT","topic":"UnsignedArea","snippet":"### Name: UnsignedArea\n### Title: Calculates Raju's Unsigned Area Measure index for an item with\n### given item parameters of focal and reference groups.\n### Aliases: UnsignedArea\n\n### ** Examples\n\n\ndata(dichotomousItemParameters)\n\nthreePlParameters <- dichotomousItemParameters\nisNot3Pl <- ((dichotomousItemParameters[['focal']][, 3] == 0) |\n (dichotomousItemParameters[['reference']][, 3] == 0))\n\nthreePlParameters[['focal']] <- threePlParameters[['focal']][!isNot3Pl, ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][!isNot3Pl, ]\nthreePlParameters[['focal']][, 3] <- threePlParameters[['focal']][, 3] + 0.1\nthreePlParameters[['reference']][, 3] <- threePlParameters[['reference']][, 3] + 0.1\nthreePlParameters[['focal']][, 2] <- threePlParameters[['focal']][, 2] + 1.5\nthreePlParameters[['reference']][, 2] <- threePlParameters[['reference']][, 2] + 1.5\nthreePlParameters[['focal']] <- threePlParameters[['focal']][-c(12, 16, 28), ]\nthreePlParameters[['reference']] <- threePlParameters[['reference']][-c(12, 16, 28), ]\n\nuam3pl <- UnsignedArea(itemParameters = threePlParameters, irtModel = \"3pl\",\n subdivisions = 5000, logistic = TRUE)\n\n\n\n"} {"package":"micEconCES","topic":"MishraCES","snippet":"### Name: MishraCES\n### Title: Mishra's (2006) CES data\n### Aliases: MishraCES\n### Keywords: datasets\n\n### ** Examples\n\n # load the data set\n data( \"MishraCES\" )\n \n # show mean values of all variables\n colMeans( MishraCES )\n\n # re-calculate the endogenous variable (see Mishra 2006)\n # coefficients of the nested CES function with 4 inputs\n b <- c( \"gamma\" = 200 * 0.5^(1/0.6), \"delta_1\" = 0.6, \"delta_2\" = 0.3, \n \"delta\" = 0.5, \"rho_1\" = 0.5, \"rho_2\" = -0.17, \"rho\" = 0.6 )\n MishraCES$Y2 <- cesCalc( xNames = c( \"X1\", \"X2\", \"X3\", \"X4\" ), \n data = MishraCES, coef = b, nested = TRUE )\n all.equal( MishraCES$Y, MishraCES$Y2 )\n\n\n"} {"package":"micEconCES","topic":"cesCalc","snippet":"### Name: cesCalc\n### Title: Calculate CES function\n### Aliases: cesCalc\n### Keywords: models\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity 
of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n\n ## Estimate CES: Land & Labor with constant returns to scale\n cesLandLabor <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms )\n\n ## Calculate fitted values\n cesCalc( c( \"land\", \"qLabor\" ), germanFarms, coef( cesLandLabor ) )\n\n\n # variable returns to scale\n cesLandLaborVrs <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n vrs = TRUE )\n\n ## Calculate fitted values\n cesCalc( c( \"land\", \"qLabor\" ), germanFarms, coef( cesLandLaborVrs ) )\n\n\n"} {"package":"micEconCES","topic":"coef.cesEst","snippet":"### Name: cesEst-methods\n### Title: Methods for Estimated CES Functions\n### Aliases: coef.cesEst coef.summary.cesEst fitted.cesEst residuals.cesEst\n### vcov.cesEst\n### Keywords: models\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n\n ## CES: Land & Labor\n cesLandLabor <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms )\n\n # estimated coefficients\n coef( cesLandLabor )\n\n # estimated coefficients, their standard errors, t-statistic, P-values\n coef( summary( cesLandLabor ) )\n\n # fitted values of the estimated model\n fitted( cesLandLabor )\n\n # residuals of the estimated model\n residuals( cesLandLabor )\n\n # covariance matrix of the estimated coefficients\n vcov( cesLandLabor )\n\n\n"} {"package":"micEconCES","topic":"cesEst","snippet":"### Name: cesEst\n### Title: Estimate a CES function\n### Aliases: cesEst print.cesEst\n### Keywords: models regression nonlinear\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n\n ## CES: Land & Labor (Levenberg-Marquardt algorithm)\n cesLandLabor <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms )\n\n # variable returns to scale, increased max. number of iter. 
(LM algorithm)\n cesLandLaborVrs <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n vrs = TRUE, control = nls.lm.control( maxiter = 1000 ) )\n\n # using the Nelder-Mead optimization method\n cesLandLaborNm <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"NM\" )\n\n # using the BFGS optimization method\n cesLandLaborBfgs <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"BFGS\" )\n\n # using the L-BFGS-B optimization method with constrained parameters\n cesLandLaborBfgsCon <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ),\n germanFarms, method = \"L-BFGS-B\" )\n\n # using the CG optimization method\n cesLandLaborCg <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"CG\" )\n\n # using the SANN optimization method\n # (with decreased number of iterations to decrease execution time)\n cesLandLaborSann <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"SANN\", control = list( maxit = 1000 ) )\n\n # using the Kmenta approximation\n cesLandLaborKmenta <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"Kmenta\" )\n\n # using the PORT optimization routine with unconstrained parameters\n cesLandLaborPort <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ),\n germanFarms, vrs = TRUE, method = \"PORT\", lower = -Inf, upper = Inf )\n\n # using the PORT optimization routine with constrained parameters and VRS\n cesLandLaborPortCon <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ),\n germanFarms, vrs = TRUE, method = \"PORT\" )\n\n # using the Differential Evolution optimization method\n # (with decreased number of iterations to decrease execution time)\n cesLandLaborDe <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms,\n method = \"DE\", control = DEoptim.control( itermax = 50 ) )\n\n ## estimation with a grid search for rho (using the LM algorithm)\n cesLandLaborGrid <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ),\n data = germanFarms, rho = seq( from = -0.6, to = 0.9, by = 0.3 ) )\n\n\n"} {"package":"micEconCES","topic":"durbinWatsonTest.cesEst","snippet":"### Name: durbinWatsonTest.cesEst\n### Title: Durbin-Watson Test for Estimated CES Functions\n### Aliases: durbinWatsonTest.cesEst dwt.cesEst\n### Keywords: models regression nonlinear\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n ## CES: Land & Intermediate Inputs\n cesLandInt <- cesEst( yName = \"qOutput\",\n xNames = c( \"land\", \"qVarInput\" ), data = germanFarms,\n returnGrad = TRUE )\n\n # conduct the generalized Durbin-Watson test\n dwt( cesLandInt ) \n\n\n"} {"package":"micEconCES","topic":"plot.cesEst","snippet":"### Name: plot.cesEst\n### Title: Plot RSSs of a CES Function Estimated by Grid Search\n### Aliases: plot.cesEst\n### Keywords: models regression nonlinear\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n ## CES: Land & Intermediate Inputs\n cesLandInt <- cesEst( yName = \"qOutput\",\n xNames = c( \"land\", \"qVarInput\" ), data = germanFarms,\n rho = seq( from = -0.6, to = 0.9, by = 0.3 ) )\n\n # plot the rhos against the sum of squared 
residuals\n plot( cesLandInt ) \n\n\n"} {"package":"micEconCES","topic":"summary.cesEst","snippet":"### Name: summary.cesEst\n### Title: Summarize Estimation of a CES Function\n### Aliases: summary.cesEst print.summary.cesEst\n### Keywords: models\n\n### ** Examples\n\n data( germanFarms, package = \"micEcon\" )\n # output quantity:\n germanFarms$qOutput <- germanFarms$vOutput / germanFarms$pOutput\n # quantity of intermediate inputs\n germanFarms$qVarInput <- germanFarms$vVarInput / germanFarms$pVarInput\n\n\n ## CES: Land & Labor\n cesLandLabor <- cesEst( \"qOutput\", c( \"land\", \"qLabor\" ), germanFarms )\n\n # print summary results\n summary( cesLandLabor )\n\n\n"} {"package":"bs4cards","topic":"cards","snippet":"### Name: cards\n### Title: Builds a deck of bootstrap cards\n### Aliases: cards\n\n### ** Examples\n\n## Not run: \n##D galleries %>%\n##D cards(\n##D title = long_name,\n##D text = blurb,\n##D image = image_url,\n##D link = gallery_url\n##D )\n## End(Not run)\n\n\n"} {"package":"mcradds","topic":"ESD_test","snippet":"### Name: ESD_test\n### Title: ESD Test for Outliers\n### Aliases: ESD_test\n\n### ** Examples\n\ndata(\"platelet\")\nres <- blandAltman(x = platelet$Comparative, y = platelet$Candidate)\nESD_test(x = res@stat$relative_diff)\n\n\n"} {"package":"mcradds","topic":"VCAinference","snippet":"### Name: VCAinference\n### Title: Inferential Statistics for VCA-Results\n### Aliases: VCAinference\n\n### ** Examples\n\ndata(glucose)\nfit <- anovaVCA(value ~ day / run, glucose)\nVCAinference(fit)\n\n\n"} {"package":"mcradds","topic":"anovaVCA","snippet":"### Name: anovaVCA\n### Title: ANOVA-Type Estimation of Variance Components for Random Models\n### Aliases: anovaVCA\n\n### ** Examples\n\ndata(glucose)\nanovaVCA(value ~ day / run, glucose)\n\n\n"} {"package":"mcradds","topic":"aucTest","snippet":"### Name: aucTest\n### Title: AUC Test for Paired Two-sample Measurements\n### Aliases: aucTest\n\n### ** Examples\n\ndata(\"ldlroc\")\n# H0 : Difference between areas = 0:\naucTest(x = ldlroc$LDL, y = ldlroc$OxLDL, response = ldlroc$Diagnosis)\n\n# H0 : Superiority margin <= 0.1:\naucTest(\n x = ldlroc$LDL, y = ldlroc$OxLDL, response = ldlroc$Diagnosis,\n method = \"superiority\", h0 = 0.1\n)\n\n# H0 : Non-inferiority margin <= -0.1:\naucTest(\n x = ldlroc$LDL, y = ldlroc$OxLDL, response = ldlroc$Diagnosis,\n method = \"non-inferiority\", h0 = -0.1\n)\n\n\n"} {"package":"mcradds","topic":"autoplot","snippet":"### Name: autoplot\n### Title: Generate a 'ggplot' for Bland-Altman Plot and Regression Plot\n### Aliases: autoplot autoplot,BAsummary-method autoplot,MCResult-method\n\n### ** Examples\n\n# Specify the type of difference plot\ndata(\"platelet\")\nobject <- blandAltman(x = platelet$Comparative, y = platelet$Candidate)\nautoplot(object)\nautoplot(object, type = \"relative\")\n\n# Set additional parameters for `geom_point`\nautoplot(object,\n type = \"relative\",\n jitter = TRUE,\n fill = \"lightblue\",\n color = \"grey\",\n size = 2\n)\n\n# Set the color and line type for reference and limits of agreement lines\nautoplot(object,\n type = \"relative\",\n ref.line.params = list(col = \"red\", linetype = \"solid\"),\n loa.line.params = list(col = \"grey\", linetype = \"solid\")\n)\n\n# Set label color, size and digits\nautoplot(object,\n type = \"absolute\",\n ref.line.params = list(col = \"grey\"),\n loa.line.params = list(col = \"grey\"),\n label.digits = 2,\n label.params = list(col = \"grey\", size = 3, fontface = \"italic\")\n)\n\n# Add main title, X and Y axis titles, 
and adjust X ticks.\nautoplot(object,\n type = \"absolute\",\n x.nbreak = 6,\n main.title = \"Bland-Altman Plot\",\n x.title = \"Mean of Test and Reference Methods\",\n y.title = \"Reference - Test\"\n)\n# Using the default arguments for regression plot\ndata(\"platelet\")\nfit <- mcreg(\n x = platelet$Comparative, y = platelet$Candidate,\n method.reg = \"Deming\", method.ci = \"jackknife\"\n)\nautoplot(fit)\n\n# Only present the regression line and alter the color and shape.\nautoplot(fit,\n identity = FALSE,\n reg.params = list(col = \"grey\", linetype = \"dashed\"),\n legend.title = FALSE,\n legend.digits = 4\n)\n\n\n"} {"package":"mcradds","topic":"blandAltman","snippet":"### Name: blandAltman\n### Title: Calculate Statistics for Bland-Altman\n### Aliases: blandAltman\n\n### ** Examples\n\ndata(\"platelet\")\nblandAltman(x = platelet$Comparative, y = platelet$Candidate)\n\n# with sample id as input sid\nblandAltman(x = platelet$Comparative, y = platelet$Candidate, sid = platelet$Sample)\n\n# Specify the type of difference\nblandAltman(x = platelet$Comparative, y = platelet$Candidate, type1 = 1, type2 = 4)\n\n\n"} {"package":"mcradds","topic":"calcBias","snippet":"### Name: calcBias\n### Title: Systematic Bias Between Reference Method and Test Method\n### Aliases: calcBias\n\n### ** Examples\n\ndata(platelet)\nfit <- mcreg(\n x = platelet$Comparative, y = platelet$Candidate,\n method.reg = \"Deming\", method.ci = \"jackknife\"\n)\ncalcBias(fit, x.levels = c(30, 200))\ncalcBias(fit, x.levels = c(30, 200), type = \"proportional\")\ncalcBias(fit, x.levels = c(30, 200), type = \"proportional\", percent = FALSE)\n\n\n"} {"package":"mcradds","topic":"cat_with_newline","snippet":"### Name: cat_with_newline\n### Title: Concatenate and Print with Newline\n### Aliases: cat_with_newline\n\n### ** Examples\n\ncat_with_newline(\"hello\", \"world\")\n\n\n"} {"package":"mcradds","topic":"descfreq","snippet":"### Name: descfreq\n### Title: Summarize Frequency Counts and Percentages\n### Aliases: descfreq\n\n### ** Examples\n\ndata(adsl_sub)\n\n# Count the age group by treatment with 'xx (xx.x%)' format\nadsl_sub %>%\n descfreq(\n var = \"AGEGR1\",\n bygroup = \"TRTP\",\n format = \"xx (xx.x%)\"\n )\n\n# Count the race by treatment with 'xx (xx.xx)' format and replace NA with '0'\nadsl_sub %>%\n descfreq(\n var = \"RACE\",\n bygroup = \"TRTP\",\n format = \"xx (xx.xx)\",\n na_str = \"0\"\n )\n\n# Count the sex by treatment adding total column\nadsl_sub %>%\n descfreq(\n var = \"SEX\",\n bygroup = \"TRTP\",\n format = \"xx (xx.x%)\",\n addtot = TRUE\n )\n\n# Count multiple variables by treatment and sort category by corresponding factor levels\nadsl_sub %>%\n dplyr::mutate(\n AGEGR1 = factor(AGEGR1, levels = c(\"<65\", \"65-80\", \">80\")),\n SEX = factor(SEX, levels = c(\"M\", \"F\")),\n RACE = factor(RACE, levels = c(\n \"WHITE\", \"AMERICAN INDIAN OR ALASKA NATIVE\",\n \"BLACK OR AFRICAN AMERICAN\"\n ))\n ) %>%\n descfreq(\n var = c(\"AGEGR1\", \"SEX\", \"RACE\"),\n bygroup = \"TRTP\",\n format = \"xx (xx.x%)\",\n addtot = TRUE,\n na_str = \"0\"\n )\n\n\n"} {"package":"mcradds","topic":"descvar","snippet":"### Name: descvar\n### Title: Summarize Descriptive Statistics\n### Aliases: descvar\n\n### ** Examples\n\ndata(adsl_sub)\n\n# Compute the default statistics of AGE by TRTP group\nadsl_sub %>%\n descvar(\n var = \"AGE\",\n bygroup = \"TRTP\"\n )\n\n# Compute the specific statistics of BMI by TRTP group, adding total column\nadsl_sub %>%\n descvar(\n var = \"BMIBL\",\n bygroup = \"TRTP\",\n stats 
= c(\"N\", \"MEANSD\", \"MEDIAN\", \"RANGE\", \"IQR\"),\n addtot = TRUE\n )\n\n# Set extra decimal to define precision\nadsl_sub %>%\n descvar(\n var = \"BMIBL\",\n bygroup = \"TRTP\",\n stats = c(\"N\", \"MEANSD\", \"MEDIAN\", \"RANGE\", \"IQR\"),\n autodecimal = FALSE,\n decimal = 2,\n addtot = TRUE\n )\n\n# Set multiple variables together\nadsl_sub %>%\n descvar(\n var = c(\"AGE\", \"BMIBL\", \"HEIGHTBL\"),\n bygroup = \"TRTP\",\n stats = c(\"N\", \"MEANSD\", \"MEDIAN\", \"RANGE\", \"IQR\"),\n autodecimal = TRUE,\n addtot = TRUE\n )\n\n\n"} {"package":"mcradds","topic":"diagTab","snippet":"### Name: diagTab\n### Title: Creates Contingency Table\n### Aliases: diagTab\n\n### ** Examples\n\n# For qualitative performance with wide data structure\ndata(\"qualData\")\nqualData %>% diagTab(formula = ~ CandidateN + ComparativeN)\nqualData %>%\n diagTab(\n formula = ~ CandidateN + ComparativeN,\n levels = c(1, 0)\n )\n\n# For qualitative performance with long data structure\ndummy <- data.frame(\n id = c(\"1001\", \"1001\", \"1002\", \"1002\", \"1003\", \"1003\"),\n value = c(1, 0, 0, 0, 1, 1),\n type = c(\"Test\", \"Ref\", \"Test\", \"Ref\", \"Test\", \"Ref\")\n)\ndummy %>%\n diagTab(\n formula = type ~ value,\n bysort = \"id\",\n dimname = c(\"Test\", \"Ref\"),\n levels = c(1, 0)\n )\n\n# For Between-Reader precision performance\ndata(\"PDL1RP\")\nreader <- PDL1RP$btw_reader\nreader %>%\n diagTab(\n formula = Reader ~ Value,\n bysort = \"Sample\",\n levels = c(\"Positive\", \"Negative\"),\n rep = TRUE,\n across = \"Site\"\n )\n\n\n"} {"package":"mcradds","topic":"dixon_outlier","snippet":"### Name: dixon_outlier\n### Title: Detect Dixon Outlier\n### Aliases: dixon_outlier\n\n### ** Examples\n\nx <- c(13.6, 44.4, 45.9, 11.9, 41.9, 53.3, 44.7, 95.2, 44.1, 50.7, 45.2, 60.1, 89.1)\ndixon_outlier(x)\n\n\n"} {"package":"mcradds","topic":"esd.critical","snippet":"### Name: esd.critical\n### Title: Compute Critical Value for ESD Test\n### Aliases: esd.critical\n\n### ** Examples\n\nesd.critical(alpha = 0.05, N = 100, i = 1)\n\n\n"} {"package":"mcradds","topic":"getAccuracy","snippet":"### Name: getAccuracy\n### Title: Summary Method for 'MCTab' Objects\n### Aliases: getAccuracy getAccuracy,MCTab-method\n\n### ** Examples\n\n# For qualitative performance\ndata(\"qualData\")\ntb <- qualData %>%\n diagTab(\n formula = ~ CandidateN + ComparativeN,\n levels = c(1, 0)\n )\ngetAccuracy(tb, ref = \"r\")\ngetAccuracy(tb, ref = \"nr\", nr_ci = \"wilson\")\n\n# For Between-Reader precision performance\ndata(\"PDL1RP\")\nreader <- PDL1RP$btw_reader\ntb2 <- reader %>%\n diagTab(\n formula = Reader ~ Value,\n bysort = \"Sample\",\n levels = c(\"Positive\", \"Negative\"),\n rep = TRUE,\n across = \"Site\"\n )\ngetAccuracy(tb2, ref = \"bnr\")\ngetAccuracy(tb2, ref = \"bnr\", rng.seed = 12306)\n\n\n"} {"package":"mcradds","topic":"getCoefficients","snippet":"### Name: getCoefficients\n### Title: Get Regression Coefficients\n### Aliases: getCoefficients\n\n### ** Examples\n\ndata(platelet)\nfit <- mcreg(\n x = platelet$Comparative, y = platelet$Candidate,\n method.reg = \"Deming\", method.ci = \"jackknife\"\n)\ngetCoefficients(fit)\n\n\n"} {"package":"mcradds","topic":"getOutlier","snippet":"### Name: getOutlier\n### Title: Detect Outliers From 'BAsummary' Object\n### Aliases: getOutlier getOutlier,BAsummary-method\n\n### ** Examples\n\ndata(\"platelet\")\n# Using `blandAltman` function with default arguments\nba <- blandAltman(x = platelet$Comparative, y = platelet$Candidate)\ngetOutlier(ba, method = \"ESD\", 
difference = \"rel\")\n\n# Using sample id as input\nba2 <- blandAltman(x = platelet$Comparative, y = platelet$Candidate, sid = platelet$Sample)\ngetOutlier(ba2, method = \"ESD\", difference = \"rel\")\n\n# Using `blandAltman` function when the `type2` is 4 with `X vs. (Y-X)/X` difference\nba3 <- blandAltman(x = platelet$Comparative, y = platelet$Candidate, type2 = 4)\ngetOutlier(ba3, method = \"ESD\", difference = \"rel\")\n\n# Using \"4E\" as the method input\nba4 <- blandAltman(x = platelet$Comparative, y = platelet$Candidate)\ngetOutlier(ba4, method = \"4E\")\n\n\n"} {"package":"mcradds","topic":"h_difference","snippet":"### Name: h_difference\n### Title: Compute Difference for Bland-Altman\n### Aliases: h_difference\n\n### ** Examples\n\nh_difference(x = c(1.1, 1.2, 1.5), y = c(1.2, 1.3, 1.4), type = 5)\n\n\n"} {"package":"mcradds","topic":"h_factor","snippet":"### Name: h_factor\n### Title: Factor Variable Per Levels\n### Aliases: h_factor\n\n### ** Examples\n\ndf <- data.frame(a = c(\"aa\", \"a\", \"aa\"))\nh_factor(df, var = \"a\")\nh_factor(df, var = \"a\", levels = c(\"aa\", \"a\"))\n\n\n"} {"package":"mcradds","topic":"h_fmt_count_perc","snippet":"### Name: h_fmt_count_perc\n### Title: Format count and percent\n### Aliases: h_fmt_count_perc\n\n### ** Examples\n\nh_fmt_count_perc(cnt = c(5, 9, 12, 110, 0), format = \"xx\")\nh_fmt_count_perc(\n cnt = c(5, 9, 12, 110, 0),\n perc = c(0.0368, 0.0662, 0.0882, 0.8088, 0),\n format = \"xx (xx.x%)\"\n)\n\n\n"} {"package":"mcradds","topic":"h_fmt_est","snippet":"### Name: h_fmt_est\n### Title: Format and Concatenate to String\n### Aliases: h_fmt_est\n\n### ** Examples\n\nh_fmt_est(num1 = 3.14, num2 = 3.1415, width = c(4, 4))\n\n\n"} {"package":"mcradds","topic":"h_fmt_num","snippet":"### Name: h_fmt_num\n### Title: Format Numeric Data\n### Aliases: h_fmt_num\n\n### ** Examples\n\nh_fmt_num(pi * 10^(-2:2), digits = 2, width = 6)\n\n\n"} {"package":"mcradds","topic":"h_fmt_range","snippet":"### Name: h_fmt_range\n### Title: Format and Concatenate to Range\n### Aliases: h_fmt_range\n\n### ** Examples\n\nh_fmt_range(num1 = 3.14, num2 = 3.14, width = c(4, 4))\n\n\n"} {"package":"mcradds","topic":"h_summarize","snippet":"### Name: h_summarize\n### Title: Summarize Basic Statistics\n### Aliases: h_summarize\n\n### ** Examples\n\nh_summarize(1:50)\n\n\n"} {"package":"mcradds","topic":"mcreg","snippet":"### Name: mcreg\n### Title: Comparison of Two Measurement Methods Using Regression Analysis\n### Aliases: mcreg\n\n### ** Examples\n\ndata(platelet)\nfit <- mcreg(\n x = platelet$Comparative, y = platelet$Candidate,\n method.reg = \"Deming\", method.ci = \"jackknife\"\n)\nprintSummary(fit)\ngetCoefficients(fit)\n\n\n"} {"package":"mcradds","topic":"nonparRI","snippet":"### Name: nonparRI\n### Title: Nonparametric Method in Calculation of Reference Interval\n### Aliases: nonparRI\n\n### ** Examples\n\ndata(\"calcium\")\nx <- calcium$Value\nnonparRI(x)\n\n\n"} {"package":"mcradds","topic":"pearsonTest","snippet":"### Name: pearsonTest\n### Title: Hypothesis Test for Pearson Correlation Coefficient\n### Aliases: pearsonTest\n\n### ** Examples\n\nx <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)\ny <- c(2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)\npearsonTest(x, y, h0 = 0.5, alternative = \"greater\")
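\n\n# Added illustrative call (not from the package docs): a two-sided test\n# against h0 = 0, assuming \"two.sided\" is accepted like \"greater\" above\npearsonTest(x, y, h0 = 0, alternative = \"two.sided\")\n\n\n"} {"package":"mcradds","topic":"printSummary","snippet":"### Name: printSummary\n### Title: Print Summary of a Regression Analysis\n### Aliases: printSummary\n\n### ** Examples\n\ndata(platelet)\nfit <- mcreg(\n x = platelet$Comparative, 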
y = platelet$Candidate,\n method.reg = \"Deming\", method.ci = \"jackknife\"\n)\nprintSummary(fit)\n\n\n"} {"package":"mcradds","topic":"refInterval","snippet":"### Name: refInterval\n### Title: Calculate Reference Interval and Corresponding Confidence\n### Interval\n### Aliases: refInterval\n\n### ** Examples\n\ndata(\"calcium\")\nx <- calcium$Value\nrefInterval(x, RI_method = \"parametric\", CI_method = \"parametric\")\nrefInterval(x, RI_method = \"nonparametric\", CI_method = \"nonparametric\")\nrefInterval(x, RI_method = \"robust\", CI_method = \"boot\", R = 1000)\n\n\n"} {"package":"mcradds","topic":"robustRI","snippet":"### Name: robustRI\n### Title: Robust Method in Calculation of Reference Interval\n### Aliases: robustRI\n\n### ** Examples\n\n# This example data is taken from EP28A3 Appendix B. to ensure the result is in accordance.\nx <- c(8.9, 9.2, rep(9.4, 2), rep(9.5, 3), rep(9.6, 4), rep(9.7, 5), 9.8, rep(9.9, 2), 10.2)\nrobustRI(x)\n\n\n"} {"package":"mcradds","topic":"show,SampleSize-method","snippet":"### Name: show,SampleSize-method\n### Title: Show Method for Objects\n### Aliases: show,SampleSize-method show show,MCTab-method\n### show,BAsummary-method show,RefInt-method show,tpROC-method\n### show,Desc-method\n\n### ** Examples\n\n# Sample size calculation\nsize_one_prop(p1 = 0.95, p0 = 0.9, alpha = 0.05, power = 0.8)\nsize_ci_corr(r = 0.9, lr = 0.85, alpha = 0.025, alternative = \"greater\")\n\n# Get 2x2 Contingency Table\nqualData %>% diagTab(formula = ~ CandidateN + ComparativeN)\n\n# Bland-Altman analysis\ndata(\"platelet\")\nblandAltman(x = platelet$Comparative, y = platelet$Candidate)\n\n# Reference Interval\ndata(\"calcium\")\nrefInterval(x = calcium$Value, RI_method = \"nonparametric\", CI_method = \"nonparametric\")\n\n# Comparing the Paired ROC when Non-inferiority margin <= -0.1\ndata(\"ldlroc\")\naucTest(\n x = ldlroc$LDL, y = ldlroc$OxLDL, response = ldlroc$Diagnosis,\n method = \"non-inferiority\", h0 = -0.1\n)\ndata(adsl_sub)\n\n# Count multiple variables by treatment\nadsl_sub %>%\n descfreq(\n var = c(\"AGEGR1\", \"SEX\", \"RACE\"),\n bygroup = \"TRTP\",\n format = \"xx (xx.x%)\",\n addtot = TRUE,\n na_str = \"0\"\n )\n\n# Summarize multiple variables by treatment\nadsl_sub %>%\n descvar(\n var = c(\"AGE\", \"BMIBL\", \"HEIGHTBL\"),\n bygroup = \"TRTP\",\n stats = c(\"N\", \"MEANSD\", \"MEDIAN\", \"RANGE\", \"IQR\"),\n autodecimal = TRUE,\n addtot = TRUE\n )\n\n\n"} {"package":"mcradds","topic":"size_ci_corr","snippet":"### Name: size_ci_corr\n### Title: Sample Size for Testing Confidence Interval of Pearson's\n### correlation\n### Aliases: size_ci_corr\n\n### ** Examples\n\nsize_ci_corr(r = 0.9, lr = 0.85, alpha = 0.025, alternative = \"greater\")\n\n\n"} {"package":"mcradds","topic":"size_ci_one_prop","snippet":"### Name: size_ci_one_prop\n### Title: Sample Size for Testing Confidence Interval of One Proportion\n### Aliases: size_ci_one_prop\n\n### ** Examples\n\nsize_ci_one_prop(p = 0.85, lr = 0.8, alpha = 0.05, method = \"wilson\")\nsize_ci_one_prop(p = 0.85, lr = 0.8, alpha = 0.05, method = \"simple-asymptotic\")\nsize_ci_one_prop(p = 0.85, lr = 0.8, alpha = 0.05, method = \"wald\")\n\n\n"} {"package":"mcradds","topic":"size_corr","snippet":"### Name: size_corr\n### Title: Sample Size for Testing Pearson's correlation\n### Aliases: size_corr\n\n### ** Examples\n\nsize_corr(r1 = 0.95, r0 = 0.9, alpha = 0.025, power = 0.8, alternative = \"greater\")
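\n\n# Added illustrative call (not from the package docs): the same design as a\n# two-sided test, assuming \"two.sided\" is accepted like \"greater\" above\nsize_corr(r1 = 0.95, r0 = 0.9, alpha = 0.05, power = 0.8, alternative = \"two.sided\")\n\n\n"} {"package":"mcradds","topic":"size_one_prop","snippet":"### Name: size_one_prop\n### Title: 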
Sample Size for Testing One Proportion\n### Aliases: size_one_prop\n\n### ** Examples\n\nsize_one_prop(p1 = 0.95, p0 = 0.9, alpha = 0.05, power = 0.8)\n\n\n"} {"package":"mcradds","topic":"spearmanTest","snippet":"### Name: spearmanTest\n### Title: Hypothesis Test for Spearman Correlation Coefficient\n### Aliases: spearmanTest\n\n### ** Examples\n\nx <- c(44.4, 45.9, 41.9, 53.3, 44.7, 44.1, 50.7, 45.2, 60.1)\ny <- c(2.6, 3.1, 2.5, 5.0, 3.6, 4.0, 5.2, 2.8, 3.8)\nspearmanTest(x, y, h0 = 0.5, alternative = \"greater\")\n\n\n"} {"package":"mcradds","topic":"tukey_outlier","snippet":"### Name: tukey_outlier\n### Title: Detect Tukey Outlier\n### Aliases: tukey_outlier\n\n### ** Examples\n\nx <- c(13.6, 44.4, 45.9, 14.9, 41.9, 53.3, 44.7, 95.2, 44.1, 50.7, 45.2, 60.1, 89.1)\ntukey_outlier(x)\n\n\n"} {"package":"gamboostLSS","topic":"as.families","snippet":"### Name: as.families\n### Title: Include 'gamlss' families in the boosting framework of\n### 'gamboostLSS'\n### Aliases: as.families gamlss.Families gamlss1parMu gamlss2parMu\n### gamlss2parSigma gamlss3parMu gamlss3parSigma gamlss3parNu\n### gamlss4parMu gamlss4parSigma gamlss4parNu gamlss4parTau\n### Keywords: models distributions\n\n### ** Examples\n\n## simulate small example\nset.seed(123)\nx <- runif(1000)\n\ny <- rnorm(mean = 2 + 3 * x, # effect on mu\n sd = exp( 1 - 1 * x ), # effect on sigma\n n = 1000)\n\n## boosting\nglmss <- glmboostLSS(y ~ x, families = as.families(\"NO\"))\n## the same:\nif (require(\"gamlss.dist\")) {\n glmss <- glmboostLSS(y ~ x, families = as.families(NO))\n glmss <- glmboostLSS(y ~ x, families = as.families(NO()))\n}\n\ncoef(glmss, off2int = TRUE)\n\n## compare to gamlss\nlibrary(gamlss)\nglmss2 <- gamlss(y ~ x, sigma.formula = ~x, family = \"NO\")\ncoef(glmss2)\nglmss2$sigma.coef\n\n\n\n"} {"package":"gamboostLSS","topic":"cvrisk","snippet":"### Name: cvrisk.mboostLSS\n### Title: Cross-Validation\n### Aliases: cvrisk cvrisk.mboostLSS cvrisk.nc_mboostLSS make.grid\n### plot.cvriskLSS plot.nc_cvriskLSS\n### Keywords: models regression\n\n### ** Examples\n\n## Data generating process:\nset.seed(1907)\nx1 <- rnorm(1000)\nx2 <- rnorm(1000)\nx3 <- rnorm(1000)\nx4 <- rnorm(1000)\nx5 <- rnorm(1000)\nx6 <- rnorm(1000)\nmu <- exp(1.5 +1 * x1 +0.5 * x2 -0.5 * x3 -1 * x4)\nsigma <- exp(-0.4 * x3 -0.2 * x4 +0.2 * x5 +0.4 * x6)\ny <- numeric(1000)\nfor( i in 1:1000)\n y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])\ndat <- data.frame(x1, x2, x3, x4, x5, x6, y)\n\n## linear model with y ~ . 
for both components: 100 boosting iterations\nmodel <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = 100),\n center = TRUE)\n\n## set up a grid\ngrid <- make.grid(mstop(model), length.out = 5, dense_mu_grid = FALSE)\nplot(grid)\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### a tiny toy example (5-fold bootstrap with maximum stopping value 100)\n## (to run it on multiple cores of a Linux or Mac OS computer remove\n## papply = lapply so that papply = mclapply (the default) is used, and\n## set mc.nodes to the appropriate number of nodes)\ncvr <- cvrisk(model, folds = cv(model.weights(model), B = 5),\n papply = lapply, grid = grid)\ncvr\n## plot the results\npar(mfrow = c(1, 2))\nplot(cvr)\nplot(cvr, type = \"lines\")\n## extract optimal mstop (here: grid too small)\nmstop(cvr)\n### END (don't test automatically)\n## End(No test)\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### a more realistic example\ngrid <- make.grid(c(mu = 400, sigma = 400), dense_mu_grid = FALSE)\nplot(grid)\ncvr <- cvrisk(model, grid = grid)\nmstop(cvr)\n## set model to optimal values:\nmstop(model) <- mstop(cvr)\n### END (don't test automatically)\n## End(No test)\n\n### Other grids:\nplot(make.grid(mstop(model), length.out = 3, dense_mu_grid = FALSE))\nplot(make.grid(c(mu = 400, sigma = 400), log = FALSE, dense_mu_grid = FALSE))\nplot(make.grid(c(mu = 400, sigma = 400), length.out = 4,\n min = 100, log = FALSE, dense_mu_grid = FALSE))\n\n\n### Now use dense mu grids\n# standard grid\nplot(make.grid(c(mu = 100, sigma = 100), dense = FALSE),\n pch = 20, col = \"red\")\n# dense grid for all mstop_mu values greater than mstop_sigma\ngrid <- make.grid(c(mu = 100, sigma = 100))\npoints(grid, pch = 20, cex = 0.2)\nabline(0,1)\n\n# now with three parameters\ngrid <- make.grid(c(mu = 100, sigma = 100, df = 30),\n length.out = c(5, 5, 2), dense = FALSE)\ndensegrid <- make.grid(c(mu = 100, sigma = 100, df = 30),\n length.out = c(5, 5, 2))\npar(mfrow = c(1,2))\n# first for df = 1\nplot(grid[grid$df == 1, 1:2], main = \"df = 1\", pch = 20, col = \"red\")\nabline(0,1)\nabline(v = 1)\n# now expand grid for all mu values greater than the corresponding sigma\n# value (i.e. below the bisecting line) and above df (i.e. 1)\npoints(densegrid[densegrid$df == 1, 1:2], pch = 20, cex = 0.2)\n\n# now for df = 30\nplot(grid[grid$df == 30, 1:2], main = \"df = 30\", pch = 20, col = \"red\")\nabline(0,1)\nabline(v = 30)\n# now expand grid for all mu values greater than the corresponding sigma\n# value (i.e. 
30)\npoints(densegrid[densegrid$df == 30, 1:2], pch = 20, cex = 0.2)\n\n\n"} {"package":"gamboostLSS","topic":"Families","snippet":"### Name: Families\n### Title: Families for GAMLSS models\n### Aliases: Families families GaussianLSS GaussianMu GaussianSigma\n### GammaLSS GammaMu GammaSigma BetaLSS BetaMu BetaPhi NBinomialLSS\n### NBinomialMu NBinomialSigma StudentTLSS StudentTMu StudentTSigma\n### StudentTDf LogNormalLSS LogNormalMu LogNormalSigma WeibullLSS\n### WeibullMu WeibullSigma LogLogLSS LogLogMu LogLogSigma ZIPoLSS ZINBLSS\n### options stab_ngrad stabilize_ngrad stabilize_ngradient\n### Keywords: models distributions\n\n### ** Examples\n\n## Example to define a new distribution:\n## Student's t-distribution with two parameters, df and mu:\n\n## sub-Family for mu\n## -> generate object of the class family from the package mboost\nnewStudentTMu <- function(mu, df){\n\n # loss is negative log-Likelihood, f is the parameter to be fitted with\n # id link -> f = mu\n loss <- function(df, y, f) {\n -1 * (lgamma((df + 1)/2) - lgamma(1/2) -\n lgamma(df/2) - 0.5 * log(df) -\n (df + 1)/2 * log(1 + (y - f)^2/(df )))\n }\n # risk is sum of loss\n risk <- function(y, f, w = 1) {\n sum(w * loss(y = y, f = f, df = df))\n }\n # ngradient is the negative derivative w.r.t. mu (=f)\n ngradient <- function(y, f, w = 1) {\n (df + 1) * (y - f)/(df + (y - f)^2)\n }\n\n # use the Family constructor of mboost\n mboost::Family(ngradient = ngradient, risk = risk, loss = loss,\n response = function(f) f,\n name = \"new Student's t-distribution: mu (id link)\")\n}\n\n## sub-Family for df\nnewStudentTDf <- function(mu, df){\n\n # loss is negative log-Likelihood, f is the parameter to be fitted with\n # log-link: exp(f) = df\n loss <- function( mu, y, f) {\n -1 * (lgamma((exp(f) + 1)/2) - lgamma(1/2) -\n lgamma(exp(f)/2) - 0.5 * f -\n (exp(f) + 1)/2 * log(1 + (y - mu)^2/(exp(f) )))\n }\n # risk is sum of loss\n risk <- function(y, f, w = 1) {\n sum(w * loss(y = y, f = f, mu = mu))\n }\n # ngradient is the negative derivative of the loss w.r.t. 
f\n # in this case, just the derivative of the log-likelihood \n ngradient <- function(y, f, w = 1) {\n exp(f)/2 * (digamma((exp(f) + 1)/2) - digamma(exp(f)/2)) -\n 0.5 - (exp(f)/2 * log(1 + (y - mu)^2 / (exp(f) )) -\n (y - mu)^2 / (1 + (y - mu)^2 / exp(f)) * (exp(-f) + 1)/2)\n }\n # use the Family constructor of mboost\n mboost::Family(ngradient = ngradient, risk = risk, loss = loss,\n response = function(f) exp(f),\n name = \"Student's t-distribution: df (log link)\")\n}\n\n## families object for new distribution\nnewStudentT <- Families(mu= newStudentTMu(mu=mu, df=df),\n df=newStudentTDf(mu=mu, df=df))\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### usage of the new Student's t distribution:\nlibrary(gamlss) ## required for rTF\nset.seed(1907)\nn <- 5000\nx1 <- runif(n)\nx2 <- runif(n)\nmu <- 2 -1*x1 - 3*x2\ndf <- exp(1 + 0.5*x1 )\ny <- rTF(n = n, mu = mu, nu = df)\n\n## model fitting\nmodel <- glmboostLSS(y ~ x1 + x2, families = newStudentT,\n control = boost_control(mstop = 100),\n center = TRUE)\n## shrunken effect estimates\ncoef(model, off2int = TRUE)\n\n## compare to pre-defined three parametric t-distribution:\nmodel2 <- glmboostLSS(y ~ x1 + x2, families = StudentTLSS(),\n control = boost_control(mstop = 100),\n center = TRUE)\ncoef(model2, off2int = TRUE)\n\n## with effect on sigma:\nsigma <- 3+ 1*x2\ny <- rTF(n = n, mu = mu, nu = df, sigma=sigma)\nmodel3 <- glmboostLSS(y ~ x1 + x2, families = StudentTLSS(),\n control = boost_control(mstop = 100),\n center = TRUE)\ncoef(model3, off2int = TRUE)\n## End(No test)\n\n\n\n"} {"package":"gamboostLSS","topic":"gamboostLSS-package","snippet":"### Name: gamboostLSS-package\n### Title: Boosting algorithms for GAMLSS\n### Aliases: gamboostLSS-package\n### Keywords: package\n\n### ** Examples\n\n# Generate covariates\nx1 <- runif(100)\nx2 <- runif(100)\neta_mu <- 2 - 2*x1\neta_sigma <- -1 + 2*x2\n\n# Generate response: Negative Binomial Distribution\ny <- numeric(100)\nfor( i in 1:100) y[i] <- rnbinom(1, size=exp(eta_sigma[i]), mu=exp(eta_mu[i]))\n\n# Model fitting, 300 boosting steps, same formula for both distribution parameters\nmod1 <- glmboostLSS( y ~ x1 + x2, families=NBinomialLSS(),\n control=boost_control(mstop=300), center = TRUE)\n\n# Shrunken effect estimates\ncoef(mod1, off2int=TRUE)\n\n# Empirical risk with respect to mu\nplot(risk(mod1)$mu)\n\n# Empirical risk with respect to sigma\nplot(risk(mod1)$sigma)\n\n\n"} {"package":"gamboostLSS","topic":"india","snippet":"### Name: india\n### Title: Malnutrition of Children in India (DHS, 1998-99)\n### Aliases: india india.bnd\n### Keywords: datasets\n\n### ** Examples\n\nif (require(\"BayesX\")) {\n ## plot distribution of stunting in India\n drawmap(india, map = india.bnd, regionvar = \"mcdist\", plotvar = \"stunting\")\n}\n\n\n"} {"package":"gamboostLSS","topic":"mboostLSS","snippet":"### Name: mboostLSS\n### Title: Fitting GAMLSS by Boosting\n### Aliases: mboostLSS blackboostLSS glmboostLSS gamboostLSS mboostLSS_fit\n### Keywords: models nonlinear fitting\n\n### ** Examples\n\n\n### Data generating process:\nset.seed(1907)\nx1 <- rnorm(1000)\nx2 <- rnorm(1000)\nx3 <- rnorm(1000)\nx4 <- rnorm(1000)\nx5 <- rnorm(1000)\nx6 <- rnorm(1000)\nmu <- exp(1.5 +1 * x1 +0.5 * x2 -0.5 * x3 -1 * x4)\nsigma <- exp(-0.4 * x3 -0.2 * x4 +0.2 * x5 +0.4 * x6)\ny <- numeric(1000)\nfor( i in 1:1000)\n y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])\ndat <- data.frame(x1, x2, x3, x4, x5, x6, y)\n\n### linear model with y ~ . 
for both components: 400 boosting iterations\nmodel <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = 400),\n center = TRUE)\ncoef(model, off2int = TRUE)\n\n\n### estimate model with different formulas for mu and sigma:\nnames(NBinomialLSS()) # names of the family\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n# Note: Multiple formulas must be specified via a _named list_\n# where the names correspond to the names of the distribution parameters\n# in the family (see above)\nmodel2 <- glmboostLSS(formula = list(mu = y ~ x1 + x2 + x3 + x4,\n sigma = y ~ x3 + x4 + x5 + x6),\n families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = 400, trace = TRUE),\n center = TRUE)\ncoef(model2, off2int = TRUE)\n### END (don't test automatically)\n## End(No test)\n\n\n### Offset needs to be specified via the arguments of families object:\nmodel <- glmboostLSS(y ~ ., data = dat,\n families = NBinomialLSS(mu = mean(mu),\n sigma = mean(sigma)),\n control = boost_control(mstop = 10),\n center = TRUE)\n# Note: mu-offset = log(mean(mu)) and sigma-offset = log(mean(sigma))\n# as we use a log-link in both families\ncoef(model)\nlog(mean(mu))\nlog(mean(sigma))\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### use different mstop values for the two distribution parameters\n### (two-dimensional early stopping)\n### the number of iterations is passed to boost_control via a named list\nmodel3 <- glmboostLSS(formula = list(mu = y ~ x1 + x2 + x3 + x4,\n sigma = y ~ x3 + x4 + x5 + x6),\n families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = list(mu = 400,\n sigma = 300),\n trace = TRUE),\n center = TRUE)\ncoef(model3, off2int = TRUE)\n\n### Alternatively we can change mstop of model2:\n# here it is assumed that the first element in the vector corresponds to\n# the first distribution parameter of model2 etc.\nmstop(model2) <- c(400, 300)\npar(mfrow = c(1,2))\nplot(model2, xlim = c(0, max(mstop(model2))))\n## all.equal(coef(model2), coef(model3)) # same!\n### END (don't test automatically)\n## End(No test)\n\n\n"} {"package":"gamboostLSS","topic":"print.mboostLSS","snippet":"### Name: methods\n### Title: Methods for mboostLSS\n### Aliases: print.mboostLSS summary.mboostLSS coef.mboostLSS\n### coef.glmboostLSS risk risk.mboostLSS risk.nc_mboostLSS [.mboostLSS\n### mstop.mboostLSS mstop.oobag mstop.cvriskLSS selected\n### selected.mboostLSS fitted.mboostLSS predict.mboostLSS predint PI\n### plot.glmboostLSS plot.gamboostLSS plot.predint update.mboostLSS\n### model.weights model.weights.default model.weights.mboostLSS\n### Keywords: methods\n\n### ** Examples\n\n\n### generate data\nset.seed(1907)\nx1 <- rnorm(1000)\nx2 <- rnorm(1000)\nx3 <- rnorm(1000)\nx4 <- rnorm(1000)\nx5 <- rnorm(1000)\nx6 <- rnorm(1000)\nmu <- exp(1.5 + x1^2 +0.5 * x2 - 3 * sin(x3) -1 * x4)\nsigma <- exp(-0.2 * x4 +0.2 * x5 +0.4 * x6)\ny <- numeric(1000)\nfor( i in 1:1000)\n y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])\ndat <- data.frame(x1, x2, x3, x4, x5, x6, y)\n\n### fit a model\nmodel <- gamboostLSS(y ~ ., families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = 100))\n\n## No test: \n### Do not test the following line per default on CRAN as it takes some time to run:\n### use a model with more iterations for a better fit\nmstop(model) <- 400\n## End(No test)\n### extract coefficients\ncoef(model)\n\n### only for distribution parameter 
mu\ncoef(model, parameter = \"mu\")\n\n### only for covariate x1\ncoef(model, which = \"x1\")\n\n\n### plot complete model\npar(mfrow = c(4, 3))\nplot(model)\n### plot first parameter only\npar(mfrow = c(2, 3))\nplot(model, parameter = \"mu\")\n### now plot only effect of x3 of both parameters\npar(mfrow = c(1, 2))\nplot(model, which = \"x3\")\n### first component second parameter (sigma)\npar(mfrow = c(1, 1))\nplot(model, which = 1, parameter = 2)\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### plot marginal prediction interval\npi <- predint(model, pi = 0.9, which = \"x1\")\npi <- predint(model, pi = c(0.8, 0.9), which = \"x1\")\nplot(pi, log = \"y\") # warning as some y values are below 0\n## here it would be better to plot x1 against\n## sqrt(y) and sqrt(pi)\n\n### set model to mstop = 300 (one-dimensional)\nmstop(model) <- 300\n### END (don't test automatically)\n## End(No test)\n\npar(mfrow = c(2, 2))\nplot(risk(model, parameter = \"mu\")[[1]])\nplot(risk(model, parameter = \"sigma\")[[1]])\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n### get back to original fit\nmstop(model) <- 400\nplot(risk(model, parameter = \"mu\")[[1]])\nplot(risk(model, parameter = \"sigma\")[[1]])\n\n### use different mstop values for the components\nmstop(model) <- c(100, 200)\n## same as\n mstop(model) <- c(mu = 100, sigma = 200)\n## or\n mstop(model) <- list(mu = 100, sigma = 200)\n## or\n mstop(model) <- list(100, 200)\n\nplot(risk(model, parameter = \"mu\")[[1]])\nplot(risk(model, parameter = \"sigma\")[[1]])\n### END (don't test automatically)\n## End(No test)\n\n\n"} {"package":"gamboostLSS","topic":"stabsel.mboostLSS","snippet":"### Name: stabsel\n### Title: Stability Selection\n### Aliases: stabsel.mboostLSS selected.stabsel_mboostLSS\n### Keywords: nonparametric\n\n### ** Examples\n\n\n### Data generating process:\nset.seed(1907)\nx1 <- rnorm(500)\nx2 <- rnorm(500)\nx3 <- rnorm(500)\nx4 <- rnorm(500)\nx5 <- rnorm(500)\nx6 <- rnorm(500)\nmu <- exp(1.5 +1 * x1 +0.5 * x2 -0.5 * x3 -1 * x4)\nsigma <- exp(-0.4 * x3 -0.2 * x4 +0.2 * x5 +0.4 * x6)\ny <- numeric(500)\nfor( i in 1:500)\n y[i] <- rnbinom(1, size = sigma[i], mu = mu[i])\ndat <- data.frame(x1, x2, x3, x4, x5, x6, y)\n\n### linear model with y ~ . 
for both components: 400 boosting iterations\nmodel <- glmboostLSS(y ~ ., families = NBinomialLSS(), data = dat,\n control = boost_control(mstop = 400),\n center = TRUE, method = \"noncyclic\")\n\n## No test: \n### Do not test the following code per default on CRAN as it takes some time to run:\n\n#run stability selection \n(s <- stabsel(model, q = 5, PFER = 1))\n#get selected effects\nselected(s)\n\n#visualize selection frequencies \nplot(s)\n\n### END (don't test automatically)\n## End(No test)\n\n\n"} {"package":"gamboostLSS","topic":"weighted.median","snippet":"### Name: weighted.median\n### Title: Weighted Median\n### Aliases: weighted.median\n### Keywords: methods\n\n### ** Examples\n\n\n## compute the weighted median with case weights\nx <- c(1, 2, 3, 4)\nw <- c(0, 1, 2, 3)\nweighted.median(x, w)\n\n## compute the weighted median with arbitrary weights\nx <- rnorm(100)\nw <- runif(100)\nweighted.median(x, w)\n\n\n\n"} {"package":"coat","topic":"batest","snippet":"### Name: batest\n### Title: Bland-Altman Test of Method Agreement\n### Aliases: batest print.batest plot.batest\n\n### ** Examples\n\n## Don't show: \n if(!requireNamespace(\"MethComp\")) {\n if(interactive() || is.na(Sys.getenv(\"_R_CHECK_PACKAGE_NAME_\", NA))) {\n stop(\"the MethComp package is required for this example but is not installed\")\n } else q() }\n## End(Don't show)\n## package and data (reshaped to wide format)\nlibrary(\"coat\")\ndata(\"VitCap\", package = \"MethComp\")\nVitCap_wide <- reshape(VitCap, v.names = \"y\", timevar = \"instrument\",\n idvar = c(\"item\", \"user\"), drop = \"meth\", direction = \"wide\")\n\n## two-sample BA-test\ntestresult <- batest(y.St + y.Exp ~ user, data = VitCap_wide)\n\n## display\ntestresult\nprint(testresult, digits = 1, type = \"both\")\nplot(testresult)\n\n\n\n"} {"package":"coat","topic":"print.coat","snippet":"### Name: print.coat\n### Title: Methods for Conditional Method Agreement Trees (COAT)\n### Aliases: print.coat coef.coat plot.coat node_baplot autoplot.coat\n\n### ** Examples\n\n## Don't show: \n if(!requireNamespace(\"MethComp\")) {\n if(interactive() || is.na(Sys.getenv(\"_R_CHECK_PACKAGE_NAME_\", NA))) {\n stop(\"the MethComp package is required for this example but is not installed\")\n } else q() }\n## End(Don't show)\n## package and data (reshaped to wide format)\nlibrary(\"coat\")\ndata(\"scint\", package = \"MethComp\")\nscint_wide <- reshape(scint, v.names = \"y\", timevar = \"meth\", idvar = \"item\", direction = \"wide\")\n\n## conditional method agreement tree\ntr <- coat(y.DTPA + y.DMSA ~ age + sex, data = scint_wide)\n\n## illustration of methods (including some customization)\n\n## printing\nprint(tr)\nprint(tr, header = FALSE, footer = FALSE)\n\n## extracting Bland-Altman parameters\ncoef(tr)\ncoef(tr, node = 1)\n\n## visualization (via grid with node_baplot)\nplot(tr)\nplot(tr, ip_args = list(id = FALSE),\n tp_args = list(col = \"slategray\", id = FALSE, digits = 3, pch = 19))\n\n## visualization (via ggplot2 with ggparty)\nlibrary(\"ggplot2\")\nautoplot(tr)\nautoplot(tr, digits = 3) + ggtitle(\"Conditional method agreement tree\") +\n theme(plot.title = element_text(hjust = 0.5))\n\n\n"} {"package":"coat","topic":"coat","snippet":"### Name: coat\n### Title: Conditional Method Agreement Trees (COAT)\n### Aliases: coat\n\n### ** Examples\n\n## Don't show: \n if(!requireNamespace(\"MethComp\")) {\n if(interactive() || is.na(Sys.getenv(\"_R_CHECK_PACKAGE_NAME_\", NA))) {\n stop(\"the MethComp package is required for this example but is not 
installed\")\n } else q() }\n## End(Don't show)\n## package and data (reshaped to wide format)\nlibrary(\"coat\")\ndata(\"scint\", package = \"MethComp\")\nscint_wide <- reshape(scint, v.names = \"y\", timevar = \"meth\", idvar = \"item\", direction = \"wide\")\n\n## coat based on ctree() without and with mean values of paired measurements as predictor\ntr1 <- coat(y.DTPA + y.DMSA ~ age + sex, data = scint_wide)\ntr2 <- coat(y.DTPA + y.DMSA ~ age + sex, data = scint_wide, means = TRUE)\n\n## display\nprint(tr1)\nplot(tr1)\n\nprint(tr2)\nplot(tr2)\n\n## tweak various graphical arguments of the panel function (just for illustration):\n## different colors, nonparametric bootstrap percentile confidence intervals, ...\nplot(tr1, tp_args = list(\n xscale = c(0, 150), linecol = \"deeppink\",\n confint = TRUE, B = 250, cilevel = 0.5, cicol = \"gold\"\n))\n\n\n"} {"package":"coat","topic":"diffs","snippet":"### Name: diffs\n### Title: Convenience Functions for Bland-Altman Analysis\n### Aliases: diffs means\n\n### ** Examples\n\n## pair of measurements\ny1 <- 1:4\ny2 <- c(2, 2, 1, 3)\n\n## differences and means\ndiffs(y1, y2)\nmeans(y1, y2)\n\n\n"} {"package":"karaoke","topic":"karaoke","snippet":"### Name: karaoke\n### Title: Remove vocals from a song\n### Aliases: karaoke\n### Keywords: karaoke\n\n### ** Examples\n\n#We use some '.wav' samples included in the seewave package to construct an example.\ndata(orni)\ndata(peewit)\ndata(tico)\n#Extend all the samples to be at least 20 seconds long.\nfor(loop in 1:5) orni<-pastew(orni,orni,f=22050, output=\"Wave\")\nfor(loop in 1:5) peewit<-pastew(peewit,peewit,f=22050, output=\"Wave\")\nfor(loop in 1:5) tico<-pastew(tico,tico,f=22050, output=\"Wave\")\n#Cut all samples down to exactly 20 seconds long.\norni<-cutw(orni,f=22050,from=0,to=20, output=\"Wave\")\ntico<-cutw(tico,f=22050,from=0,to=20, output=\"Wave\")\npeewit<-cutw(peewit,f=22050,from=0,to=20, output=\"Wave\")\n#Construct and write the example song to a '.wav' file. The tweeting bird, \"tico\", will be the \n#\"vocalist\", mixed to center. The \"rhythm section\" will be \"orni\" and \"peewit\", not mixed to \n#center. 
(Usually in stereo song recordings, the vocals are mostly mixed to center, allowing the \n#trick employed by the \"karaoke\" function to work.)\nleft<-normalize(tico+orni)\nright<-normalize(tico+peewit)\nwobj<-stereo(left,right)\nslot(wobj,\"pcm\")<-FALSE\nwriteWave(wobj, filename=\"song.wav\")\n#Make the karaoke version: the vocal from \"tico\" is removed.\nkaraoke(infile=\"song.wav\", outfile=\"song4karaoke.wav\")\n\n\n"} {"package":"GPIC","topic":"df2idx","snippet":"### Name: df2idx\n### Title: Compute GPIC for Multiple Groups\n### Aliases: df2idx\n\n### ** Examples\n\ndf2idx(vnomath)\ndf2idx(vnomath, c(61, 477, 836, 1007), \"n\")\ndf2idx(vnomath, c(0.026, 0.200, 0.351, 0.423), \"p\")\n\n\n"} {"package":"GPIC","topic":"n2p","snippet":"### Name: n2p\n### Title: Calculate Proportions\n### Aliases: n2p\n\n### ** Examples\n\nn2p(c(61, 477, 836, 1007))\n\n\n"} {"package":"GPIC","topic":"vec2idx","snippet":"### Name: vec2idx\n### Title: Compute GPIC for Single Group\n### Aliases: vec2idx\n\n### ** Examples\n\nvec2idx(c(3, 19, 34, 22), c(61, 477, 836, 1007), \"n\")\nvec2idx(c(3, 19, 34, 22), c(0.026, 0.200, 0.351, 0.423), \"p\")\n\n\n"} {"package":"cbass","topic":"augment.X","snippet":"### Name: augment.X\n### Title: Augment X for missing data approach for MNAR\n### Aliases: augment.X\n\n### ** Examples\n\nset.seed(1)\nn <- 100\nX <- matrix(runif(n*2, 0, 1), ncol=2)\nX[sample(1:length(X), round(.1*length(X)))] <- NA\nX.new <- augment.X(X)\nsum(is.na(X.new))\n\n\n"} {"package":"cbass","topic":"fit.cbass","snippet":"### Name: fit.cbass\n### Title: Fit CBASS model using reversible jump MCMC\n### Aliases: fit.cbass\n\n### ** Examples\n\nset.seed(1)\nn <- 100; d <- 3\nX <- matrix(runif(n*2, 0, 1), ncol=2)\nmu <- scale(X)\nbound <- qnorm(1/d^(1/(d-1)))\nmu <- cbind(bound, mu)\nz <- mu\nz[,-1] <- rnorm(length(mu[,-1]), mu[,-1], 1)\ny <- apply(z, 1, which.max)\nmod <- fit.cbass(X, y, max.int=1, max.basis=10, nmcmc=1e3, nburn=500, nthin=10)\npred.chain <- pred.cbass(mod, X)\nmu.hat <- apply(pred.chain, 2:3, mean)\nmean(abs(mu - mu.hat))\nplot(c(mu), c(mu.hat))\n\n\n"} {"package":"cbass","topic":"p.mu","snippet":"### Name: p.mu\n### Title: Predict vector of probabilities from vector of latent means\n### Aliases: p.mu\n\n### ** Examples\n\nset.seed(1)\nmu <- rnorm(5)\np.mu(mu)\n\n\n"} {"package":"cbass","topic":"pred.cbass","snippet":"### Name: pred.cbass\n### Title: Generate chain of latent normal random variables for a given X,\n### for values saved in 'mod'\n### Aliases: pred.cbass\n\n### ** Examples\n\nset.seed(1)\nn <- 100; d <- 3\nX <- matrix(runif(n*2, 0, 1), ncol=2)\nmu <- scale(X)\nbound <- qnorm(1/d^(1/(d-1)))\nmu <- cbind(bound, mu)\nz <- mu\nz[,-1] <- rnorm(length(mu[,-1]), mu[,-1], 1)\ny <- apply(z, 1, which.max)\nmod <- fit.cbass(X, y, max.int=1, max.basis=10, nmcmc=1e3, nburn=500, nthin=10)\npred.chain <- pred.cbass(mod, X)\nmu.hat <- apply(pred.chain, 2:3, mean)\nround(p.mu(mu.hat[1,]), 3)\n\n\n"} {"package":"cbass","topic":"sample.z","snippet":"### Name: sample.z\n### Title: Draw samples of independent normals (matrix) given previous\n### sample, and maximal values\n### Aliases: sample.z\n\n### ** Examples\n\nset.seed(1)\nn <- 100; d <- 3\nmu <- matrix(rnorm(n*d), n, d)\nbound <- qnorm(1/d^(1/(d-1)))\nmu[,1] <- bound\nz <- mu\nz[,-1] <- rnorm(length(mu[,-1]), mu[,-1], 1)\ny <- apply(z, 1, which.max)\nz.new <- sample.z(mu, y, z)\nall(apply(z.new, 1, which.max) == y)
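\n\n# Added illustrative check (not from the package docs): iterating the sampler\n# with the previous draw keeps the draws consistent with the observed classes y\nz.new2 <- sample.z(mu, y, z.new)\nall(apply(z.new2, 1, which.max) == y)\n\n\n"} {"package":"estprod","topic":"levinsohn_petrin","snippet":"### Name: levinsohn_petrin\n### Title: Levinsohn-Petrin 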
Estimation of Production Functions\n### Aliases: levinsohn_petrin\n\n### ** Examples\n\ndata(estprod_data)\nlevinsohn_petrin(data = estprod_data, var1 ~ var2 | var3 | var4, \nexit = ~exit, id = \"id\", time = \"year\", bootstrap = TRUE)\n\n\n"} {"package":"estprod","topic":"olley_pakes","snippet":"### Name: olley_pakes\n### Title: Olley-Pakes Estimation of Production Functions\n### Aliases: olley_pakes\n\n### ** Examples\n\ndata(estprod_data)\nolley_pakes(data = estprod_data, var1 ~ var2 | var3 | var4, \nexit = ~exit, id = \"id\", time = \"year\", bootstrap = TRUE)\n\n\n"} {"package":"estprod","topic":"wooldridge","snippet":"### Name: wooldridge\n### Title: Wooldridge Estimation of Production Functions (Cobb-Douglas)\n### Aliases: wooldridge\n\n### ** Examples\n\ndata(estprod_data)\nwooldridge(data = estprod_data, var1 ~ var2 | var3 | var4, \nid = \"id\", time = \"year\", bootstrap = TRUE)\n\n\n"} {"package":"intRegGOF","topic":"anovarIntReg","snippet":"### Name: anovarIntReg\n### Title: Integrated Regression Goodness of Fit\n### Aliases: anovarIntReg print.anovarIntReg\n### Keywords: models regression nonlinear goodness of fit\n\n### ** Examples\n\n n <- 50\n d <- data.frame( X1=runif(n),X2=runif(n))\n d$Y <- 1 - 2*d$X1 - 5*d$X2 + rnorm(n,sd=.125)\n a0 <- lm(Y~1,d) \n a1 <- lm(Y~X1,d) \n a2 <- lm(Y~X1+X2,d) \n anovarIntReg(a0,a1,a2,B=50) \n anovarIntReg(a0,a1,a2,B=50,INCREMENTAL=TRUE) \n\n\n"} {"package":"intRegGOF","topic":"intRegGOF","snippet":"### Name: intRegGOF\n### Title: Integrated Regression Goodness of Fit\n### Aliases: intRegGOF print.intRegGOF\n### Keywords: models regression nonlinear goodness of fit\n\n### ** Examples\n\nn <- 50\nd <- data.frame( X1=runif(n),X2=runif(n))\nd$Y <- 1 + 2*d$X1 + rnorm(n,sd=.125)\nplot( d ) \nintRegGOF(lm(Y~X1+X2,d),B=99)\nintRegGOF(a <- lm(Y~X1-1,d),B=99) \nintRegGOF(a,c(\"X1\",\"X2\"),B=99) \nintRegGOF(a,~X2+X1,B=99) \n\n\n"} {"package":"intRegGOF","topic":"plotAsIntRegGOF","snippet":"### Name: plot\n### Title: Integrated Regression Goodness of Fit graphical output\n### Aliases: plotAsIntRegGOF pointsAsIntRegGOF linesAsIntRegGOF\n### Keywords: models regression nonlinear goodness of fit\n\n### ** Examples\n\n n <- 50\n d <- data.frame( X1=runif(n),X2=runif(n))\n d$Y <- 1 + 2*d$X1 + rnorm(n,sd=.125)\n par(ask=TRUE)\n plot( d ) \n plotAsIntRegGOF(lm(Y~X1+X2,d),covar=\"X1\") \n plotAsIntRegGOF(a <- lm(Y~X1-1,d)) \n plotAsIntRegGOF(a,c(\"X1\")) \n plotAsIntRegGOF(a,0) \n plotAsIntRegGOF(a,fitted(a)) \n par(ask=FALSE)\n\n\n"} {"package":"TESS","topic":"cettiidae","snippet":"### Name: cettiidae\n### Title: Cettiidae phylogeny from Alstroem et al. (2011)\n### Aliases: cettiidae\n### Keywords: datasets\n\n### ** Examples\n\n# load the data\ndata(cettiidae)\n\n# save the old plotting settings\nop <- par()\n\n# set the new plotting settings\npar(cex = 0.3)\n\n# plot the phylogeny\nplot(cettiidae)\n\n# restore the plotting settings\npar(op)
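\n\n# Added illustrative summaries (not from the package docs); Ntip() and\n# branching.times() are ape functions, which TESS loads\nNtip(cettiidae) # number of tips\nmax(branching.times(cettiidae)) # crown age of the tree\n\n\n"} {"package":"TESS","topic":"conifers","snippet":"### Name: conifers\n### Title: Conifer phylogeny from Leslie et al. 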
(2012)\n### Aliases: conifers\n### Keywords: datasets\n\n### ** Examples\n\n# load the tree\ndata(conifers)\n\n# save the settings of the plotting device\nop <- par()\n\n# set the line width for plotting the branches\npar(cex = 0.3)\n\n# plot the phylogenetic tree\nplot(conifers)\n\n# restore the settings of the device\npar(op)\n\n\n"} {"package":"TESS","topic":"mammalia","snippet":"### Name: mammalia\n### Title: Dated family level mammalian phylogeny from Meredith et al.\n### (2011): Impacts of the cretaceous terrestrial revolution and kpg\n### extinction on mammal diversification.\n### Aliases: mammalia\n### Keywords: datasets\n\n### ** Examples\n\n# load the data\ndata(mammalia)\n\n# save the current settings of the plotting device\nop <- par()\n\n# set the line width for drawing thinner lines for the branches\npar(cex = 0.3)\n\n# plot the mammalian phylogeny\nplot(mammalia)\n\n# restore the settings of the device\npar(op)\n\n\n"} {"package":"TESS","topic":"tess.PosteriorPrediction","snippet":"### Name: tess.PosteriorPrediction\n### Title: tess.PosteriorPrediction: Approximation of the posterior\n### predictive distribution.\n### Aliases: tess.PosteriorPrediction\n### Keywords: htest\n\n### ** Examples\n\n# We first run an MCMC to obtain samples from the posterior distribution \n# and then simulate the posterior predictive distribution.\n\n# The bird phylogeny as the test data set\ndata(cettiidae)\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# The log-likelihood function\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n lnl <- tess.likelihood(times,b,d,samplingProbability=1.0,log=TRUE)\n return (lnl)\n}\n\nprior_diversification <- function(x) { dexp(x,rate=0.1,log=TRUE) }\nprior_turnover <- function(x) { dexp(x,rate=0.1,log=TRUE) }\npriors <- c(prior_diversification,prior_turnover)\n\n# Note, the number of iterations and the burnin is too small here \n# and should be adapted for real analyses\nsamples <- tess.mcmc(likelihood,priors,c(1,0.1),c(TRUE,TRUE),c(0.1,0.1),10,10)\n\ntmrca <- max(branching.times(cettiidae))\n# The simulation function\nsim <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n tree <- tess.sim.age(n=1,age=tmrca,b,d,samplingProbability=1.0)[[1]]\n return (tree)\n}\n\ntrees <- tess.PosteriorPrediction(sim,samples)\n\n# compute the posterior predictive test statistic\nppt <- tess.PosteriorPredictiveTest(trees,cettiidae,gammaStat)\n# get the p-value of the observed test-statistic\nppt[[2]]\n\n\n\n"} {"package":"TESS","topic":"tess.PosteriorPredictiveTest","snippet":"### Name: tess.PosteriorPredictiveTest\n### Title: tess.PosteriorPredictiveTest: Approximation of the posterior\n### predictive distribution.\n### Aliases: tess.PosteriorPredictiveTest\n### Keywords: htest\n\n### ** Examples\n\n# We first run an MCMC to obtain samples from the posterior distribution\n# and then simulate the posterior predictive distribution.\n\n# The bird phylogeny as the test data set\ndata(cettiidae)\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# The log-likelihood function\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n lnl <- 
tess.likelihood(times,b,d,samplingProbability=1.0,log=TRUE)\n return (lnl)\n}\n\nprior_diversification <- function(x) { dexp(x,rate=0.1,log=TRUE) }\nprior_turnover <- function(x) { dexp(x,rate=0.1,log=TRUE) }\npriors <- c(prior_diversification,prior_turnover)\n\n# Note, the number of iterations and the burnin is too small here\n# and should be adapted for real analyses\nsamples <- tess.mcmc(likelihood,priors,c(1,0.1),c(TRUE,TRUE),c(0.1,0.1),10,10)\n\ntmrca <- max(branching.times(cettiidae))\n# The simulation function\nsim <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n # We need trees with at least three tips for the gamma-statistics\n repeat {\n tree <- tess.sim.age(n=1,age=tmrca,b,d,samplingProbability=1.0,MRCA=TRUE)[[1]]\n if (tree$Nnode > 1) break\n }\n return (tree)\n}\n\n# simulate trees from the posterior predictive distribution\ntrees <- tess.PosteriorPrediction(sim,samples)\n\n# compute the posterior predictive test statistic\nppt <- tess.PosteriorPredictiveTest(trees,cettiidae,gammaStat)\n# get the p-value of the observed test-statistic\nppt[[2]]\n\n\n\n"} {"package":"TESS","topic":"tess.analysis","snippet":"### Name: tess.analysis\n### Title: tess.analysis: Diversification rate estimation under an episodic\n### birth-death process including mass-extinction events.\n### Aliases: tess.analysis globalBiDe.analysis\n### Keywords: models htest\n\n### ** Examples\n\n# we load the conifers as the test data set\ndata(conifers)\n\n# for the conifers we know what the total number of species is\ntotal <- 630\n# thus, we can compute what the sampling fraction is\nrho <- (conifers$Nnode+1)/total\n\n\n# next, we specify the prior mean and standard deviation \n# for the speciation and extinction rate\nmu_lambda = 0.15\nstd_lambda = 0.02\nmu_mu = 0.09\nstd_mu = 0.02\n\n# now we can run the entire analysis.\n# note that a full analysis should be run much longer\ntess.analysis( tree=conifers,\n initialSpeciationRate=exp(mu_lambda),\n initialExtinctionRate=exp(mu_mu),\n empiricalHyperPriors = FALSE,\n speciationRatePriorMean = mu_lambda,\n speciationRatePriorStDev = std_lambda,\n extinctionRatePriorMean = mu_mu,\n extinctionRatePriorStDev = std_mu,\n numExpectedRateChanges = 2,\n samplingProbability = rho,\n numExpectedMassExtinctions = 2,\n BURNIN = 100,\n MAX_ITERATIONS = 200,\n THINNING = 10,\n dir = \"analysis_conifer\")\n \n# You may want to look into the vignette for a more detailed description\n# of the features for an analysis.\n# also have a look at the functions tess.process.output and tess.plot.output\n## Don't show: \n\tunlink(\"analysis_conifer\", recursive = TRUE)\n## End(Don't show)\n\n\n\n\n"} {"package":"TESS","topic":"tess.likelihood","snippet":"### Name: tess.likelihood\n### Title: tess.likelihood: Probability density of a tree under a tree-wide\n### time-dependent birth-death process\n### Aliases: tess.likelihood\n### Keywords: models htest\n\n### ** Examples\n\n# load a test data set\ndata(cettiidae)\n\n# convert the phylogeny into the branching times\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# construct speciation and extinction rate functions that resemble the rate-shift\n# any other function could be used too\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })
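\n\n# Added illustrative check (not from the package docs): the episodic rate\n# functions defined above switch inside the interval [0.3, 0.5]\nl(c(0.2, 0.4, 0.6)) # 1 2 1\ne(c(0.2, 0.4, 0.6)) # 0.95 0.50 0.95\n\n# now compute the likelihood for the 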
tree\ntess.likelihood(times,l,e,MRCA=TRUE,log=TRUE)\n\n# a second approach is the specific episodic birth-death process likelihood function\n# we need to give the rates for each episode and the end time of the episodes\n# you should see that both are equivalent in this setting\n# the function approach is more general but also slower.\ntess.likelihood.rateshift(times,\n\t\t\t\tlambda=c(2,1,2),\n\t\t\t\tmu=c(0.95,0.5,0.95),\n\t\t\t\trateChangeTimesLambda=c(0.3,0.5),\n\t\t\t\trateChangeTimesMu=c(0.3,0.5),\n\t\t\t\tMRCA=TRUE,\n\t\t\t\tlog=TRUE)\n\n\n\n\n"} {"package":"TESS","topic":"tess.likelihood.rateshift","snippet":"### Name: tess.likelihood.rateshift\n### Title: tess.likelihood.rateshift: Probability density of a tree under a\n### tree-wide time-dependent birth-death-shift process\n### Aliases: tess.likelihood.rateshift\n### Keywords: models htest\n\n### ** Examples\n\n# load a test data set\ndata(cettiidae)\n\n# convert the phylogeny into the branching times\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# construct speciation and extinction rate functions that resemble the rate-shift\n# any other function could be used too\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })\n\n# now compute the likelihood for the tree\ntess.likelihood(times,l,e,MRCA=TRUE,log=TRUE)\n\n# a second approach is the specific episodic birth-death process likelihood function\n# we need to give the rates for each episode and the end time of the episodes\n# you should see that both are equivalent in this setting\n# the function approach is more general but also slower.\ntess.likelihood.rateshift(times,\n\t\t\t\tlambda=c(2,1,2),\n\t\t\t\tmu=c(0.95,0.5,0.95),\n\t\t\t\trateChangeTimesLambda=c(0.3,0.5),\n\t\t\t\trateChangeTimesMu=c(0.3,0.5),\n\t\t\t\tMRCA=TRUE,\n\t\t\t\tlog=TRUE)\n\n\n\n\n"} {"package":"TESS","topic":"tess.mcmc","snippet":"### Name: tess.mcmc\n### Title: tess.mcmc: Markov chain Monte Carlo simulation using a general\n### Metropolis-Hastings algorithm.\n### Aliases: tess.mcmc\n### Keywords: htest\n\n### ** Examples\n\n# load in a test data set\ndata(cettiidae)\n\n# convert the phylogeny into the branching times\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# specify a likelihood function that takes in a vector of parameters\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n lnl <- tess.likelihood(times,b,d,samplingProbability=1.0,log=TRUE)\n return (lnl)\n}\n\n# specify the prior functions\nprior.diversification <- function(x) { dexp(x,rate=0.1,log=TRUE) }\nprior.turnover <- function(x) { dexp(x,rate=0.1,log=TRUE) }\npriors <- c(prior.diversification,prior.turnover)\n\n# Note, the number of iterations and the burnin is too small here\n# and should be adapted for real analyses\nsamples <- tess.mcmc( likelihood,\n\t\t priors,\n\t\t runif(2,0,1),\n\t\t logTransforms=c(TRUE,TRUE),\n\t\t delta=c(0.1,0.1),\n\t\t iterations=100,\n\t\t burnin=20)\n\n# now summarize and visualize the results\n#plot(samples)\nsummary(samples)\ncolMeans(samples)\n\n\n\n\n"} {"package":"TESS","topic":"tess.nTaxa.expected","snippet":"### Name: tess.nTaxa.expected\n### Title: tess.nTaxa.expected: The expected number of taxa at present of a\n### tree under a global, time-dependent birth-death process ( E[ N(T) ] )\n### Aliases: 
tess.nTaxa.expected\n### Keywords: models\n\n### ** Examples\n\n\n# create the time-dependent speciation and extinction rate functions\n# here we use episodic functions\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })\n\n# now we can compute the expected number of taxa at time t\n# note that we compute here the actual diversity at time t\n# if you set reconstructed=TRUE, then you get the expected\n# number of lineages that will survive until the present\ntess.nTaxa.expected(begin=0,t=2,end=5,l,e,MRCA=TRUE)\n\n\n\n\n"} {"package":"TESS","topic":"tess.pathSampling","snippet":"### Name: tess.pathSampling\n### Title: tess.pathSampling: Marginal likelihood estimation via\n### Path-Sampling.\n### Aliases: tess.pathSampling\n### Keywords: htest\n\n### ** Examples\n\n# load a test data set\ndata(cettiidae)\n# convert the phylogeny into the branching times\ntimes <- as.numeric( branching.times(cettiidae) )\n\n# construct a likelihood function taking in a vector of parameters\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n lnl <- tess.likelihood(times,b,d,samplingProbability=1.0,log=TRUE)\n return (lnl)\n}\n\n# next, create the prior density functions\nprior_diversification <- function(x) { dexp(x,rate=0.1,log=TRUE) }\nprior_turnover <- function(x) { dexp(x,rate=0.1,log=TRUE) }\npriors <- c(prior_diversification,prior_turnover)\n\n# Note, the number of iterations, the burnin\n# and the number of stepping stones is too small here\n# and should be adapted for real analyses\nmarginalLikelihood <- tess.pathSampling( likelihood,\n\t\t\t\t\t\t priors,\n\t\t\t\t\t\t runif(2,0,1),\n\t\t\t\t\t\t c(TRUE,TRUE),\n\t\t\t\t\t\t 10,\n\t\t\t\t\t\t 10,\n\t\t\t\t\t\t K=4)\n\n\n\n\n"} {"package":"TESS","topic":"tess.plot.multichain.diagnostics","snippet":"### Name: tess.plot.multichain.diagnostics\n### Title: tess.plot.multichain.diagnostics: Plotting the mcmc diagnostics\n### of an episodic diversification rate analysis with mass-extinction\n### events.\n### Aliases: tess.plot.multichain.diagnostics\n### Keywords: models htest\n\n### ** Examples\n\n# Load the data, compute the sampling fraction rho\ndata(conifers)\ntotalConiferSpecies <- 630\nsampledConiferSpecies <- conifers$Nnode+1\nrho <- sampledConiferSpecies / totalConiferSpecies\n\n# Run a tess analysis\ntess.analysis(tree = conifers,\n initialSpeciationRate=c(1.0),\n initialExtinctionRate=c(0.5),\n empiricalHyperPriors = FALSE,\n numExpectedRateChanges = 2,\n numExpectedMassExtinctions = 2,\n samplingProbability = rho,\n MAX_ITERATIONS = 200,\n BURNIN = 100,\n dir = \"./run_1\")\n\ntess.analysis(tree = conifers,\n initialSpeciationRate=c(1.0),\n initialExtinctionRate=c(0.5),\n empiricalHyperPriors = FALSE,\n numExpectedRateChanges = 2,\n numExpectedMassExtinctions = 2,\n samplingProbability = rho,\n MAX_ITERATIONS = 200,\n BURNIN = 100,\n dir = \"./run_2\")\n\n# Process the output\nconiferOutput_1 <- tess.process.output(dir=\"./run_1\",\n numExpectedRateChanges=2,\n numExpectedMassExtinctions=2)\n\nconiferOutput_2 <- tess.process.output(dir=\"./run_2\",\n numExpectedRateChanges=2,\n numExpectedMassExtinctions=2)\n\n# Plot the output\noutputs <- list(coniferOutput_1,coniferOutput_2)\ntess.plot.multichain.diagnostics(outputs)
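\n# Added note: these diagnostics compare the two independent runs set up above,\n# so the list must contain at least two processed outputs\n## Don't show: \n\tunlink(\"run_1\", recursive = TRUE) 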
\n\tunlink(\"run_2\", recursive = TRUE) \n## End(Don't show)\n\n\n"} {"package":"TESS","topic":"tess.plot.output","snippet":"### Name: tess.plot.output\n### Title: tess.plot.output: Plotting the output of a diversification rate\n### estimation including mass-extinction events.\n### Aliases: tess.plot.output\n### Keywords: models htest\n\n### ** Examples\n\n# Load the data, compute the sampling fraction rho\ndata(conifers)\ntotalConiferSpecies <- 630\nsampledConiferSpecies <- conifers$Nnode+1\nrho <- sampledConiferSpecies / totalConiferSpecies\n\n# Run a tess analysis\ntess.analysis(tree = conifers,\n initialSpeciationRate=c(1.0),\n initialExtinctionRate=c(0.5),\n empiricalHyperPriors = FALSE,\n numExpectedRateChanges = 2,\n numExpectedMassExtinctions = 2,\n samplingProbability = rho,\n MAX_ITERATIONS = 200,\n BURNIN = 100)\n\n# Process the output\nconiferOutput <- tess.process.output(dir=getwd(),\n numExpectedRateChanges=2,\n numExpectedMassExtinctions=2)\n\n# Plot the output\ntess.plot.output(coniferOutput)\n\n\n"} {"package":"TESS","topic":"tess.plot.singlechain.diagnostics","snippet":"### Name: tess.plot.singlechain.diagnostics\n### Title: tess.plot.mcmc.diagnostics: Plotting the single chain mcmc\n### diagnostics of an episodic diversification rate analysis with\n### mass-extinction events.\n### Aliases: tess.plot.singlechain.diagnostics\n### Keywords: models htest\n\n### ** Examples\n\n# Load the data, compute the sampling fraction rho\ndata(conifers)\ntotalConiferSpecies <- 630\nsampledConiferSpecies <- conifers$Nnode+1\nrho <- sampledConiferSpecies / totalConiferSpecies\n\n# Run a tess analysis\ntess.analysis(tree = conifers,\n initialSpeciationRate=c(1.0),\n initialExtinctionRate=c(0.5),\n empiricalHyperPriors = FALSE,\n numExpectedRateChanges = 2,\n numExpectedMassExtinctions = 2,\n samplingProbability = rho,\n MAX_ITERATIONS = 200,\n BURNIN = 100)\n\n# Process the output\nconiferOutput <- tess.process.output(dir=getwd(),\n numExpectedRateChanges=2,\n numExpectedMassExtinctions=2)\n\n# Plot the output\ntess.plot.singlechain.diagnostics(coniferOutput)\n\n\n"} {"package":"TESS","topic":"tess.process.output","snippet":"### Name: tess.process.output\n### Title: tess.process.output: Summarizing the output of a diversification\n### rate estimation including mass-extinction events. 
See the\n### tess.analysis function for more information on how such output is\n### generated and the tess.plot.output function for how the output can be\n### visualized. Also have a look at the vignette for a more detailed\n### description and examples.\n### Aliases: tess.process.output globalBiDe.output.summary\n### Keywords: models htest\n\n### ** Examples\n\n# Load the data, compute the sampling fraction rho\ndata(conifers)\ntotalConiferSpecies <- 630\nsampledConiferSpecies <- conifers$Nnode+1\nrho <- sampledConiferSpecies / totalConiferSpecies\n\n# Run a tess analysis\ntess.analysis(tree = conifers,\n initialSpeciationRate=c(1.0),\n initialExtinctionRate=c(0.5),\n empiricalHyperPriors = FALSE,\n numExpectedRateChanges = 2,\n numExpectedMassExtinctions = 2,\n samplingProbability = rho,\n MAX_ITERATIONS = 200,\n BURNIN=100)\n\n# Process the output\nconiferOutput <- tess.process.output(dir=getwd(),\n numExpectedRateChanges=2,\n numExpectedMassExtinctions=2)\n\n# Plot the output\ntess.plot.output(coniferOutput)\n\n## Don't show: \n\tunlink(\"*.txt\")\n\tunlink(\"*.pdf\")\n\tunlink(\"*.tre\")\n## End(Don't show)\n\n\n"} {"package":"TESS","topic":"tess.sim.age","snippet":"### Name: tess.sim.age\n### Title: tess.sim.age: Simulate a reconstructed tree for a given age\n### under a global, time-dependent birth-death process.\n### Aliases: tess.sim.age\n### Keywords: datagen\n\n### ** Examples\n\n\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })\n\ntess.sim.age(n=1,age=1,l,e,MRCA=TRUE)\n\n# simulation under constant rates\ntess.sim.age(n=1,age=1,2.0,1.0,MRCA=TRUE)\n\n\n\n"} {"package":"TESS","topic":"tess.sim.taxa","snippet":"### Name: tess.sim.taxa\n### Title: tess.sim.taxa: Simulate a reconstructed tree for a given\n### number of taxa under a global, time-dependent birth-death process.\n### Aliases: tess.sim.taxa\n### Keywords: datagen\n\n### ** Examples\n\n\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })\n\ntess.sim.taxa(n=1,nTaxa=10,max=10,l,e,MRCA=TRUE)\n\n# simulation under constant rates\ntess.sim.taxa(n=1,nTaxa=10,max=10,2.0,1.0,MRCA=TRUE)\n\n\n\n"} {"package":"TESS","topic":"tess.sim.taxa.age","snippet":"### Name: tess.sim.taxa.age\n### Title: tess.sim.taxa.age: Simulate a reconstructed tree for a\n### given age and number of taxa under a global, time-dependent\n### birth-death process.\n### Aliases: tess.sim.taxa.age\n### Keywords: datagen\n\n### ** Examples\n\n\nl <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (1) } else { return (2) } })\ne <- Vectorize(function(x) { if (x > 0.5 || x < 0.3) { return (0.95) } else { return (0.5) } })\n\ntess.sim.taxa.age(n=1,l,e,nTaxa=10,age=1,MRCA=TRUE)\n\n# simulation under constant rates\ntess.sim.taxa.age(n=1,2.0,1.0,nTaxa=10,age=1,MRCA=TRUE)\n\n\n\n"} {"package":"TESS","topic":"tess.steppingStoneSampling","snippet":"### Name: tess.steppingStoneSampling\n### Title: tess.steppingStoneSampling: Marginal likelihood estimation via\n### Stepping-Stone-Sampling.\n### Aliases: tess.steppingStoneSampling\n### Keywords: htest\n\n### ** Examples\n\n\ndata(cettiidae)\ntimes <- as.numeric( branching.times(cettiidae) )\n\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + 
{"package":"TESS","topic":"tess.steppingStoneSampling","snippet":"### Name: tess.steppingStoneSampling\n### Title: tess.steppingStoneSampling: Marginal likelihood estimation via\n### Stepping-Stone-Sampling.\n### Aliases: tess.steppingStoneSampling\n### Keywords: htest\n\n### ** Examples\n\n\ndata(cettiidae)\ntimes <- as.numeric( branching.times(cettiidae) )\n\nlikelihood <- function(params) {\n # We use the parameters as diversification rate and turnover rate.\n # Thus we need to transform first\n b <- params[1] + params[2]\n d <- params[2]\n \n lnl <- tess.likelihood(times,b,d,samplingProbability=1.0,log=TRUE)\n return (lnl)\n}\n\nprior_diversification <- function(x) { dexp(x,rate=0.1,log=TRUE) }\nprior_turnover <- function(x) { dexp(x,rate=0.1,log=TRUE) }\npriors <- c(prior_diversification,prior_turnover)\n\n# Note, the number of iterations, the burnin\n# and the number of stepping stones is too small here\n# and should be adapted for real analyses\nmarginalLikelihood <- tess.steppingStoneSampling( likelihood,\n\t\t\t\t\t\t priors,\n\t\t\t\t\t\t runif(2,0,1),\n\t\t\t\t\t\t c(TRUE,TRUE),\n\t\t\t\t\t\t 10,\n\t\t\t\t\t\t 10,\n\t\t\t\t\t\t K=4)\n\n\n\n\n"} {"package":"BiasedUrn","topic":"BiasedUrn","snippet":"### Name: BiasedUrn-package\n### Title: Biased Urn Model Distributions\n### Aliases: BiasedUrn BiasedUrn-package\n### Keywords: package distribution univar multivariate\n\n### ** Examples\n\ndWNCHypergeo(12, 25, 32, 20, 2.5)\n\n\n"} {"package":"BiasedUrn","topic":"BiasedUrn-Univariate","snippet":"### Name: BiasedUrn-Univariate\n### Title: Biased urn models: Univariate distributions\n### Aliases: BiasedUrn-Univariate dWNCHypergeo dFNCHypergeo pWNCHypergeo\n### pFNCHypergeo qWNCHypergeo qFNCHypergeo rWNCHypergeo rFNCHypergeo\n### meanWNCHypergeo meanFNCHypergeo varWNCHypergeo varFNCHypergeo\n### modeWNCHypergeo modeFNCHypergeo oddsWNCHypergeo oddsFNCHypergeo\n### numWNCHypergeo numFNCHypergeo minHypergeo maxHypergeo\n### Keywords: distribution univar\n\n### ** Examples\n\n# get probability\ndWNCHypergeo(12, 25, 32, 20, 2.5)\n\n\n"} {"package":"BiasedUrn","topic":"BiasedUrn-Multivariate","snippet":"### Name: BiasedUrn-Multivariate\n### Title: Biased urn models: Multivariate distributions\n### Aliases: BiasedUrn-Multivariate dMWNCHypergeo dMFNCHypergeo\n### rMWNCHypergeo rMFNCHypergeo meanMWNCHypergeo meanMFNCHypergeo\n### varMWNCHypergeo varMFNCHypergeo momentsMWNCHypergeo\n### momentsMFNCHypergeo oddsMWNCHypergeo oddsMFNCHypergeo numMWNCHypergeo\n### numMFNCHypergeo minMHypergeo maxMHypergeo\n### Keywords: distribution univar multivariate\n\n### ** Examples\n\n# get probability\ndMWNCHypergeo(c(8,10,6), c(20,30,20), 24, c(1.,2.5,1.8))\n\n\n"} {"package":"mcp","topic":"criterion","snippet":"### Name: criterion\n### Title: Compute information criteria for model comparison\n### Aliases: criterion loo.mcpfit loo LOO waic.mcpfit waic WAIC\n\n### ** Examples\n\n## No test: \n# Define two models and sample them\n# options(mc.cores = 3) # Speed up sampling\nex = mcp_example("intercepts") # Get some simulated data.\nmodel1 = list(y ~ 1 + x, ~ 1)\nmodel2 = list(y ~ 1 + x) # Without a change point\nfit1 = mcp(model1, ex$data)\nfit2 = mcp(model2, ex$data)\n\n# Compute LOO for each and compare (works for waic(fit) too)\nfit1$loo = loo(fit1)\nfit2$loo = loo(fit2)\nloo::loo_compare(fit1$loo, fit2$loo)\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"fitted.mcpfit","snippet":"### Name: fitted.mcpfit\n### Title: Expected Values from the Posterior Predictive Distribution\n### Aliases: fitted.mcpfit fitted\n\n### ** Examples\n\n## No test: \nfitted(demo_fit)\nfitted(demo_fit, probs = c(0.1, 0.5, 0.9)) # With median and 80% credible interval.\nfitted(demo_fit, summary = FALSE) # Samples instead of summary.\nfitted(demo_fit,\n newdata = data.frame(time = c(-5, 20, 300)), # New data\n probs = c(0.025, 0.5, 0.975))\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"get_segment_table","snippet":"### Name: get_segment_table\n### Title: Build a table describing a list of segments\n### Aliases: get_segment_table\n### 
Keywords: internal\n\n### ** Examples\n\nmodel = list(\n y ~ 1 + x,\n 1 + (1|id) ~ 1\n)\nget_segment_table(model)\n\n\n"} {"package":"mcp","topic":"mcp","snippet":"### Name: mcp\n### Title: Fit Multiple Linear Segments And Their Change Points\n### Aliases: mcp\n\n### ** Examples\n\n## No test: \n# Define the segments using formulas. A change point is estimated between each formula.\nmodel = list(\n response ~ 1, # Plateau in the first segment (int_1)\n ~ 0 + time, # Joined slope (time_2) at cp_1\n ~ 1 + time # Disjoined slope (int_3, time_3) at cp_2\n)\n\n# Fit it and sample the prior too.\n# options(mc.cores = 3) # Uncomment to speed up sampling\nex = mcp_example(\"demo\") # Simulated data example\ndemo_fit = mcp(model, data = ex$data, sample = \"both\")\n\n# See parameter estimates\nsummary(demo_fit)\n\n# Visual inspection of the results\nplot(demo_fit) # Visualization of model fit/predictions\nplot_pars(demo_fit) # Parameter distributions\npp_check(demo_fit) # Prior/Posterior predictive checks\n\n# Test a hypothesis\nhypothesis(demo_fit, \"cp_1 > 10\")\n\n# Make predictions\nfitted(demo_fit)\npredict(demo_fit)\npredict(demo_fit, newdata = data.frame(time = c(55.545, 80, 132)))\n\n# Compare to a one-intercept-only model (no change points) with default prior\nmodel_null = list(response ~ 1)\nfit_null = mcp(model_null, data = ex$data, par_x = \"time\") # fit another model here\ndemo_fit$loo = loo(demo_fit)\nfit_null$loo = loo(fit_null)\nloo::loo_compare(demo_fit$loo, fit_null$loo)\n\n# Inspect the prior. Useful for prior predictive checks.\nsummary(demo_fit, prior = TRUE)\nplot(demo_fit, prior = TRUE)\n\n# Show all priors. Default priors are added where you don't provide any\nprint(demo_fit$prior)\n\n# Set priors and re-run\nprior = list(\n int_1 = 15,\n time_2 = \"dt(0, 2, 1) T(0, )\", # t-dist slope. Truncated to positive.\n cp_2 = \"dunif(cp_1, 80)\", # change point to segment 2 > cp_1 and < 80.\n int_3 = \"int_1\" # Shared intercept between segment 1 and 3\n)\n\nfit3 = mcp(model, data = ex$data, prior = prior)\n\n# Show the JAGS model\ndemo_fit$jags_code\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"mcp_example","snippet":"### Name: mcp_example\n### Title: Get example models and data\n### Aliases: mcp_example\n\n### ** Examples\n\n## No test: \nex = mcp_example(\"demo\")\nplot(ex$data) # Plot data\nprint(ex$simulated) # See true parameters used to simulate\nprint(ex$call) # See how the data was simulated\n\n# Fit the model. Either...\nfit = mcp(ex$model, ex$data)\nplot(fit)\n\nex_with_fit = mcp_example(\"demo\", sample = TRUE)\nplot(ex_with_fit$fit)\n## End(No test)\n\n\n"} {"package":"mcp","topic":"plot.mcpfit","snippet":"### Name: plot.mcpfit\n### Title: Plot full fits\n### Aliases: plot.mcpfit plot\n\n### ** Examples\n\n# Typical usage. demo_fit is an mcpfit object.\nplot(demo_fit)\n## No test: \nplot(demo_fit, prior = TRUE) # The prior\n\nplot(demo_fit, lines = 0, q_fit = TRUE) # 95% HDI without lines\nplot(demo_fit, q_predict = c(0.1, 0.9)) # 80% prediction interval\nplot(demo_fit, which_y = \"sigma\", lines = 100) # The variance parameter on y\n\n# Show a panel for each varying effect\n# plot(fit, facet_by = \"my_column\")\n\n# Customize plots using regular ggplot2\nlibrary(ggplot2)\nplot(demo_fit) + theme_bw(15) + ggtitle(\"Great plot!\")\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"plot_pars","snippet":"### Name: plot_pars\n### Title: Plot individual parameters\n### Aliases: plot_pars\n\n### ** Examples\n\n# Typical usage. 
demo_fit is an mcpfit object.\nplot_pars(demo_fit)\n\n## Not run: \n##D # More options\n##D plot_pars(demo_fit, regex_pars = \"^cp_\") # Plot only change points\n##D plot_pars(demo_fit, pars = c(\"int_3\", \"time_3\")) # Plot these parameters\n##D plot_pars(demo_fit, type = c(\"trace\", \"violin\")) # Combine plots\n##D # Some plots only take pairs. hex is good to assess identifiability\n##D plot_pars(demo_fit, type = \"hex\", pars = c(\"cp_1\", \"time_2\"))\n##D \n##D # Visualize the priors:\n##D plot_pars(demo_fit, prior = TRUE)\n##D \n##D # Useful for varying effects:\n##D # plot_pars(my_fit, pars = \"varying\", ncol = 3) # plot all varying effects\n##D # plot_pars(my_fit, regex_pars = \"my_varying\", ncol = 3) # plot all levels of a particular varying\n##D \n##D # Customize multi-column ggplots using \"*\" instead of \"+\" (patchwork)\n##D library(ggplot2)\n##D plot_pars(demo_fit, type = c(\"trace\", \"dens_overlay\")) * theme_bw(10)\n## End(Not run)\n\n\n"} {"package":"mcp","topic":"pp_check","snippet":"### Name: pp_check\n### Title: Posterior Predictive Checks For Mcpfit Objects\n### Aliases: pp_check pp_check.mcpfit\n\n### ** Examples\n\n## No test: \npp_check(demo_fit)\npp_check(demo_fit, type = \"ecdf_overlay\")\n#pp_check(some_varying_fit, type = \"loo_intervals\", facet_by = \"id\")\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"predict.mcpfit","snippet":"### Name: predict.mcpfit\n### Title: Samples from the Posterior Predictive Distribution\n### Aliases: predict.mcpfit predict\n\n### ** Examples\n\n## No test: \npredict(demo_fit) # Evaluate at each demo_fit$data\npredict(demo_fit, probs = c(0.1, 0.5, 0.9)) # With median and 80% credible interval.\npredict(demo_fit, summary = FALSE) # Samples instead of summary.\npredict(\n demo_fit,\n newdata = data.frame(time = c(-5, 20, 300)), # Evaluate\n probs = c(0.025, 0.5, 0.975)\n)\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"print.mcptext","snippet":"### Name: print.mcptext\n### Title: Nice printing texts\n### Aliases: print.mcptext\n\n### ** Examples\n\nmytext = \"line1 = 2\\n line2 = 'horse'\"\nclass(mytext) = \"mcptext\"\nprint(mytext)\n\n\n"} {"package":"mcp","topic":"residuals.mcpfit","snippet":"### Name: residuals.mcpfit\n### Title: Compute Residuals From Mcpfit Objects\n### Aliases: residuals.mcpfit residuals resid resid.mcpfit\n\n### ** Examples\n\n## No test: \nresiduals(demo_fit)\nresiduals(demo_fit, probs = c(0.1, 0.5, 0.9)) # With median and 80% credible interval.\nresiduals(demo_fit, summary = FALSE) # Samples instead of summary.\n## End(No test)\n\n\n\n"} {"package":"mcp","topic":"summary.mcpfit","snippet":"### Name: summary.mcpfit\n### Title: Summarise mcpfit objects\n### Aliases: summary.mcpfit summary fixef fixef.mcpfit fixed.effects ranef\n### ranef.mcpfit random.effects print.mcpfit print\n\n### ** Examples\n\n# Typical usage\nsummary(demo_fit)\nsummary(demo_fit, width = 0.8, digits = 4) # Set HDI width\n\n# Get the results as a data frame\nresults = summary(demo_fit)\n\n# Varying (random) effects\n# ranef(my_fit)\n\n# Summarise prior\nsummary(demo_fit, prior = TRUE)\n\n\n\n"} {"package":"metaBMA","topic":"bma","snippet":"### Name: bma\n### Title: Bayesian Model Averaging\n### Aliases: bma\n\n### ** Examples\n\n## No test: \n# model averaging for fixed and random effects\ndata(towels)\nfixed <- meta_fixed(logOR, SE, study, towels)\nrandom <- meta_random(logOR, SE, study, towels)\n\naveraged <- bma(list(\"fixed\" = fixed, \"random\" = random))\naveraged\nplot_posterior(averaged)\nplot_forest(averaged, 
mar = c(4.5, 20, 4, .3))\n## End(No test)\n\n\n"} {"package":"metaBMA","topic":"facial_feedback","snippet":"### Name: facial_feedback\n### Title: Data Set: Facial Feedback\n### Aliases: facial_feedback\n### Keywords: datasets\n\n### ** Examples\n\ndata(facial_feedback)\nhead(facial_feedback)\nmf <- meta_fixed(d, SE, study, facial_feedback)\nmf\nplot_posterior(mf)\n\n\n"} {"package":"metaBMA","topic":"inclusion","snippet":"### Name: inclusion\n### Title: Inclusion Bayes Factor\n### Aliases: inclusion\n\n### ** Examples\n\n#### Example with simple Normal-distribution models\n# generate data:\nx <- rnorm(50)\n\n# Model 1: x ~ Normal(0,1)\nlogm1 <- sum(dnorm(x, log = TRUE))\n# Model 2: x ~ Normal(.2, 1)\nlogm2 <- sum(dnorm(x, mean = .2, log = TRUE))\n# Model 3: x ~ Student-t(df=2)\nlogm3 <- sum(dt(x, df = 2, log = TRUE))\n\n# BF: Correct (Model 1) vs. misspecified (2 & 3)\ninclusion(c(logm1, logm2, logm3), include = 1)\n\n\n"} {"package":"metaBMA","topic":"meta_bma","snippet":"### Name: meta_bma\n### Title: Model Averaging for Meta-Analysis\n### Aliases: meta_bma\n\n### ** Examples\n\n## No test: \n### Bayesian Model-Averaged Meta-Analysis (H1: d>0)\ndata(towels)\nset.seed(123)\nmb <- meta_bma(logOR, SE, study, towels,\n d = prior(\"norm\", c(mean = 0, sd = .3), lower = 0),\n tau = prior(\"invgamma\", c(shape = 1, scale = 0.15))\n)\nmb\nplot_posterior(mb, \"d\")\n## End(No test)\n\n\n"} {"package":"metaBMA","topic":"meta_default","snippet":"### Name: meta_default\n### Title: Defaults for Model Averaging in Meta-Analysis\n### Aliases: meta_default\n\n### ** Examples\n\n## No test: \ndata(towels)\nset.seed(123)\nmd <- meta_default(logOR, SE, study, towels,\n field = \"psychology\", effect = \"logOR\"\n)\nmd\nplot_forest(md)\n## End(No test)\n\n\n\n"} {"package":"metaBMA","topic":"meta_fixed","snippet":"### Name: meta_fixed\n### Title: Bayesian Fixed-Effects Meta-Analysis\n### Aliases: meta_fixed\n\n### ** Examples\n\n### Bayesian Fixed-Effects Meta-Analysis (H1: d>0)\ndata(towels)\nmf <- meta_fixed(logOR, SE, study,\n data = towels,\n d = prior(\"norm\", c(mean = 0, sd = .3), lower = 0)\n)\nmf\nplot_posterior(mf)\nplot_forest(mf)\n\n\n"} {"package":"metaBMA","topic":"meta_ordered","snippet":"### Name: meta_ordered\n### Title: Meta-Analysis with Order-Constrained Study Effects\n### Aliases: meta_ordered\n\n### ** Examples\n\n## No test: \n### Bayesian Meta-Analysis with Order Constraints (H1: d>0)\ndata(towels)\nset.seed(123)\nmo <- meta_ordered(logOR, SE, study, towels,\n d = prior(\"norm\", c(mean = 0, sd = .3), lower = 0)\n)\nmo\nplot_posterior(mo)\n## End(No test)\n\n\n"} {"package":"metaBMA","topic":"meta_random","snippet":"### Name: meta_random\n### Title: Bayesian Random-Effects Meta-Analysis\n### Aliases: meta_random\n\n### ** Examples\n\n## No test: \n### Bayesian Random-Effects Meta-Analysis (H1: d>0)\ndata(towels)\nset.seed(123)\nmr <- meta_random(logOR, SE, study,\n data = towels,\n d = prior(\"norm\", c(mean = 0, sd = .3), lower = 0),\n tau = prior(\"invgamma\", c(shape = 1, scale = 0.15))\n)\nmr\nplot_posterior(mr)\n## End(No test)\n\n\n"} {"package":"metaBMA","topic":"meta_sensitivity","snippet":"### Name: meta_sensitivity\n### Title: Sensitivity Analysis for Bayesian Meta-Analysis\n### Aliases: meta_sensitivity\n\n### ** Examples\n\n## No test: \ndata(towels)\nsensitivity <- meta_sensitivity(\n y = logOR, SE = SE, labels = study, data = towels,\n d_list = list(prior(\"cauchy\", c(0, .707)),\n prior(\"norm\", c(0, .5)),\n prior(\"norm\", c(.5, .3))),\n tau_list = 
list(prior(\"invgamma\", c(1, 0.15), label = \"tau\"),\n prior(\"gamma\", c(1.5, 3), label = \"tau\")),\n analysis = \"random\",\n combine_priors = \"crossed\")\n\nprint(sensitivity, digits = 2)\n\npar(mfrow = c(1,2))\nplot(sensitivity, \"d\", \"prior\")\nplot(sensitivity, \"d\", \"posterior\")\n\nplot(sensitivity, \"tau\", \"prior\")\nplot(sensitivity, \"tau\", \"posterior\")\n## End(No test)\n\n\n\n"} {"package":"metaBMA","topic":"plot.prior","snippet":"### Name: plot.prior\n### Title: Plot Prior Distribution\n### Aliases: plot.prior\n\n### ** Examples\n\np1 <- prior(\"t\", c(location = 0, scale = 0.707, nu = 1), 0, 3)\nplot(p1, 0, 2)\n\n# define custom prior pdf up to a constant:\np2 <- prior(\"custom\", function(x) x^.5, 0, .5)\nplot(p2)\n\n\n"} {"package":"metaBMA","topic":"plot_default","snippet":"### Name: plot_default\n### Title: Plot Default Priors\n### Aliases: plot_default\n\n### ** Examples\n\nplot_default(field = \"psychology\", effect = \"d\")\n\n\n"} {"package":"metaBMA","topic":"plot_forest","snippet":"### Name: plot_forest\n### Title: Forest Plot for Meta-Analysis\n### Aliases: plot_forest\n\n### ** Examples\n\ndata(towels)\nmf <- meta_fixed(logOR, SE, study, towels)\nplot_forest(mf, mar = c(4.5, 20, 4, .2), xlab = \"Log Odds Ratio\")\n\n\n"} {"package":"metaBMA","topic":"power_pose","snippet":"### Name: power_pose\n### Title: Data Set: Power Pose Effect\n### Aliases: power_pose power_pose_unfamiliar\n### Keywords: datasets\n\n### ** Examples\n\ndata(power_pose)\nhead(power_pose)\n\n# Simple fixed-effects meta-analysis\nmfix <- meta_fixed(effectSize, SE, study,\n data = power_pose\n)\nmfix\nplot_posterior(mfix)\n\n\n"} {"package":"metaBMA","topic":"prior","snippet":"### Name: prior\n### Title: Prior Distribution\n### Aliases: prior\n\n### ** Examples\n\n### Half-Normal Distribution\np1 <- prior(\"norm\", c(mean = 0, sd = .3), lower = 0)\np1\np1(c(-1, 1, 3))\nplot(p1, -.1, 1)\n\n### Half-Cauchy Distribution\np2 <- prior(\"cauchy\", c(location = 0, scale = .3), lower = 0)\nplot(p2, -.5, 3)\n\n### Custom Prior Distribution\np3 <- prior(\"custom\", function(x) x^2, 0, 1)\nplot(p3, -.1, 1.2)\n\n\n\n"} {"package":"metaBMA","topic":"towels","snippet":"### Name: towels\n### Title: Data Set: Reuse of Towels in Hotels\n### Aliases: towels\n### Keywords: datasets\n\n### ** Examples\n\ndata(towels)\nhead(towels)\n\n\n"} {"package":"metaBMA","topic":"transform_es","snippet":"### Name: transform_es\n### Title: Transformation of Effect Sizes\n### Aliases: transform_es\n\n### ** Examples\n\n# transform a single value of Cohen's\ntransform_es(y = 0.50, SE = 0.20, from = \"d\", to = \"logOR\")\n\n# towels data set:\ntransform_es(y = towels$logOR, SE = towels$SE, from = \"logOR\", to = \"d\")\n\n\n\n"} {"package":"CondIndTests","topic":"CondIndTest","snippet":"### Name: CondIndTest\n### Title: Wrapper function for conditional independence tests.\n### Aliases: CondIndTest\n\n### ** Examples\n\n\n# Example 1\nset.seed(1)\nn <- 100\nZ <- rnorm(n)\nX <- 4 + 2 * Z + rnorm(n)\nY <- 3 * X^2 + Z + rnorm(n)\ntest1 <- CondIndTest(X,Y,Z, method = \"KCI\")\ncat(\"These data come from a distribution, for which X and Y are NOT\ncond. ind. given Z.\")\ncat(paste(\"The p-value of the test is: \", test1$pvalue))\n\n# Example 2\nset.seed(1)\nZ <- rnorm(n)\nX <- 4 + 2 * Z + rnorm(n)\nY <- 3 + Z + rnorm(n)\ntest2 <- CondIndTest(X,Y,Z, method = \"KCI\")\ncat(\"The data come from a distribution, for which X and Y are cond.\nind. 
given Z.\")\ncat(paste(\"The p-value of the test is: \", test2$pvalue))\n\n\n\n"} {"package":"CondIndTests","topic":"InvariantConditionalQuantilePrediction","snippet":"### Name: InvariantConditionalQuantilePrediction\n### Title: Invariant conditional quantile prediction.\n### Aliases: InvariantConditionalQuantilePrediction\n\n### ** Examples\n\n# Example 1\nn <- 1000\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantConditionalQuantilePrediction(Y, as.factor(E), X)\n\n# Example 2\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * E + rnorm(n)\nInvariantConditionalQuantilePrediction(Y, as.factor(E), X)\n\n\n\n"} {"package":"CondIndTests","topic":"InvariantEnvironmentPrediction","snippet":"### Name: InvariantEnvironmentPrediction\n### Title: Invariant environment prediction.\n### Aliases: InvariantEnvironmentPrediction\n\n### ** Examples\n\n# Example 1\nn <- 1000\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantEnvironmentPrediction(Y, as.factor(E), X)\n\n# Example 2\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * E + rnorm(n)\nInvariantEnvironmentPrediction(Y, as.factor(E), X)\n\n# Example 3\nE <- rnorm(n)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantEnvironmentPrediction(Y, E, X, test = wilcoxTestTargetY)\nInvariantEnvironmentPrediction(Y, X, E, test = wilcoxTestTargetY)\n\n\n"} {"package":"CondIndTests","topic":"InvariantResidualDistributionTest","snippet":"### Name: InvariantResidualDistributionTest\n### Title: Invariant residual distribution test.\n### Aliases: InvariantResidualDistributionTest\n\n### ** Examples\n\n\n# Example 1\nn <- 1000\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantResidualDistributionTest(Y, as.factor(E), X)\nInvariantResidualDistributionTest(Y, as.factor(E), X, test = ksResidualDistributions)\n\n# Example 2\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * E + rnorm(n)\nInvariantResidualDistributionTest(Y, as.factor(E), X)\nInvariantResidualDistributionTest(Y, as.factor(E), X, test = ksResidualDistributions)\n\n\n"} {"package":"CondIndTests","topic":"InvariantTargetPrediction","snippet":"### Name: InvariantTargetPrediction\n### Title: Invariant target prediction.\n### Aliases: InvariantTargetPrediction\n\n### ** Examples\n\n# Example 1\nn <- 1000\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantTargetPrediction(Y, as.factor(E), X)\nInvariantTargetPrediction(Y, as.factor(E), X, test = wilcoxTestTargetY)\n\n# Example 2\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * E + rnorm(n)\nInvariantTargetPrediction(Y, as.factor(E), X)\nInvariantTargetPrediction(Y, as.factor(E), X, test = wilcoxTestTargetY)\n\n# Example 3\nE <- rnorm(n)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nInvariantTargetPrediction(Y, E, X)\nInvariantTargetPrediction(Y, X, E)\nInvariantTargetPrediction(Y, E, X, test = wilcoxTestTargetY)\nInvariantTargetPrediction(Y, X, E, test = wilcoxTestTargetY)\n\n\n"} {"package":"CondIndTests","topic":"KCI","snippet":"### Name: KCI\n### Title: Kernel conditional independence test.\n### Aliases: KCI\n\n### ** Examples\n\n# Example 1\nn <- 100\nE <- rnorm(n)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nKCI(Y, E, X)\nKCI(Y, X, E)\n\n\n\n"} {"package":"CondIndTests","topic":"ResidualPredictionTest","snippet":"### Name: 
ResidualPredictionTest\n### Title: Residual prediction test.\n### Aliases: ResidualPredictionTest\n\n### ** Examples\n\n# Example 1\nn <- 100\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * (X)^2 + rnorm(n)\nResidualPredictionTest(Y, as.factor(E), X)\n\n# Example 2\nE <- rbinom(n, size = 1, prob = 0.2)\nX <- 4 + 2 * E + rnorm(n)\nY <- 3 * E + rnorm(n)\nResidualPredictionTest(Y, as.factor(E), X)\n\n# not run:\n# # Example 3\n# E <- rnorm(n)\n# X <- 4 + 2 * E + rnorm(n)\n# Y <- 3 * (X)^2 + rnorm(n)\n# ResidualPredictionTest(Y, E, X)\n# ResidualPredictionTest(Y, X, E)\n\n\n"} {"package":"llbayesireg","topic":"EDI","snippet":"### Name: EDI\n### Title: Education Development Index\n### Aliases: EDI\n### Keywords: datasets\n\n### ** Examples\n\ndata(EDI)\n## maybe str(EDI) ; plot(EDI) ...\n\n\n"} {"package":"llbayesireg","topic":"MHDI","snippet":"### Name: MHDI\n### Title: Municipal Human Development Index\n### Aliases: MHDI\n### Keywords: datasets\n\n### ** Examples\n\ndata(MHDI)\n## maybe str(MHDI) ; plot(MHDI) ...\n\n\n"} {"package":"llbayesireg","topic":"Votes","snippet":"### Name: Votes\n### Title: Data of the votes in the presidential elections of the\n### municipalities of Sergipe in the years 1994, 1998, 2002 and 2006\n### Aliases: Votes\n### Keywords: datasets\n\n### ** Examples\n\ndata(Votes)\n## maybe str(Votes) ; plot(Votes) ...\n\n\n"} {"package":"llbayesireg","topic":"llHPD","snippet":"### Name: llHPD\n### Title: Highest Posterior Density for the L-Logistic Bayesian Regression\n### Aliases: llHPD\n\n### ** Examples\n\n# Modeling the coefficient with generated data\n\nlibrary(llbayesireg)\nlibrary(llogistic)\n\n# Number of elements to be generated\n\nn=50\n\n# Generated response\n\nbin=2005\nset.seed(bin)\ny=rllogistic(n,0.5, 2)\n\nfitll = llbayesireg(y, niter=100, jump=10)\n\nllHPD(fitll)\n\n ## No test: \n# Modeling the coefficient with real data\nlibrary(llbayesireg)\n\ndata(\"Votes\",\"MHDI\")\n\ny = Votes[,4]\nX = MHDI\n\nfitll = llbayesireg(y,X)\n\nllHPD(fitll)\n \n## End(No test)\n\n\n\n"} {"package":"llbayesireg","topic":"llbayesireg","snippet":"### Name: llbayesireg\n### Title: The L-Logistic Bayesian Regression\n### Aliases: llbayesireg\n\n### ** Examples\n\n# Modeling the coefficient with generated data\n\nlibrary(llbayesireg)\nlibrary(llogistic)\n\n# Number of elements to be generated\n\nn=50\n\n# Generated response\n\nbin=2005\nset.seed(bin)\ny=rllogistic(n,0.5, 2)\n\nfitll = llbayesireg(y, niter=100, jump=10)\n\nm.hat=mean(fitll$sample.m); m.hat\nphi.hat=mean(fitll$sample.phi); phi.hat\n\n ## No test: \n# Modeling the coefficient with real data\nlibrary(llbayesireg)\n\ndata(\"Votes\",\"MHDI\")\n\ny = Votes[,4]\nX = MHDI\n\nfitll = llbayesireg(y,X)\n\nsummary(fitll$object, pars = c(\"beta\",\"delta\"), probs = c(0.025,0.975))\n\nplot(fitll$betas[,1,1], type = \"l\")\n \n## End(No test)\n\n\n\n"} {"package":"llbayesireg","topic":"lldiagnostics","snippet":"### Name: lldiagnostics\n### Title: Diagnostics from a fitll object\n### Aliases: lldiagnostics\n\n### ** Examples\n\n# Modeling the coefficient with generated data\n\nlibrary(llbayesireg)\nlibrary(llogistic)\n\n# Number of elements to be generated\n\nn=50\n\n# Generated response\n\nbin=2005\nset.seed(bin)\ny=rllogistic(n,0.5, 2)\n\nfitll = llbayesireg(y, niter=100, jump=10)\n\nlldiagnostics(fitll$object)\n ## No test: \n# Modeling the coefficient with real data\nlibrary(llbayesireg)\n\ndata(\"Votes\",\"MHDI\")\n\ny = Votes[,4]\nX = MHDI\n\nfitll = 
llbayesireg(y,X)\n\nlldiagnostics(fitll$object)\n \n## End(No test)\n\n\n\n"} {"package":"gatepoints","topic":"fhs","snippet":"### Name: fhs\n### Title: Freehand select\n### Aliases: fhs\n\n### ** Examples\n\n## Not run: \n##D x <- cbind(1:10, 1:10)\n##D rownames(x) <- 1:10\n##D plot(x, pch = 16, col = \"red\")\n##D fhs(x)\n## End(Not run)\n\n\n"} {"package":"sugarbag","topic":"allocate","snippet":"### Name: allocate\n### Title: Allocate polygon centroids to hexagons in a grid\n### Aliases: allocate\n\n### ** Examples\n\n# Create centroids set\ncentroids <- create_centroids(tas_lga, sf_id = \"lga_code_2016\")\n# Smaller set for faster example\ncentroids <- centroids[1:10,] \n# Create hexagon location grid\ndata(capital_cities)\ngrid <- create_grid(centroids = centroids, hex_size = 0.2, buffer_dist = 1.2)\n# Allocate polygon centroids to hexagon grid points\nhex_allocated <- allocate(\n centroids = centroids,\n hex_grid = grid,\n hex_size = 0.2, # same size used in create_grid\n hex_filter = 3,\n focal_points = capital_cities,\n width = 30, \n verbose = TRUE\n)\n# NEXT: \n# create a set of hexagon points for plotting\n# using fortify_hexagon, and\n# plot the hexagons with geom_polygon, see vignette\n\n\n"} {"package":"sugarbag","topic":"closest_focal_point","snippet":"### Name: closest_focal_point\n### Title: For the polygon provided, find the closest focal point in the\n### set provided\n### Aliases: closest_focal_point\n\n### ** Examples\n\n# Create a set of polygon centroids\ncentroids <- create_centroids(tas_sa2, \"sa2_5dig_2016\")\n\n# Find the closest capital city for the first centroid\nclosest_focal_point(centroids[1, ], capital_cities)\n\n\n"} {"package":"sugarbag","topic":"create_buffer","snippet":"### Name: create_buffer\n### Title: Expand points to extend beyond the outermost centroids\n### Aliases: create_buffer\n\n### ** Examples\n\nlga_centroids <- create_centroids(sugarbag::tas_lga, \"lga_code_2016\")\nlga_grid <- create_grid(lga_centroids, hex_size = 0.2, buffer_dist = 1.2)\n\n\n\n"} {"package":"sugarbag","topic":"create_centroids","snippet":"### Name: create_centroids\n### Title: Create a data frame of longitude and latitude centroids of each\n### polygon.\n### Aliases: create_centroids\n\n### ** Examples\n\ncentroids <- create_centroids(tas_lga, \"lga_code_2016\")\n\n\n"} {"package":"sugarbag","topic":"create_grid","snippet":"### Name: create_grid\n### Title: Create a grid of evenly spaced points to allow hexagons to\n### tessellate\n### Aliases: create_grid\n\n### ** Examples\n\n# Create a set of centroids for grid to overlay\ncentroids <- create_centroids(tas_lga, \"lga_code_2016\")\n# Create the grid\ngrid <- create_grid(centroids = centroids, hex_size = 0.2, buffer_dist = 1.2, verbose = FALSE)\n\n\n\n"} {"package":"sugarbag","topic":"create_hexmap","snippet":"### Name: create_hexmap\n### Title: Create a tessellated hexagon map from a set of polygons\n### Aliases: create_hexmap\n\n### ** Examples\n\n\ndata(tas_lga)\n# Smaller set for faster example\ntas_lga_sub <- tas_lga[1:10,] \ndata(capital_cities)\nhexmap <- create_hexmap(\n shp = tas_lga_sub,\n sf_id = \"lga_code_2016\",\n hex_filter = 3,\n focal_points = capital_cities, \n verbose = TRUE)\n\n\n\n"} {"package":"sugarbag","topic":"fortify_hexagon","snippet":"### Name: fortify_hexagon\n### Title: Creates the points that define a hexagon polygon for plotting\n### Aliases: fortify_hexagon\n\n### ** Examples\n\n# same column is used in create_centroids\nfortify_hexagon(data = tas_lga_hexctr, sf_id = \"lga_code_2016\", 
hex_size = 0.2)\n\n\n"} {"package":"sugarbag","topic":"read_shape","snippet":"### Name: read_shape\n### Title: Read in the shape file as sf object\n### Aliases: read_shape\n\n### ** Examples\n\n## No test: \n# Example of how a shape file is read\nshape <- read_shape(shp_path = file.choose())\n## End(No test)\n\n\n\n"} {"package":"BTdecayLasso","topic":"BTdecay","snippet":"### Name: BTdecay\n### Title: Bradley-Terry Model with Exponential Decayed weighted likelihood\n### Aliases: BTdecay\n\n### ** Examples\n\n##Initializing Dataframe\nx <- BTdataframe(NFL2010)\n\n##Standard Bradley-Terry Model optimization\ny <- BTdecay(x$dataframe, x$ability, decay.rate = 0, fixed = x$worstTeam)\nsummary(y)\n\n##Dynamic approximation of current ability scores using exponential decayed likelihood.\n##If we take decay.rate = 0.005,\n##a match that happened one month earlier will be weighted exp(-0.15)=0.86 in the log-likelihood function\nz <- BTdecay(x$dataframe, x$ability, decay.rate = 0.005, fixed = x$worstTeam)\nsummary(z)\n\n\n"} {"package":"BTdecayLasso","topic":"BTdecayLasso","snippet":"### Name: BTdecayLasso\n### Title: Bradley-Terry Model with Exponential Decayed weighted likelihood\n### and Adaptive Lasso\n### Aliases: BTdecayLasso\n\n### ** Examples\n\n##Initializing Dataframe\nx <- BTdataframe(NFL2010)\n\n##The following code runs the main results\n##Usually a single lambda's run will take 1-20 s\n##The whole Adaptive Lasso run will take 5-20 min\n## No test: \n##BTdecayLasso run with exponential decay rate 0.005 and \n##lambda 0.1, use path = TRUE if you want to run whole LASSO path\ny1 <- BTdecayLasso(x$dataframe, x$ability, lambda = 0.1, path = FALSE,\n decay.rate = 0.005, fixed = x$worstTeam)\nsummary(y1)\n\n##Defining equal weight\n##Note that compared to the Adaptive weight, the user defined weight may not be \n##efficient in grouping. Therefore, to run the whole Lasso path \n##(evolving of distinct ability scores), it may take a much longer time. 
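##A quick arithmetic check, offered as a hedged aside: the BTdecay example
##above quotes a weight of exp(-0.15)=0.86 for a match played one month
##(30 days) earlier under decay.rate = 0.005. The line below simply verifies
##that figure.
exp(-0.005 * 30) # 0.8607, i.e. roughly 0.86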
\n##We recommend the user to apply the default setting,\n##where Adaptive Lasso will be run.\n\nn <- nrow(x$ability) - 1\nw2 <- matrix(1, nrow = n, ncol = n)\nw2[lower.tri(w2, diag = TRUE)] <- 0\n\n##BTdecayLasso run with exponential decay rate 0.005 and with a specific lambda 0.1\ny2 <- BTdecayLasso(x$dataframe, x$ability, lambda = 0.1, weight = w2, \n path = FALSE, decay.rate = 0.005, fixed = x$worstTeam)\n\nsummary(y2)\n## End(No test)\n\n\n\n"} {"package":"NPCD","topic":"AlphaMLE","snippet":"### Name: AlphaMLE\n### Title: Maximum likelihood estimation of attribute profile\n### Aliases: AlphaMLE\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nnatt <- 3\nnitem <- 4\nnperson <- 5\nQ <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\nalpha <- rbind(c(0, 0, 0), c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\n\n# Generate DINA model-based response data\n\nslip <- c(0.1, 0.15, 0.2, 0.25)\nguess <- c(0.1, 0.15, 0.2, 0.25)\nmy.par <- list(slip=slip, guess=guess)\n\ndata <- matrix(NA, nperson, nitem)\neta <- matrix(NA, nperson, nitem)\n\nfor (i in 1:nperson) {\n for (j in 1:nitem) {\n eta[i, j] <- prod(alpha[i,] ^ Q[j, ])\n P <- (1 - slip[j]) ^ eta[i, j] * guess[j] ^ (1 - eta[i, j])\n u <- runif(1)\n data[i, j] <- as.numeric(u < P)\n }\n}\n\n# Using the function to estimate examinee attribute profile\n\nalpha.est.MLE <- AlphaMLE(data, Q, my.par, model=\"DINA\", undefined.flag=NULL)\n\nnperson <- 1 # Choose an examinee to investigate\nprint(alpha.est.MLE) # Print the estimated examinee attribute profiles\nplot(alpha.est.MLE, nperson) # Plot the sorted log-likelihood function \n#of different attribute profiles for this examinee\nItemFit(alpha.est.MLE)\n\n\n"} {"package":"NPCD","topic":"AlphaNP","snippet":"### Name: AlphaNP\n### Title: Nonparametric estimation of attribute profiles\n### Aliases: AlphaNP\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nnatt <- 3\nnitem <- 4\nnperson <- 5\nQ <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\nalpha <- rbind(c(0, 0, 0), c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\n\n# Generate DINA model-based response data\n\nslip <- c(0.1, 0.15, 0.2, 0.25)\nguess <- c(0.1, 0.15, 0.2, 0.25)\nmy.par <- list(slip=slip, guess=guess)\n\ndata <- matrix(NA, nperson, nitem)\neta <- matrix(NA, nperson, nitem)\n\nfor (i in 1:nperson) {\n for (j in 1:nitem) {\n eta[i, j] <- prod(alpha[i,] ^ Q[j, ])\n P <- (1 - slip[j]) ^ eta[i, j] * guess[j] ^ (1 - eta[i, j])\n u <- runif(1)\n data[i, j] <- as.numeric(u < P)\n }\n}\n\n# Using the function to estimate examinee attribute profile\n\nalpha.est.NP.H <- AlphaNP(data, Q, gate=\"AND\", method=\"Hamming\")\nalpha.est.NP.W <- AlphaNP(data, Q, gate=\"AND\", method=\"Weighted\")\nalpha.est.NP.P <- AlphaNP(data, Q, gate=\"AND\", method=\"Penalized\", wg=2, ws=1)\n\nnperson <- 1 # Choose an examinee to investigate\nprint(alpha.est.NP.H) # Print the estimated examinee attribute profiles\nplot(alpha.est.NP.H, nperson) # Plot the sorted loss function of different \n#attribute profiles for this examinee\nItemFit(alpha.est.NP.H, model=\"DINA\", par=list(slip=slip, guess=guess))\nItemFit(alpha.est.NP.W, model=\"DINA\", par=list(slip=slip, guess=guess))\nItemFit(alpha.est.NP.P, model=\"DINA\", par=list(slip=slip, guess=guess))\n\n\n"} {"package":"NPCD","topic":"CDL","snippet":"### Name: CDL\n### Title: Log-likelihood for cognitive diagnostic models\n### Aliases: CDL\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nnitem <- 4\nQ <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 
1))\nalpha <- c(1, 0, 0)\n\n# Generate DINA model-based response data\n\nslip <- rep(0.1, nitem)\nguess <- rep(0.1, nitem)\nmy.par <- list(slip=slip, guess=guess)\n\ndata <- NA\neta <- NA\n\nfor (i in 1:nitem) {\n eta[i] <- prod(alpha ^ Q[i, ])\n P <- (1 - slip[i]) ^ eta[i] * guess[i] ^ (1 - eta[i])\n u <- runif(1)\n data[i] <- as.numeric(u < P)\n}\n\n# Using the function to compute the log-likelihood of the given data\n\nCDL(data, Q, my.par, alpha, model=\"DINA\", undefined.flag=rep(0, nitem))\n\n\n"} {"package":"NPCD","topic":"CDP","snippet":"### Name: CDP\n### Title: Probability of correct response for cognitive diagnostic models\n### Aliases: CDP\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nQ <- c(1, 0, 0)\nalpha <- c(1, 0, 0)\nslip <- 0.2\nguess <- 0.1\nmy.par <- list(slip=slip, guess=guess)\nCDP(Q, my.par, alpha, model=\"DINA\")\n\n\n"} {"package":"NPCD","topic":"ItemFit","snippet":"### Name: ItemFit\n### Title: Compute item fit statistics for outputs generated by estimation\n### functions in the package\n### Aliases: ItemFit\n\n### ** Examples\n\n# See examples in AlphaNP, AlphaMLE, ParMLE, and JMLE.\n\n\n"} {"package":"NPCD","topic":"JMLE","snippet":"### Name: JMLE\n### Title: Joint maximum likelihood estimation of item parameters and\n### examinee attribute profiles\n### Aliases: JMLE\n\n### ** Examples\n\ndata(\"Data.DINA\")\nJMLE.result <- JMLE(Data.DINA$response, Data.DINA$Q, model=\"DINA\", conv.crit.par=0.001, \nconv.crit.att=0.001, max.ite=100)\nprint(JMLE.result) # Print the estimated item parameters, standard errors, \n#and examinee attribute profiles\nplot(JMLE.result, nperson=1) # Plot the sorted loss function of different \n#attribute profiles for this examinee\nItemFit(JMLE.result)\nModelFit(JMLE.result)\n\n\n"} {"package":"NPCD","topic":"ModelFit","snippet":"### Name: ModelFit\n### Title: Compute overall model fit statistics for outputs generated by\n### estimation functions in the package\n### Aliases: ModelFit\n\n### ** Examples\n\n# See examples in ParMLE and JMLE.\n\n\n"} {"package":"NPCD","topic":"ParMLE","snippet":"### Name: ParMLE\n### Title: Maximum likelihood estimation of item parameters for cognitive\n### diagnostic models.\n### Aliases: ParMLE\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nnatt <- 3\nnitem <- 4\nnperson <- 5\nQ <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\nalpha <- rbind(c(0, 0, 0), c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\n\n# Generate DINA model-based response data\n\nslip <- c(0.1, 0.15, 0.2, 0.25)\nguess <- c(0.1, 0.15, 0.2, 0.25)\nmy.par <- list(slip=slip, guess=guess)\n\ndata <- matrix(NA, nperson, nitem)\neta <- matrix(NA, nperson, nitem)\n\nfor (i in 1:nperson) {\n for (j in 1:nitem) {\n eta[i, j] <- prod(alpha[i,] ^ Q[j, ])\n P <- (1 - slip[j]) ^ eta[i, j] * guess[j] ^ (1 - eta[i, j])\n u <- runif(1)\n data[i, j] <- as.numeric(u < P)\n }\n}\n\n# Using the function to estimate item parameters\n\nparMLE.result <- ParMLE(data, Q, alpha, model=\"DINA\")\nprint(parMLE.result) # Print the estimated item parameters and standard errors\nItemFit(parMLE.result)\nModelFit(parMLE.result)\n\n\n"} {"package":"NPCD","topic":"Qrefine","snippet":"### Name: Qrefine\n### Title: Refine the Q-matrix by minimizing the residual sum of square\n### (RSS)\n### Aliases: Qrefine\n\n### ** Examples\n\n# Generate item and examinee profiles\n\nnatt <- 3\nnitem <- 4\nnperson <- 16\nQ <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), c(1, 1, 1))\nalpha <- rbind(c(0, 0, 0), c(1, 0, 0), c(0, 1, 0), c(0, 0, 1), \n c(1, 
1, 0), c(1, 0, 1), c(0, 1, 1), c(1, 1, 1))\nalpha <- rbind(alpha, alpha)\n\n# Generate DINA model-based response data\n\nslip <- c(0.1, 0.15, 0.2, 0.25)\nguess <- c(0.1, 0.15, 0.2, 0.25)\nmy.par <- list(slip=slip, guess=guess)\n\ndata <- matrix(NA, nperson, nitem)\neta <- matrix(NA, nperson, nitem)\n\nfor (i in 1:nperson) {\n for (j in 1:nitem) {\n eta[i, j] <- prod(alpha[i,] ^ Q[j, ])\n P <- (1 - slip[j]) ^ eta[i, j] * guess[j] ^ (1 - eta[i, j])\n u <- runif(1)\n data[i, j] <- as.numeric(u < P)\n }\n}\n\n# Generate misspecified Q-matrix\n\nQ_mis <- Q\nQ_mis[c(1,2), 1] <- 1 - Q_mis[c(1,2), 1]\n\n# Run Qrefine and create diagnostic plots\n\nQrefine.out <- Qrefine(data, Q_mis, gate=\"AND\", max.ite=50)\nprint(Qrefine.out)\nplot(Qrefine.out)\n\n\n"} {"package":"NPCD","topic":"plot.AlphaNP","snippet":"### Name: plot.NPCD\n### Title: Produce diagnostic plots\n### Aliases: plot.AlphaNP plot.AlphaMLE plot.JMLE plot.Qrefine\n\n### ** Examples\n\n# See examples in AlphaNP, AlphaMLE, JMLE, and Qrefine.\n\n\n"} {"package":"NPCD","topic":"print.AlphaNP","snippet":"### Name: print.NPCD\n### Title: Print outputs generated from the functions in the package.\n### Aliases: print.AlphaNP print.AlphaMLE print.ParMLE print.JMLE\n### print.Qrefine\n\n### ** Examples\n\n# See examples in AlphaNP, AlphaMLE, ParMLE, JMLE, and Qrefine.\n\n\n"} {"package":"DMQ","topic":"EstimateDMQ","snippet":"### Name: EstimateDMQ\n### Title: Estimate the Dynamic Multiple Quantile (DMQ) model.\n### Aliases: EstimateDMQ\n\n### ** Examples\n\n# Load Microsoft Corporation logarithmic percentage returns from December 8, \n# 2010 to November 15, 2018 for a total of T = 2000 observation\ndata(\"MSFT\")\n\n##############################################################\n######################## Estimate DMQ ########################\n##############################################################\n\n# Deciles\nvTau = seq(0.1, 0.9, 0.1)\n\n# Reference quantile to the median\niTau_star = 5\n\n# Fix the reference quantile to a constant\nFixReference = TRUE\n\n# Estimate DMQ\nFit_solnp = EstimateDMQ(vY = vY,\n vTau = vTau,\n iTau_star = iTau_star,\n FixReference = FixReference,\n fn.optimizer = fn.solnp,\n cluster = cluster)\n\nFit_solnp$vPn\nFit_solnp$optimizer$value\n\n## Not run: \n##D #### Estimate DMQ using different optimizers\n##D \n##D # With the DEoptim optimizer\n##D \n##D # parallel computation\n##D iG = 7\n##D cluster = makeCluster(iG)\n##D \n##D set.seed(123)\n##D \n##D # Estimate DMQ\n##D Fit_DEoptim = EstimateDMQ(vY = vY,\n##D vTau = vTau,\n##D iTau_star = iTau_star,\n##D FixReference = FixReference,\n##D fn.optimizer = fn.DEoptim,\n##D cluster = cluster)\n##D \n##D Fit_DEoptim$vPn\n##D Fit_DEoptim$optimizer$value\n##D \n##D # Estimate the model with a user defined optimizer.\n##D # Let's use the gosolnp() optimizer from the Rsolnp package.\n##D \n##D library(\"Rsolnp\")\n##D fn.gosolnp <- function(par0, vY, FUN, LB, UB, ...) 
{\n##D \n##D foo = list(...)\n##D if (!is.null(foo$cluster)) {\n##D cluster = foo$cluster\n##D clusterEvalQ(cluster, library(DMQ))\n##D } \n##D \n##D optimiser = gosolnp(\n##D pars = par0,\n##D fun = FUN, vY = vY, \n##D n.sim = 1000,\n##D n.restarts = 5,\n##D LB = LB,\n##D UB = UB, control = list(trace = 1), \n##D ...)\n##D \n##D out = list(pars = optimiser$pars,\n##D value = tail(optimiser$values, 1),\n##D hessian = optimiser$hessian,\n##D convergence = optimiser$convergence)\n##D \n##D return(out)\n##D \n##D }\n##D \n##D set.seed(123)\n##D # Estimate DMQ\n##D Fit_gosolnp = EstimateDMQ(vY = vY,\n##D vTau = vTau,\n##D iTau_star = iTau_star,\n##D FixReference = FixReference,\n##D fn.optimizer = fn.gosolnp,\n##D cluster = cluster,\n##D smooth = TRUE) \n##D \n##D Fit_gosolnp$vPn\n##D Fit_gosolnp$optimizer$value\n##D \n##D stopCluster(cluster)\n##D \n## End(Not run)\n\n\n"} {"package":"DMQ","topic":"ForecastDMQ","snippet":"### Name: ForecastDMQ\n### Title: Forecast with univariate DMQ model\n### Aliases: ForecastDMQ\n\n### ** Examples\n\n# Load Microsoft Corporation logarithmic percentage returns from December 8, \n# 2010 to November 15, 2018 for a total of T = 2000 observation\ndata(\"MSFT\")\n\n##############################################################\n######################## Estimate DMQ ########################\n##############################################################\n\n# Estimate DMQ at tau_j = 0.05, 0.10, ..., 0.95\n# with fixed median as reference quantile.\nFit = EstimateDMQ(vY = vY,\n vTau = seq(0.05, 0.95, 0.05),\n iTau_star = 10,\n FixReference = TRUE,\n fn.optimizer = fn.solnp)\n\n# Compute 20-step ahead predictions\nmQ_pred = ForecastDMQ(Fit, H = 20) \n\nmQ_pred\n\n\n"} {"package":"DMQ","topic":"MomentsDMQ","snippet":"### Name: MomentsDMQ\n### Title: Estimate conditional moments using DMQ\n### Aliases: MomentsDMQ\n\n### ** Examples\n\n## No test: \n# Load Microsoft Corporation logarithmic percentage returns from December 8, \n# 2010 to November 15, 2018 for a total of T = 2000 observation\ndata(\"MSFT\")\n\n##############################################################\n######################## Estimate DMQ ########################\n##############################################################\n\n# Estimate DMQ on the in sample period\nFit = EstimateDMQ(vY = vY,\n vTau = seq(0.01, 0.99, 0.01),\n iTau_star = 50,\n FixReference = TRUE,\n fn.optimizer = fn.solnp)\n\n# Compute estimated moments\n\nMoments = MomentsDMQ(Fit)\n## End(No test)\n\n\n"} {"package":"DMQ","topic":"SimulateDMQ","snippet":"### Name: SimulateDMQ\n### Title: Simulate from the DMQ model\n### Aliases: SimulateDMQ\n\n### ** Examples\n\n\nset.seed(123)\n\n# Simulate 500 observations from the DMQ model.\n\n# Use the percentiles\nvTau = seq(0.01, 0.99, 0.01)\n\n# Median as reference quantile\niTau_star = 50\n\n# Standard Gaussian limiting distribution\nvQ_0 = qnorm(vTau)\n\n# vector of parameters\nvPn = c(\"phi\" = 0.95, \"gamma\" = 0.10, \"alpha\" = 0.01, \"beta\" = 0.7)\n\nlSim = SimulateDMQ(iT = 500, vQ_0, vTau, iTau_star, vPn)\n\nplot.ts(lSim$vY)\nplot.ts(lSim$mQ, plot.type = \"single\")\n\n\n"} {"package":"DMQ","topic":"UpdateDMQ","snippet":"### Name: UpdateDMQ\n### Title: Update filtered quantiles\n### Aliases: UpdateDMQ\n\n### ** Examples\n\n# Load Microsoft Corporation logarithmic percentage returns from December 8, \n# 2010 to November 15, 2018 for a total of T = 2000 observation\ndata(\"MSFT\")\n\n# Divide the sample in two equal parts\nvY_is = 
vY[1:1000]\n\n##############################################################\n######################## Estimate DMQ ########################\n##############################################################\n\n# Estimate DMQ over the deciles on the in-sample period\nFit = EstimateDMQ(vY = vY_is,\n vTau = seq(0.1, 0.9, 0.1),\n iTau_star = 5,\n FixReference = TRUE,\n fn.optimizer = fn.solnp)\n\n# compute a sequence of one-step-ahead rolling predictions over the out-of-sample period\n\nRoll = UpdateDMQ(Fit, vY) \n\n# one-step-ahead predictions from time t = 1001 to 2001 are\nmForecast = t(Roll$lFilter$mQ)[1001:2001, ]\n\n\n\n"} {"package":"bmggum","topic":"bayesplot","snippet":"### Name: bayesplot\n### Title: Bayesian convergence diagnosis plotting function\n### Aliases: bayesplot\n\n### ** Examples\n\nData <- c(1,4,2,3)\nData <- matrix(Data,nrow = 2)\ndeli <- c(1,-1,2,1)\ndeli <- matrix(deli,nrow = 2)\nind <- c(1,2)\nind <- t(ind)\ncova <- c(0.70, -1.25)\nmod <- bmggum(GGUM.Data=Data,delindex=deli,trait=2,ind=ind,option=4,covariate=cova,iter=5,chains=1)\nbayesplot(mod, 'alpha', 'density', inc_warmup=FALSE)\n\n\n"} {"package":"bmggum","topic":"bmggum","snippet":"### Name: bmggum\n### Title: Bayesian Multidimensional Generalized Graded Unfolding Model\n### (bmggum)\n### Aliases: bmggum\n\n### ** Examples\n\nData <- c(1,4,2,3)\nData <- matrix(Data,nrow = 2)\ndeli <- c(1,-1,2,1)\ndeli <- matrix(deli,nrow = 2)\nind <- c(1,2)\nind <- t(ind)\ncova <- c(0.70, -1.25)\nmod <- bmggum(GGUM.Data=Data,delindex=deli,trait=2,ind=ind,option=4,covariate=cova,iter=5,chains=1)\n\n\n"} {"package":"bmggum","topic":"extract","snippet":"### Name: extract\n### Title: results extraction\n### Aliases: extract\n\n### ** Examples\n\nData <- c(1,4,2,3)\nData <- matrix(Data,nrow = 2)\ndeli <- c(1,-1,2,1)\ndeli <- matrix(deli,nrow = 2)\nind <- c(1,2)\nind <- t(ind)\ncova <- c(0.70, -1.25)\nmod <- bmggum(GGUM.Data=Data,delindex=deli,trait=2,ind=ind,option=4,covariate=cova,iter=5,chains=1)\nalpha <- extract(mod, 'alpha')\n\n\n"} {"package":"bmggum","topic":"itemplot","snippet":"### Name: itemplot\n### Title: item plotting function including observable response categories\n### (ORCs)\n### Aliases: itemplot\n\n### ** Examples\n\nData <- c(1,4,2,3)\nData <- matrix(Data,nrow = 2)\ndeli <- c(1,-1,2,1)\ndeli <- matrix(deli,nrow = 2)\nind <- c(1,2)\nind <- t(ind)\ncova <- c(0.70, -1.25)\nmod <- bmggum(GGUM.Data=Data,delindex=deli,trait=2,ind=ind,option=4,covariate=cova,iter=5,chains=1)\nitemplot(mod, items=1)\n\n\n"} {"package":"bmggum","topic":"modfit","snippet":"### Name: modfit\n### Title: Model fit\n### Aliases: modfit\n\n### ** Examples\n\nData <- c(1,4,2,3)\nData <- matrix(Data,nrow = 2)\ndeli <- c(1,-1,2,1)\ndeli <- matrix(deli,nrow = 2)\nind <- c(1,2)\nind <- t(ind)\ncova <- c(0.70, -1.25)\nmod <- bmggum(GGUM.Data=Data,delindex=deli,trait=2,ind=ind,option=4,covariate=cova,iter=5,chains=1)\nwaic <- modfit(mod, 'waic')\n\n\n"} 
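# A hedged companion to the bmggum examples above: a trace plot is the usual
# complement to the posterior-density diagnostic shown in the bayesplot
# example. This assumes 'trace' is accepted as a plot type alongside
# 'density'; mod is the fitted object from those examples.
bayesplot(mod, 'alpha', 'trace', inc_warmup = FALSE)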
{"package":"bytescircle","topic":"bytescircle","snippet":"### Name: bytescircle\n### Title: Statistics About Bytes Contained in a File as a Circle Plot\n### Aliases: bytescircle\n\n### ** Examples\n\n bytescircle( system.file(\"extdata\", \"gplv3.txt\", package=\"bytescircle\"),\n ascii=TRUE, plot=1, output=2)\n\n # which bytes in this file have a sd greater than 2*sigma?\n BYTES=bytescircle( system.file(\"extdata\", \"gplv3.txt.gz\", package=\"bytescircle\"), plot=3,\n col=c(\"gold\",\"blueviolet\"));\n which(BYTES$deviation>2.0)-1 # -1, 'cause BYTES[1] corresponds to byte 0\n\n # use a vector as input:\n BYTES=c(256:1); bytescircle(input=BYTES,output=0)\n\n\n"} {"package":"DMLLZU","topic":"dml_bagging","snippet":"### Name: dml_bagging\n### Title: Double Machine Learning based on bagging\n### Aliases: dml_bagging\n\n### ** Examples\n\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_bagging(y,x,d,data,sed=123)\n\n\n\n"} {"package":"DMLLZU","topic":"dml_boosting","snippet":"### Name: dml_boosting\n### Title: Double Machine Learning based on boosting\n### Aliases: dml_boosting\n\n### ** Examples\n\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_boosting(y,x,d,data,sed=123)\n\n\n"} {"package":"DMLLZU","topic":"dml_ensemble_lm","snippet":"### Name: dml_ensemble_lm\n### Title: dml_ensemble_lm\n### Aliases: dml_ensemble_lm\n\n### ** Examples\n\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_ensemble_lm(y,x,d,data,sed=123)\n\n\n"} {"package":"DMLLZU","topic":"dml_ensemble_rf","snippet":"### Name: dml_ensemble_rf\n### Title: dml_ensemble_rf\n### Aliases: dml_ensemble_rf\n\n### ** Examples\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_ensemble_rf(y,x,d,data,sed=123)\n\n\n"} {"package":"DMLLZU","topic":"dml_neural_network","snippet":"### Name: dml_neural_network\n### Title: Double Machine Learning based on neural network\n### Aliases: dml_neural_network\n\n### ** Examples\n\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_neural_network(y,x,d,data,sed=123)\n\n\n"} {"package":"DMLLZU","topic":"dml_random_forest","snippet":"### Name: dml_random_forest\n### Title: Double Machine Learning based on random forest\n### Aliases: dml_random_forest\n\n### ** Examples\n\nlibrary(ISLR)\nattach(Auto)\ndata<- Auto\ny <- data$mpg #Dependent variable\nd <- data$origin #Independent variable\nx=\"weight+year +horsepower\" #Control variables;\n\ndml_random_forest(y,x,d,data,sed=123)\n\n\n"} {"package":"DistPlotter","topic":"runDistPlotterApp","snippet":"### Name: runDistPlotterApp\n### Title: Run the DistPlotter Shiny application\n### Aliases: runDistPlotterApp\n\n### ** Examples\n\n## only run the app in an interactive R session\nif (interactive()) {runDistPlotterApp()}\n\n\n\n"} {"package":"ringostat","topic":"rs_download_record","snippet":"### Name: rs_download_record\n### Title: Download call recording\n### Aliases: rs_download_record\n\n### ** Examples\n\n## Not run: \n##D rs_download_record(\"ua-987h79879\")\n## End(Not run)\n\n\n"} {"package":"ringostat","topic":"rs_get_call_data","snippet":"### Name: rs_get_call_data\n### Title: Get calls data\n### Aliases: rs_get_call_data\n\n### ** Examples\n\n## Not run: \n##D calls <- rs_get_call_data(\n##D date_from = \"2021-09-01\",\n##D date_to = \"2021-09-30\",\n##D fields = c('caller', 'utm_campaign'),\n##D filters = \"utm_campaign=brand\"\n##D )\n## End(Not run)\n\n\n\n"} 
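# A hedged addition to the leaps examples that follow: besides exhaustive
# search, regsubsets() documents a method argument for stepwise searches.
# The call below assumes method = "forward" is available and reuses the
# swiss data from the regsubsets examples.
library(leaps)
data(swiss)
fwd <- regsubsets(Fertility ~ ., data = swiss, method = "forward")
summary(fwd)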
{"package":"leaps","topic":"leaps","snippet":"### Name: leaps\n### Title: all-subsets regression\n### Aliases: leaps\n### Keywords: regression\n\n### ** Examples\n\nx<-matrix(rnorm(100),ncol=4)\ny<-rnorm(25)\nleaps(x,y)\n\n\n"} {"package":"leaps","topic":"plot.regsubsets","snippet":"### Name: plot.regsubsets\n### Title: Graphical table of best subsets\n### Aliases: plot.regsubsets\n### Keywords: hplot regression\n\n### ** Examples\n\ndata(swiss)\na<-regsubsets(Fertility~.,nbest=3,data=swiss)\npar(mfrow=c(1,2))\nplot(a)\nplot(a,scale=\"r2\")\n\n\n"} {"package":"leaps","topic":"regsubsets","snippet":"### Name: regsubsets\n### Title: functions for model selection\n### Aliases: regsubsets regsubsets.default print.regsubsets\n### print.summary.regsubsets regsubsets.formula summary.regsubsets\n### coef.regsubsets vcov.regsubsets regsubsets.biglm\n### Keywords: regression\n\n### ** Examples\n\ndata(swiss)\na<-regsubsets(as.matrix(swiss[,-1]),swiss[,1])\nsummary(a)\nb<-regsubsets(Fertility~.,data=swiss,nbest=2)\nsummary(b)\n\ncoef(a, 1:3)\nvcov(a, 3)\n\n\n"} {"package":"DDRTree","topic":"DDRTree","snippet":"### Name: DDRTree\n### Title: Perform DDRTree construction\n### Aliases: DDRTree DDRTree DDRTree-package DDRTree-package\n\n### ** Examples\n\ndata('iris')\nsubset_iris_mat <- as.matrix(t(iris[c(1, 2, 52, 103), 1:4])) #subset the data\n#run DDRTree with ncenters equal to species number\nDDRTree_res <- DDRTree(subset_iris_mat, dimensions = 2, maxIter = 5, sigma = 1e-2,\nlambda = 1, ncenter = 3, param.gamma = 10, tol = 1e-2, verbose = FALSE)\nZ <- DDRTree_res$Z #obtain matrix\nY <- DDRTree_res$Y\nstree <- DDRTree_res$stree\nplot(Z[1, ], Z[2, ], col = iris[c(1, 2, 52, 103), 'Species']) #reduced dimension\nlegend(\"center\", legend = unique(iris[c(1, 2, 52, 103), 'Species']), cex=0.8,\ncol=unique(iris[c(1, 2, 52, 103), 'Species']), pch = 1) #legend\ntitle(main=\"DDRTree reduced dimension\", col.main=\"red\", font.main=4)\ndev.off()\nplot(Y[1, ], Y[2, ], col = 'blue', pch = 17) #center of the Z\ntitle(main=\"DDRTree smooth principal curves\", col.main=\"red\", font.main=4)\n\n#run DDRTree without specifying ncenter\nDDRTree_res <- DDRTree(subset_iris_mat, dimensions = 2, maxIter = 5, sigma = 1e-3,\nlambda = 1, ncenter = NULL,param.gamma = 10, tol = 1e-2, verbose = FALSE)\nZ <- DDRTree_res$Z #obtain matrix\nY <- DDRTree_res$Y\nstree <- DDRTree_res$stree\nplot(Z[1, ], Z[2, ], col = iris[c(1, 2, 52, 103), 'Species']) #reduced dimension\nlegend(\"center\", legend = unique(iris[c(1, 2, 52, 103), 'Species']), cex=0.8,\ncol=unique(iris[c(1, 2, 52, 103), 'Species']), pch = 1) #legend\ntitle(main=\"DDRTree reduced dimension\", col.main=\"red\", font.main=4)\ndev.off()\nplot(Y[1, ], Y[2, ], col = 'blue', pch = 2) #center of the Z\ntitle(main=\"DDRTree smooth principal graphs\", col.main=\"red\", font.main=4)\n\n\n"} {"package":"pmd","topic":"getcda","snippet":"### Name: getcda\n### Title: Perform correlation directed analysis for peaks list.\n### Aliases: getcda\n\n### ** Examples\n\ndata(spmeinvivo)\ncluster <- getcorcluster(spmeinvivo)\ncbp <- enviGCMS::getfilter(cluster,rowindex = cluster$stdmassindex2)\ncda <- getcda(cbp)\n\n\n"} {"package":"pmd","topic":"getchain","snippet":"### Name: getchain\n### Title: Get reaction chain for specific mass to charge ratio\n### Aliases: getchain\n\n### ** Examples\n\ndata(spmeinvivo)\n# check metabolites of C18H39NO\npmd <- getchain(spmeinvivo,diff = c(2.02,14.02,15.99),mass = 286.3101)\n\n\n"} {"package":"pmd","topic":"getcluster","snippet":"### Name: getcluster\n### Title: Get Pseudo-Spectrum as peaks cluster based on pmd analysis.\n### Aliases: getcluster\n\n### ** Examples\n\ndata(spmeinvivo)\nre <- 
getpaired(spmeinvivo)\nre <- getstd(re)\ncluster <- getcluster(re)\n\n\n"} {"package":"pmd","topic":"getcorcluster","snippet":"### Name: getcorcluster\n### Title: Get Pseudo-Spectrum as peaks cluster based on correlation\n### analysis.\n### Aliases: getcorcluster\n\n### ** Examples\n\ndata(spmeinvivo)\ncluster <- getcorcluster(spmeinvivo)\n\n\n"} {"package":"pmd","topic":"getpaired","snippet":"### Name: getpaired\n### Title: Filter ions/peaks based on retention time hierarchical\n### clustering, paired mass distances(PMD) and PMD frequency analysis.\n### Aliases: getpaired\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\n\n\n"} {"package":"pmd","topic":"getpmd","snippet":"### Name: getpmd\n### Title: Get pmd for specific reaction\n### Aliases: getpmd\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpmd(spmeinvivo,pmd=15.99)\n\n\n"} {"package":"pmd","topic":"getrda","snippet":"### Name: getrda\n### Title: Perform structure/reaction directed analysis for mass only.\n### Aliases: getrda\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\nsda <- getrda(spmeinvivo$mz[std$stdmassindex])\n\n\n"} {"package":"pmd","topic":"getreact","snippet":"### Name: getreact\n### Title: Get quantitative paired peaks list for specific reaction/pmd\n### Aliases: getreact\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getreact(spmeinvivo,pmd=15.99)\n\n\n"} {"package":"pmd","topic":"getsda","snippet":"### Name: getsda\n### Title: Perform structure/reaction directed analysis for peaks list.\n### Aliases: getsda\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\nsda <- getsda(std)\n\n\n"} {"package":"pmd","topic":"getstd","snippet":"### Name: getstd\n### Title: Find the independent ions for each retention time hierarchical\n### clustering based on PMD relationship within each retention time\n### cluster and isotope and return the index of the std data for each\n### retention time cluster.\n### Aliases: getstd\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\n\n\n"} {"package":"pmd","topic":"gettarget","snippet":"### Name: gettarget\n### Title: Get multiple injections index for selected retention time\n### Aliases: gettarget\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\nindex <- gettarget(std$rt[std$stdmassindex])\ntable(index)\n\n\n"} {"package":"pmd","topic":"globalstd","snippet":"### Name: globalstd\n### Title: GlobalStd algorithm with structure/reaction directed analysis\n### Aliases: globalstd\n\n### ** Examples\n\ndata(spmeinvivo)\nre <- globalstd(spmeinvivo)\n\n\n"} {"package":"pmd","topic":"pcasf","snippet":"### Name: pcasf\n### Title: Compare matrices using PCA similarity factor\n### Aliases: pcasf\n\n### ** Examples\n\nc1 <- matrix(rnorm(16),nrow=4)\nc2 <- matrix(rnorm(16),nrow=4)\npcasf(c1, c2)\n\n\n\n"} {"package":"pmd","topic":"plotcn","snippet":"### Name: plotcn\n### Title: plot PMD KEGG network for certain compounds and output network\n### average distance and degree\n### Aliases: plotcn\n\n### ** Examples\n\nplotcn('C6H12O6','Glucose',c(2.016,14.016,15.995))\n\n\n"} {"package":"pmd","topic":"plotpaired","snippet":"### Name: plotpaired\n### Title: Plot the mass pairs and high frequency mass distances\n### Aliases: plotpaired\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nplotpaired(pmd)\n\n\n"} {"package":"pmd","topic":"plotrtg","snippet":"### Name: plotrtg\n### Title: Plot the retention time 
group\n### Aliases: plotrtg\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nplotrtg(pmd)\n\n\n"} {"package":"pmd","topic":"plotsda","snippet":"### Name: plotsda\n### Title: Plot the specific structure directed analysis(SDA) groups\n### Aliases: plotsda\n\n### ** Examples\n\ndata(spmeinvivo)\nre <- getpmd(spmeinvivo,pmd=78.9)\nplotsda(re)\n\n\n"} {"package":"pmd","topic":"plotstd","snippet":"### Name: plotstd\n### Title: Plot the std mass from GlobalStd algorithm\n### Aliases: plotstd\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\nplotstd(std)\n\n\n"} {"package":"pmd","topic":"plotstdrt","snippet":"### Name: plotstdrt\n### Title: Plot the std mass from GlobalStd algorithm in certain retention\n### time groups\n### Aliases: plotstdrt\n\n### ** Examples\n\ndata(spmeinvivo)\npmd <- getpaired(spmeinvivo)\nstd <- getstd(pmd)\nplotstdrt(std,rtcluster = 6)\n\n\n"} {"package":"pmd","topic":"plotstdsda","snippet":"### Name: plotstdsda\n### Title: Plot the std mass from GlobalStd algorithm in structure directed\n### analysis(SDA) groups\n### Aliases: plotstdsda\n\n### ** Examples\n\ndata(spmeinvivo)\nre <- globalstd(spmeinvivo, sda=TRUE)\nplotstdsda(re)\n\n\n"} {"package":"somspace","topic":"cnet","snippet":"### Name: cnet\n### Title: Complex network analysis\n### Aliases: cnet\n\n### ** Examples\n\n## No test: \ndummy <- owda[Time <= 1600]\ninp_som <- sominp(dummy)\nmy_som <- somspa(inp_som, rlen = 100, grid = somgrid(3, 3, \"hexagonal\"))\nmy_regions <- somregs(my_som, nregions = 6) \ncnet(my_regions, n = 5, thres = 0.2)\n## End(No test)\n\n\n"} {"package":"somspace","topic":"owda","snippet":"### Name: owda\n### Title: Old World Drought Atlas (1500-2012)\n### Aliases: owda\n### Keywords: datasets\n\n### ** Examples\n\nstr(owda)\n\n\n"} {"package":"somspace","topic":"sominp","snippet":"### Name: sominp\n### Title: Create sominp object\n### Aliases: sominp\n\n### ** Examples\n\n\n\n## No test: \ndummy <- owda[Time <= 1510]\ninp_som <- sominp(dummy)\n## End(No test)\n\n\n\n"} {"package":"somspace","topic":"somregs","snippet":"### Name: somregs\n### Title: Classify SOM into regions\n### Aliases: somregs\n\n### ** Examples\n\n## No test: \ndummy <- owda[Time <= 1600]\ninp_som <- sominp(dummy)\nmy_som <- somspa(inp_som, rlen = 100, grid = somgrid(4, 4, \"hexagonal\"))\nmy_regions <- somregs(my_som, nregions = 9) \nplot(my_regions, regions = c(2, 4, 6, 8), nrow = 2, ncol = 2) \nplot_ts(my_regions, n = 4)\n## End(No test)\n\n\n\n"} {"package":"somspace","topic":"somspa","snippet":"### Name: somspa\n### Title: Spatial SOM\n### Aliases: somspa\n\n### ** Examples\n\n## No test: \ndummy <- owda[Time <= 1600] #toy example\ninp_som <- sominp(dummy)\n\nmy_som <- somspa(inp_som, rlen = 100, grid = somgrid(3, 3, \"hexagonal\"))\nmy_som$summary\nmy_som$som\n\nplot(my_som)\nplot_ts(my_som, n = 3)\nplot_ts(my_som, n = c(1, 2, 4, 9)) \nplot_ts(my_som, n = 1:max(my_som$summary$node)) #plots all soms\n## End(No test)\n\n\n\n"} {"package":"diversityForest","topic":"divfor","snippet":"### Name: divfor\n### Title: Construct a basic diversity forest prediction rule that uses\n### univariable, binary splitting.\n### Aliases: divfor\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Load package:\n##D library(\"diversityForest\")\n##D \n##D ## Set seed to obtain reproducible results:\n##D set.seed(1234)\n##D \n##D ## Diversity forest with default settings (NOT recommended)\n##D # Classification:\n##D divfor(Species ~ ., data = iris, num.trees = 20)\n##D # 
Regression:\n##D iris2 <- iris; iris2$Species <- NULL; iris2$Y <- rnorm(nrow(iris2))\n##D divfor(Y ~ ., data = iris2, num.trees = 20)\n##D # Survival:\n##D library(\"survival\")\n##D divfor(Surv(time, status) ~ ., data = veteran, num.trees = 20, respect.unordered.factors = \"order\")\n##D # NOTE: num.trees = 20 is specified too small for practical \n##D # purposes - the prediction performance of the resulting \n##D # forest will be suboptimal!!\n##D # In practice, num.trees = 500 (default value) or a \n##D # larger number should be used.\n##D \n##D ## Diversity forest with specified values for nsplits and proptry (NOT recommended)\n##D divfor(Species ~ ., data = iris, nsplits = 10, proptry = 0.4, num.trees = 20)\n##D # NOTE again: num.trees = 20 is specified too small for practical purposes.\n##D \n##D ## Applying diversity forest after optimizing the values of nsplits and proptry (recommended)\n##D tuneres <- tunedivfor(formula = Species ~ ., data = iris, num.trees.pre = 20)\n##D # NOTE: num.trees.pre = 20 is specified too small for practical \n##D # purposes - the out-of-bag error estimates of the forests \n##D # constructed during optimization will be much too variable!!\n##D # In practice, num.trees.pre = 500 (default value) or a \n##D # larger number should be used.\n##D divfor(Species ~ ., data = iris, nsplits = tuneres$nsplitsopt, \n##D proptry = tuneres$proptryopt, num.trees = 20)\n##D # NOTE again: num.trees = 20 is specified too small for practical purposes.\n##D \n##D ## Prediction\n##D train.idx <- sample(nrow(iris), 2/3 * nrow(iris))\n##D iris.train <- iris[train.idx, ]\n##D iris.test <- iris[-train.idx, ]\n##D tuneres <- tunedivfor(formula = Species ~ ., data = iris.train, num.trees.pre = 20)\n##D # NOTE again: num.trees.pre = 20 is specified too small for practical purposes.\n##D rg.iris <- divfor(Species ~ ., data = iris.train, nsplits = tuneres$nsplitsopt, \n##D proptry = tuneres$proptryopt, num.trees = 20)\n##D # NOTE again: num.trees = 20 is specified too small for practical purposes.\n##D pred.iris <- predict(rg.iris, data = iris.test)\n##D table(iris.test$Species, pred.iris$predictions)\n##D \n##D ## Variable importance\n##D rg.iris <- divfor(Species ~ ., data = iris, importance = \"permutation\", num.trees = 20)\n##D # NOTE again: num.trees = 20 is specified too small for practical purposes.\n##D rg.iris$variable.importance\n## End(Not run)\n\n\n\n"} {"package":"diversityForest","topic":"interactionfor","snippet":"### Name: interactionfor\n### Title: Construct an interaction forest prediction rule and calculate\n### EIM values as described in Hornung & Boulesteix (2022).\n### Aliases: interactionfor\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Load package:\n##D \n##D library(\"diversityForest\")\n##D \n##D \n##D \n##D ## Set seed to make results reproducible:\n##D \n##D set.seed(1234)\n##D \n##D \n##D \n##D ## Construct interaction forests and calculate EIM values:\n##D \n##D \n##D # Binary outcome:\n##D data(zoo)\n##D modelcat <- interactionfor(dependent.variable.name = \"type\", data = zoo, \n##D num.trees = 20)\n##D \n##D \n##D # Metric outcome:\n##D data(stock)\n##D modelcont <- interactionfor(dependent.variable.name = \"company10\", data = stock, \n##D num.trees = 20) \n##D \n##D \n##D # Survival outcome:\n##D library(\"survival\")\n##D mgus2$id <- NULL # 'mgus2' data set is contained in the 'survival' package\n##D \n##D # categorical variables need to be of factor format - important!!\n##D mgus2$sex <- factor(mgus2$sex)\n##D mgus2$pstat <- 
factor(mgus2$pstat)\n##D \n##D # Remove the second time variable 'ptime':\n##D mgus2$ptime <- NULL\n##D \n##D # Remove missing values:\n##D mgus2 <- mgus2[complete.cases(mgus2),]\n##D \n##D # Take subset to make the calculations less computationally\n##D # expensive for the example (in actual applications, we would of course\n##D # use the whole data set):\n##D mgus2sub <- mgus2[sample(1:nrow(mgus2), size=500),]\n##D \n##D # Apply 'interactionfor':\n##D modelsurv <- interactionfor(formula = Surv(futime, death) ~ ., data=mgus2sub, num.trees=20)\n##D \n##D # NOTE: num.trees = 20 (in the above) would be much too small for practical \n##D # purposes. This small number of trees was simply used to keep the\n##D # runtime of the example short.\n##D # The default number of trees is num.trees = 20000 if EIM values are calculated\n##D # and num.trees = 2000 otherwise.\n##D \n##D \n##D \n##D ## Inspect the rankings of the variables and variable pairs with respect to \n##D ## the univariable, quantitative, and qualitative EIM values:\n##D \n##D # Univariable EIM values: \n##D modelcat$eim.univ.sorted\n##D \n##D # Pairs with top quantitative EIM values:\n##D modelcat$eim.quant.sorted[1:5]\n##D \n##D # Pairs with top qualitative EIM values:\n##D modelcat$eim.qual.sorted[1:5]\n##D \n##D \n##D \n##D ## Investigate visually the forms of the interaction effects of the variable pairs with\n##D ## largest quantitative and qualitative EIM values:\n##D \n##D plot(modelcat)\n##D plotEffects(modelcat, type=\"quant\") # type=\"quant\" is default.\n##D plotEffects(modelcat, type=\"qual\")\n##D \n##D \n##D \n##D ## Prediction:\n##D \n##D # Separate 'zoo' data set randomly in training\n##D # and test data:\n##D \n##D data(zoo)\n##D train.idx <- sample(nrow(zoo), 2/3 * nrow(zoo))\n##D zoo.train <- zoo[train.idx, ]\n##D zoo.test <- zoo[-train.idx, ]\n##D \n##D # Construct interaction forest on training data:\n##D # NOTE again: num.trees = 20 is specified too small for practical purposes.\n##D modelcattrain <- interactionfor(dependent.variable.name = \"type\", data = zoo.train, \n##D importance = \"none\", num.trees = 20)\n##D # NOTE: Because we are only interested in prediction here, we do not\n##D # calculate EIM values (by setting importance = \"none\"), because this\n##D # speeds up calculations.\n##D \n##D # Predict class values of the test data:\n##D pred.zoo <- predict(modelcattrain, data = zoo.test)\n##D \n##D # Compare predicted and true class values of the test data:\n##D table(zoo.test$type, pred.zoo$predictions)\n## End(Not run)\n\n\n\n"} {"package":"diversityForest","topic":"plot.interactionfor","snippet":"### Name: plot.interactionfor\n### Title: Plot method for 'interactionfor' objects\n### Aliases: plot.interactionfor\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Load package:\n##D \n##D library(\"diversityForest\")\n##D \n##D \n##D \n##D ## Set seed to make results reproducible:\n##D \n##D set.seed(1234)\n##D \n##D \n##D \n##D ## Construct interaction forest and calculate EIM values:\n##D \n##D data(stock)\n##D model <- interactionfor(dependent.variable.name = \"company10\", data = stock, \n##D num.trees = 20)\n##D \n##D # NOTE: num.trees = 20 (in the above) would be much too small for practical \n##D # purposes. 
This small number of trees was simply used to keep the\n##D # runtime of the example short.\n##D # The default number of trees is num.trees = 20000 if EIM values are calculated\n##D # and num.trees = 2000 otherwise.\n##D \n##D \n##D \n##D ## When using the plot() function without further specifications,\n##D ## by default the estimated bivariable influences of the two pairs with largest quantitative\n##D ## and qualitative EIM values are shown:\n##D \n##D plot(model)\n##D \n##D # It is, however, also possible to change the numbers of\n##D # pairs with largest quantitative and qualitative EIM values\n##D # to be shown:\n##D \n##D plot(model, numpairsquant = 4, numpairsqual = 3)\n##D \n## End(Not run)\n\n\n\n"} {"package":"diversityForest","topic":"plotEffects","snippet":"### Name: plotEffects\n### Title: Interaction forest plots: exploring interaction forest results\n### through visualisation\n### Aliases: plotEffects\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Load package:\n##D \n##D library(\"diversityForest\")\n##D \n##D \n##D \n##D ## Set seed to make results reproducible:\n##D \n##D set.seed(1234)\n##D \n##D \n##D \n##D ## Construct interaction forest and calculate EIM values:\n##D \n##D data(stock)\n##D model <- interactionfor(dependent.variable.name = \"company10\", data = stock, \n##D num.trees = 20)\n##D \n##D # NOTE: num.trees = 20 (in the above) would be much too small for practical \n##D # purposes. This small number of trees was simply used to keep the\n##D # runtime of the example short.\n##D # The default number of trees is num.trees = 20000 if EIM values are calculated\n##D # and num.trees = 2000 otherwise.\n##D \n##D \n##D \n##D ## Obtain a first overview by applying the plot() function for\n##D ## interactionfor objects:\n##D \n##D plot(model)\n##D \n##D \n##D \n##D ## Several possible application cases of the plotEffects() function:\n##D \n##D # Visualise the estimated bivariable influences of the five variable pairs with the \n##D # largest quantitative EIM values:\n##D \n##D plotEffects(model) # type=\"quant\" is default.\n##D \n##D \n##D # Visualise the estimated bivariable influences of the five pairs with the \n##D # largest qualitative EIM values:\n##D \n##D plotEffects(model, type=\"qual\")\n##D \n##D \n##D # Visualise the estimated bivariable influences of all (eight) pairs that involve\n##D # the variable \"company7\" sorted in decreasing order according to the\n##D # qualitative EIM values:\n##D \n##D plotEffects(model, allwith=\"company7\", type=\"qual\", numpairs=8)\n##D \n##D \n##D # Visualise the estimated bivariable influences of the pairs with third and fifth\n##D # largest qualitative EIM values:\n##D \n##D plotEffects(model, type=\"qual\", indpairs=c(3,5))\n##D \n##D \n##D # Visualise the estimated bivariable influences of the pairs (\"company3\", \"company5\") and\n##D # (\"company1\", \"company9\"):\n##D \n##D plotEffects(model, pairs=list(c(\"company3\", \"company5\"), c(\"company1\", \"company9\")))\n##D \n##D \n##D \n##D ## Saving of plots generated with the plotEffects() function (e.g., for use in publications):\n##D \n##D # Apply plotEffects() to obtain plots for the five variable pairs\n##D # with the largest qualitative EIM values and store these plots in\n##D # an object 'ps':\n##D \n##D ps <- plotEffects(model, type=\"qual\", pvalues=FALSE, twoplots=FALSE, addtitles=FALSE, plotit=FALSE)\n##D \n##D # pvalues = FALSE states that no p-values should be shown in the plots,\n##D # because these might not be desired in plots meant for 
publication.\n##D # twoplots = FALSE ensures that we get one plot for each page instead of two plots per page.\n##D # addtitles = FALSE removes the automatically generated titles, because these are likely\n##D # not desired in publications.\n##D # plotit = FALSE ensures that the plots are not displayed, but only returned (invisibly) \n##D # by plotEffects().\n##D \n##D \n##D # Save the plot with second largest qualitative EIM value:\n##D \n##D p1 <- ps[[2]]\n##D \n##D # Add title:\n##D library(\"ggpubr\")\n##D p1 <- annotate_figure(p1, top = text_grob(\"My descriptive plot title 1\", face = \"bold\", size = 14))\n##D p1\n##D \n##D # Save as PDF:\n##D # library(\"ggplot2\")\n##D # ggsave(file=\"mypathtofolder/FigureXY1.pdf\", width=14, height=6)\n##D \n##D \n##D # Save the plot with fifth largest qualitative EIM value:\n##D \n##D p2 <- ps[[5]]\n##D \n##D # Add title:\n##D p2 <- annotate_figure(p2, top = text_grob(\"My descriptive plot title 2\", face = \"bold\", size = 14))\n##D p2\n##D \n##D # Save as PDF:\n##D # ggsave(file=\"mypathtofolder/FigureXY1.pdf\", width=14, height=6)\n##D \n##D \n##D # Combine both of the above plots:\n##D p <- ggarrange(p1, p2, nrow = 2)\n##D p\n##D \n##D # Save the combined plot:\n##D # ggsave(file=\"mypathtofolder/FigureXYcombined.pdf\", width=14, height=11)\n##D \n##D # NOTE: Using plotEffects() it is not possible to change the plots \n##D # themselves (by e.g., increasing the label sizes or changing the \n##D # axes ranges). However, the function plotPair() can be used to change \n##D # the plots themselves.\n##D \n## End(Not run)\n\n\n\n"} {"package":"diversityForest","topic":"plotPair","snippet":"### Name: plotPair\n### Title: Plot of the (estimated) simultaneous influence of two variables\n### Aliases: plotPair\n\n### ** Examples\n\n## Not run: \n##D \n##D ## Load package:\n##D \n##D library(\"diversityForest\")\n##D \n##D \n##D \n##D ## Visualise the estimated bivariable influence of 'toothed' and 'feathers' on\n##D ## the probability of type=\"mammal\":\n##D \n##D data(zoo)\n##D plotPair(pair = c(\"toothed\", \"feathers\"), yvarname=\"type\", data = zoo)\n##D \n##D \n##D \n##D ## Visualise the estimated bivariable influence of 'creat' and 'hgb' on\n##D ## survival (more precisely, on the log hazards ratio compared to the\n##D ## median effect):\n##D \n##D library(\"survival\")\n##D mgus2compl <- mgus2[complete.cases(mgus2),]\n##D plotPair(pair=c(\"creat\", \"hgb\"), yvarname=\"futime\", statusvarname = \"death\", data=mgus2compl)\n##D \n##D # Problem: The outliers in the left plot make it difficult to see what is going\n##D # on in the region with creat values smaller than about two even though the\n##D # majority of values lie there.\n##D \n##D # --> Solution: We re-run the above line setting returnseparate = TRUE, because\n##D # this allows to get the two ggplot plots separately, which can then be manipulated\n##D # to change the x-axis range in order to remove the outliers:\n##D \n##D ps <- plotPair(pair=c(\"creat\", \"hgb\"), yvarname=\"futime\", statusvarname = \"death\", \n##D data=mgus2compl, returnseparate = TRUE)\n##D \n##D # Change the x-axis range:\n##D library(\"ggplot2\")\n##D ps[[1]] + xlim(c(0.5,2))\n##D # Save the plot:\n##D # ggsave(file=\"mypathtofolder/FigureXY1.pdf\", width=7, height=6)\n##D \n##D # We can, for example, also change the label sizes of the second plot:\n##D # With original label sizes:\n##D ps[[2]]\n##D # With larger label sizes:\n##D ps[[2]] + theme(axis.title=element_text(size=15))\n##D # Save the plot:\n##D # 
library(\"ggplot2\")\n##D # ggsave(file=\"mypathtofolder/FigureXY2.pdf\", width=7, height=6)\n##D \n## End(Not run)\n\n\n\n"} {"package":"diversityForest","topic":"stock","snippet":"### Name: stock\n### Title: Data on stock prices of aerospace companies\n### Aliases: stock\n\n### ** Examples\n\n\n## Load data:\ndata(stock)\n\n## Dimension of data:\ndim(stock)\n\n## First rows of data:\nhead(stock) \n\n\n\n"} {"package":"diversityForest","topic":"tunedivfor","snippet":"### Name: tunedivfor\n### Title: Optimization of the values of the tuning parameters 'nsplits'\n### and 'proptry'\n### Aliases: tunedivfor\n\n### ** Examples\n\n\n## Load package:\n\nlibrary(\"diversityForest\")\n\n\n## Set seed to obtain reproducible results:\n\nset.seed(1234)\n\n\n## Tuning parameter optimization for the iris data set:\n\ntuneres <- tunedivfor(formula = Species ~ ., data = iris, num.trees.pre = 20)\n# NOTE: num.trees.pre = 20 is specified too small for practical \n# purposes - the out-of-bag error estimates of the forests \n# constructed during optimization will be much too variable!!\n# In practice, num.trees.pre = 500 (default value) or a \n# larger number should be used.\n\ntuneres\n\ntuneres$nsplitsopt\ntuneres$proptryopt\ntuneres$tunegrid\ntuneres$ooberrs\n\n\n\n"} {"package":"diversityForest","topic":"zoo","snippet":"### Name: zoo\n### Title: Data on biological species\n### Aliases: zoo\n\n### ** Examples\n\n\n##' Load data:\ndata(zoo)\n\n##' Numbers of observations in the two classes:\ntable(zoo$type)\n\n##' Dimension of data:\ndim(zoo)\n\n##' First rows of data:\nhead(zoo) \n\n\n\n"} {"package":"autoFRK","topic":"autoFRK","snippet":"### Name: autoFRK\n### Title: Automatic Fixed Rank Kriging\n### Aliases: autoFRK\n\n### ** Examples\n\n#### generating data from two eigenfunctions\noriginalPar <- par(no.readonly = TRUE)\nset.seed(0)\nn <- 150\ns <- 5\ngrid1 <- grid2 <- seq(0, 1, l = 30)\ngrids <- expand.grid(grid1, grid2)\nfn <- matrix(0, 900, 2)\nfn[, 1] <- cos(sqrt((grids[, 1] - 0)^2 + (grids[, 2] - 1)^2) * pi)\nfn[, 2] <- cos(sqrt((grids[, 1] - 0.75)^2 + (grids[, 2] - 0.25)^2) * 2 * pi)\n\n#### single realization simulation example\nw <- c(rnorm(1, sd = 5), rnorm(1, sd = 3))\ny <- fn %*% w\nobs <- sample(900, n)\nz <- y[obs] + rnorm(n) * sqrt(s)\nX <- grids[obs, ]\n\n#### method1: automatic selection and prediction\none.imat <- autoFRK(Data = z, loc = X, maxK = 15)\nyhat <- predict(one.imat, newloc = grids)\n\n#### method2: user-specified basis functions\nG <- mrts(X, 15)\nGpred <- predict(G, newx = grids)\none.usr <- autoFRK(Data = z, loc = X, G = G)\nyhat2 <- predict(one.usr, newloc = grids, basis = Gpred)\n\nrequire(fields)\npar(mfrow = c(2, 2))\nimage.plot(matrix(y, 30, 30), main = \"True\")\npoints(X, cex = 0.5, col = \"grey\")\nimage.plot(matrix(yhat$pred.value, 30, 30), main = \"Predicted\")\npoints(X, cex = 0.5, col = \"grey\")\nimage.plot(matrix(yhat2$pred.value, 30, 30), main = \"Predicted (method 2)\")\npoints(X, cex = 0.5, col = \"grey\")\nplot(yhat$pred.value, yhat2$pred.value, mgp = c(2, 0.5, 0))\npar(originalPar)\n#### end of single realization simulation example\n\n#### independent multi-realization simulation example\nset.seed(0)\nwt <- matrix(0, 2, 20)\nfor (tt in 1:20) wt[, tt] <- c(rnorm(1, sd = 5), rnorm(1, sd = 3))\nyt <- fn %*% wt\nobs <- sample(900, n)\nzt <- yt[obs, ] + matrix(rnorm(n * 20), n, 20) * sqrt(s)\nX <- grids[obs, ]\nmulti.imat <- autoFRK(Data = zt, loc = X, maxK = 15)\nGpred <- predict(multi.imat$G, newx = grids)\n\nG <- multi.imat$G\nMhat <- multi.imat$M\ndec <- 
eigen(G %*% Mhat %*% t(G))\nfhat <- Gpred %*% Mhat %*% t(G) %*% dec$vector[, 1:2]\npar(mfrow = c(2, 2))\nimage.plot(matrix(fn[, 1], 30, 30), main = \"True Eigenfn 1\")\nimage.plot(matrix(fn[, 2], 30, 30), main = \"True Eigenfn 2\")\nimage.plot(matrix(fhat[, 1], 30, 30), main = \"Estimated Eigenfn 1\")\nimage.plot(matrix(fhat[, 2], 30, 30), main = \"Estimated Eigenfn 2\")\npar(originalPar)\n#### end of independent multi-realization simulation example\n\n\n"} {"package":"autoFRK","topic":"mrts","snippet":"### Name: mrts\n### Title: Multi-Resolution Thin-plate Spline Basis Functions\n### Aliases: mrts\n\n### ** Examples\n\noriginalPar <- par(no.readonly = TRUE)\nknot <- seq(0, 1, l = 30)\nb <- mrts(knot, 30)\nx0 <- seq(0, 1, l = 200)\nbx <- predict(b, x0)\npar(mfrow = c(5, 6), mar = c(0, 0, 0, 0))\nfor (i in 1:30) {\n plot(bx[, i], type = \"l\", axes = FALSE)\n box()\n}\npar(originalPar)\n\n\n"} {"package":"MVQuickGraphs","topic":"bvNormalContour","snippet":"### Name: bvNormalContour\n### Title: Bivariate Normal Contour Ellipse\n### Aliases: bvNormalContour\n\n### ** Examples\n\nmu <- c(-1,8)\nSigma <- matrix(c(3,2,2,4), ncol = 2)\n# Draw a 90% contour\nbvNormalContour(mu = mu, Sigma = Sigma, alpha = 0.10)\n\n\n"} {"package":"MVQuickGraphs","topic":"confidenceEllipse","snippet":"### Name: confidenceEllipse\n### Title: Bivariate Normal Confidence Ellipse\n### Aliases: confidenceEllipse\n\n### ** Examples\n\n# 90% Confidence Ellipse for Reading and Vocab from ability.cov\nx.bar <- ability.cov$center[5:6]\nSigma <- ability.cov$cov[5:6,5:6]\nn <- ability.cov$n.obs\np <- length(ability.cov$center)\n\nconfidenceEllipse(X.mean = x.bar,\n eig = eigen(Sigma),\n n = n, p = p,\n alpha = 0.10)\n\n\n"} {"package":"MVQuickGraphs","topic":"plot4in1","snippet":"### Name: plot4in1\n### Title: Plot 4-in-1\n### Aliases: plot4in1\n\n### ** Examples\n\nout <- lm(Girth ~ Volume, data = trees)\nplot4in1(out)\n\n\n"} {"package":"cytofan","topic":"do_fan","snippet":"### Name: do_fan\n### Title: Compute summary statistics for 'stat_fan'\n### Aliases: do_fan\n\n### ** Examples\n\nFanEuStockMarkets <- lapply(colnames(EuStockMarkets),function(id) {\n res <- do_fan(EuStockMarkets[,id])\n res$id <- id\n return(res)\n})\nFanEuStockMarkets <- do.call(rbind,FanEuStockMarkets)\n\n\n\n"} {"package":"cytofan","topic":"geom_fan","snippet":"### Name: geom_fan\n### Title: Fan plots for trend and population visualizations\n### Aliases: geom_fan stat_fan\n\n### ** Examples\n\n# reformat dataset from short-wide to tall-skinny\nEuStockMarkets_ts <- lapply(colnames(EuStockMarkets),function(id) {\n data.frame(id=id,value=as.numeric(EuStockMarkets[,id]))\n})\nEuStockMarkets_ts <- do.call('rbind',EuStockMarkets_ts)\n\n# plot the distribution of the different stock markets\nggplot(EuStockMarkets_ts,aes(x=id,y=value))+\n geom_fan()\n\n# Change the step\nggplot(EuStockMarkets_ts,aes(x=id,y=value))+\n geom_fan(step=0.05)\n\n# change the default color\nggplot(EuStockMarkets_ts,aes(x=id,y=value))+\n geom_fan(colorbase='Greens')\n\n# any valid RColorBrewer palette will work\nggplot(EuStockMarkets_ts,aes(x=id,y=value))+\n geom_fan(colorbase='RdYlGn')\n\n\n\n"} {"package":"PUPAIM","topic":"BET.LM","snippet":"### Name: BET.LM\n### Title: Brunauer-Emett-Teller (BET) Isotherm Linear Analysis\n### Aliases: BET.LM\n\n### ** Examples\n\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nBET.LM(Ce,Qe)\n\n\n"} 
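The PUPAIM records that follow all fit adsorption isotherms to the same paired vectors of equilibrium concentration (Ce) and adsorption capacity (Qe). As a point of reference for the kind of linearization the *.LM functions automate, here is a minimal base-R sketch of the classical Freundlich transformation log(Qe) = log(Kf) + (1/n)*log(Ce); the names Kf and n are illustrative, and the package's own freundlich.LM() may differ in its internals and reporting.

Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
fit_lin <- lm(log(Qe) ~ log(Ce))       # linearized Freundlich fit
Kf <- unname(exp(coef(fit_lin)[1]))    # capacity constant (illustrative name)
n  <- unname(1 / coef(fit_lin)[2])     # intensity parameter (illustrative name)
summary(fit_lin)$r.squared             # goodness of the linearized fit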
{"package":"PUPAIM","topic":"BETanalysis","snippet":"### Name: BETanalysis\n### Title: Brunauer-Emett-Teller (BET) Isotherm Non-Linear Analysis\n### Aliases: BETanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nBETanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"FS3analysis","snippet":"### Name: FS3analysis\n### Title: Fritz-Schlunder Three Parameter Non-Linear Analysis\n### Aliases: FS3analysis\n\n### ** Examples\n\nCe <- c(0.9613, 1.0895, 1.5378, 1.9862, 3.3314, 7.8153, 11.4024, 15.8862)\nQe <- c(2.5546, 4.4150, 5.8558, 7.1387, 8.8092, 13.1921, 15.7966, 18.4483)\nFS3analysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"FS4analysis","snippet":"### Name: FS4analysis\n### Title: Fritz-Schlunder Four Parameter Isotherm Non-Linear Analysis\n### Aliases: FS4analysis\n\n### ** Examples\n\n## Not run: \n##D Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600,0.63607, 0.80435, 1.10327, 1.58223)\n##D Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299,0.15379, 0.15735, 0.15735, 0.16607)\n##D FS4analysis(Ce,Qe)\n## End(Not run)\n\n\n"} {"package":"PUPAIM","topic":"SSLangmuir1analysis","snippet":"### Name: SSLangmuir1analysis\n### Title: Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir\n### First Linear Model\n### Aliases: SSLangmuir1analysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nSSLangmuir1analysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"SSLangmuir2analysis","snippet":"### Name: SSLangmuir2analysis\n### Title: Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir\n### Second Linear Model\n### Aliases: SSLangmuir2analysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nSSLangmuir2analysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"SSLangmuir3analysis","snippet":"### Name: SSLangmuir3analysis\n### Title: Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir\n### Third Linear Model\n### Aliases: SSLangmuir3analysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nSSLangmuir3analysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"SSLangmuir4analysis","snippet":"### Name: SSLangmuir4analysis\n### Title: Langmuir Isotherm Nonlinear Analysis via selfStart and Langmuir\n### Fourth Linear Model\n### Aliases: SSLangmuir4analysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nSSLangmuir4analysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"aranovichanalysis","snippet":"### Name: aranovichanalysis\n### Title: Aranovich Isotherm Non-Linear Analysis\n### Aliases: aranovichanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\naranovichanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"bauduanalysis","snippet":"### Name: bauduanalysis\n### Title: 
Baudu Isotherm Non-Linear Analysis\n### Aliases: bauduanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nbauduanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"dubininradushkevichanalysis","snippet":"### Name: dubininradushkevichanalysis\n### Title: Dubinin-Radushkevich Isotherm Non-Linear Analysis\n### Aliases: dubininradushkevichanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nTemp <- 298\ndubininradushkevichanalysis(Ce, Qe, Temp)\n\n\n"} {"package":"PUPAIM","topic":"dubininraduskevich.LM","snippet":"### Name: dubininraduskevich.LM\n### Title: Dubinin-Radushkevich Isotherm Linear Analysis\n### Aliases: dubininraduskevich.LM dubininradushkevich.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nTemp <- 298\ndubininradushkevich.LM (Ce,Qe,Temp)\n\n\n"} {"package":"PUPAIM","topic":"elovich.LM","snippet":"### Name: elovich.LM\n### Title: Elovich Isotherm Linear Analysis\n### Aliases: elovich.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nelovich.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"elovichanalysis","snippet":"### Name: elovichanalysis\n### Title: Elovich Isotherm Non-Linear Analysis\n### Aliases: elovichanalysis\n\n### ** Examples\n\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nelovichanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"floryhuggins.LM","snippet":"### Name: floryhuggins.LM\n### Title: Flory-Huggins Isotherm Linear Analysis\n### Aliases: floryhuggins.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607,\n0.80435, 1.10327, 1.58223)\ntheta <- c(0.1972984, 0.3487013, 0.6147560, 0.7432401, 0.8854408,\n0.8900708, 0.9106746, 0.9106746, 0.9611422)\nfloryhuggins.LM (Ce,theta)\n\n\n"} {"package":"PUPAIM","topic":"floryhugginsanalysis","snippet":"### Name: floryhugginsanalysis\n### Title: Flory-Huggins Isotherm Non-Linear Analysis\n### Aliases: floryhugginsanalysis\n\n### ** Examples\n\ntheta <- c(0.19729, 0.34870, 0.61475, 0.74324, 0.88544, 0.89007, 0.91067, 0.91067, 0.96114)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nfloryhugginsanalysis(Ce, theta)\n\n\n"} {"package":"PUPAIM","topic":"fowlerguggenheim.LM","snippet":"### Name: fowlerguggenheim.LM\n### Title: Fowler-Guggenheim Isotherm Linear Analysis\n### Aliases: fowlerguggenheim.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607,\n0.80435, 1.10327, 1.58223)\ntheta <- c(0.1972984, 0.3487013, 0.6147560, 0.7432401, 0.8854408,\n0.8900708, 0.9106746, 0.9106746, 0.9611422)\nTemp <- 298\nfowlerguggenheim.LM(Ce, theta, Temp)\n\n\n"} {"package":"PUPAIM","topic":"fowlerguggenheimanalysis","snippet":"### Name: fowlerguggenheimanalysis\n### Title: Fowler-Guggenheim Isotherm Non-Linear Analysis\n### Aliases: fowlerguggenheimanalysis\n\n### ** Examples\n\ntheta <- 
c(0.19729, 0.34870, 0.61475, 0.74324, 0.88544, 0.89007, 0.91067, 0.91067, 0.96114)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nTemp <- 298\nfowlerguggenheimanalysis(Ce,theta,Temp)\n\n\n"} {"package":"PUPAIM","topic":"freundlich.LM","snippet":"### Name: freundlich.LM\n### Title: Freundlich Isotherm Linear Analysis\n### Aliases: freundlich.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nfreundlich.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"freundlichanalysis","snippet":"### Name: freundlichanalysis\n### Title: Freundlich Isotherm Non-Linear Analysis\n### Aliases: freundlichanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nfreundlichanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"halsey.LM","snippet":"### Name: halsey.LM\n### Title: Halsey Isotherm Linear Analysis\n### Aliases: halsey.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nhalsey.LM(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"halseyanalysis","snippet":"### Name: halseyanalysis\n### Title: Halsey Isotherm Non-Linear Analysis\n### Aliases: halseyanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nhalseyanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"harkinsjura.LM","snippet":"### Name: harkinsjura.LM\n### Title: Harkins-Jura Isotherm Linear Analysis\n### Aliases: harkinsjura.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nharkinsjura.LM(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"harkinsjuraanalysis","snippet":"### Name: harkinsjuraanalysis\n### Title: Harkins-Jura Isotherm Non-Linear Analysis\n### Aliases: harkinsjuraanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nharkinsjuraanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"henryanalysis","snippet":"### Name: henryanalysis\n### Title: Henry Isotherm Linear Analysis\n### Aliases: henryanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nhenryanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"hill.LM","snippet":"### Name: hill.LM\n### Title: Hill Isotherm Linear Analysis\n### Aliases: hill.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nhill.LM(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"hillanalysis","snippet":"### Name: hillanalysis\n### Title: Hill Isotherm Non-Linear Analysis\n### Aliases: hillanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 
0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nhillanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"hilldeboer.LM","snippet":"### Name: hilldeboer.LM\n### Title: Hill-Deboer Isotherm Linear Analysis\n### Aliases: hilldeboer.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607,\n0.80435, 1.10327, 1.58223)\ntheta <- c(0.1972984, 0.3487013, 0.6147560, 0.7432401, 0.8854408,\n0.8900708, 0.9106746, 0.9106746, 0.9611422)\nTemp <- 298.15\nhilldeboer.LM(Ce,theta, Temp)\n\n\n"} {"package":"PUPAIM","topic":"hilldeboeranalysis","snippet":"### Name: hilldeboeranalysis\n### Title: Hill-Deboer Isotherm Non-Linear Analysis\n### Aliases: hilldeboeranalysis\n\n### ** Examples\n\ntheta <- c(0.19729, 0.34870, 0.61475, 0.74324, 0.88544, 0.89007, 0.91067, 0.91067, 0.96114)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nTemp <- 298\nhilldeboeranalysis(Ce,theta, Temp)\n\n\n"} {"package":"PUPAIM","topic":"jossens.LM","snippet":"### Name: jossens.LM\n### Title: Jossens Isotherm Linear Analysis\n### Aliases: jossens.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\njossens.LM(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"jossensanalysis","snippet":"### Name: jossensanalysis\n### Title: Jossens Isotherm Non-Linear Analysis\n### Aliases: jossensanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\njossensanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"jovanovic.LM","snippet":"### Name: jovanovic.LM\n### Title: Jovanovic Isotherm Linear Analysis\n### Aliases: jovanovic.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\njovanovic.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"jovanovicanalysis","snippet":"### Name: jovanovicanalysis\n### Title: Jovanovic Isotherm Non-Linear Analysis\n### Aliases: jovanovicanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\njovanovicanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"kahnanalysis","snippet":"### Name: kahnanalysis\n### Title: Kahn Isotherm Non-Linear Analysis\n### Aliases: kahnanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nkahnanalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"kiselev.LM","snippet":"### Name: kiselev.LM\n### Title: Kiselev Isotherm Linear Analysis\n### Aliases: kiselev.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607,\n0.80435, 1.10327, 1.58223)\ntheta <- c(0.1972984, 0.3487013, 0.6147560, 0.7432401, 0.8854408,\n0.8900708, 0.9106746, 0.9106746, 0.9611422)\nkiselev.LM(Ce,theta)\n\n\n"} {"package":"PUPAIM","topic":"kiselevanalysis","snippet":"### Name: kiselevanalysis\n### Title: Kiselev Isotherm Non-Linear Analysis\n### Aliases: kiselevanalysis\n\n### ** 
Examples\n\ntheta <- c(0.19729, 0.34870, 0.61475, 0.74324, 0.88544, 0.89007, 0.91067, 0.91067, 0.96114)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nkiselevanalysis(Ce, theta)\n\n\n"} {"package":"PUPAIM","topic":"koblecarrigan.LM","snippet":"### Name: koblecarrigan.LM\n### Title: Koble-Carrigan Isotherm Linear Analysis\n### Aliases: koblecarrigan.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nkoblecarrigan.LM(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"koblecarrigananalysis","snippet":"### Name: koblecarrigananalysis\n### Title: Koble-Carrigan Isotherm Nonlinear Analysis\n### Aliases: koblecarrigananalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nkoblecarrigananalysis(Ce, Qe)\n\n\n"} {"package":"PUPAIM","topic":"langmuir1.LM","snippet":"### Name: langmuir1.LM\n### Title: Langmuir Isotherm First Linear Form Analysis\n### Aliases: langmuir1.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nlangmuir1.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"langmuir2.LM","snippet":"### Name: langmuir2.LM\n### Title: Langmuir Isotherm Second Linear Form Analysis\n### Aliases: langmuir2.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nlangmuir2.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"langmuir3.LM","snippet":"### Name: langmuir3.LM\n### Title: Langmuir Isotherm Third Linear Form Analysis\n### Aliases: langmuir3.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nlangmuir3.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"langmuir4.LM","snippet":"### Name: langmuir4.LM\n### Title: Langmuir Isotherm Fourth Linear Form Analysis\n### Aliases: langmuir4.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nlangmuir4.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"langmuiranalysis","snippet":"### Name: langmuiranalysis\n### Title: Langmuir Isotherm Nonlinear Analysis\n### Aliases: langmuiranalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nlangmuiranalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"marckzewskijaroniecanalysis","snippet":"### Name: marckzewskijaroniecanalysis\n### Title: Marckzewski-Jaroniec Isotherm Nonlinear Analysis\n### Aliases: marckzewskijaroniecanalysis\n\n### ** Examples\n\nQe <- c(0.19729, 0.34870, 0.61475, 0.74324, 0.88544, 0.89007, 0.91067, 0.91067, 0.96114)\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nmarckzewskijaroniecanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"radkeprausnitzanalysis","snippet":"### Name: 
radkeprausnitzanalysis\n### Title: Radke-Prausnitz Isotherm Nonlinear Analysis\n### Aliases: radkeprausnitzanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nradkeprausnitzanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"radkepraustnitz.LM","snippet":"### Name: radkepraustnitz.LM\n### Title: Radke-Prausnitz Isotherm Linear Analysis\n### Aliases: radkepraustnitz.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nradkepraustnitz.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"redlichpeterson.LM","snippet":"### Name: redlichpeterson.LM\n### Title: Redlich-Peterson Isotherm Linear Analysis\n### Aliases: redlichpeterson.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nredlichpeterson.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"redlichpetersonanalysis","snippet":"### Name: redlichpetersonanalysis\n### Title: Redlich-Peterson Isotherm Nonlinear Analysis\n### Aliases: redlichpetersonanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nredlichpetersonanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"sips.LM","snippet":"### Name: sips.LM\n### Title: Sips Isotherm Linear Analysis\n### Aliases: sips.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nsips.LM(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"sipsanalysis","snippet":"### Name: sipsanalysis\n### Title: Sips Isotherm Nonlinear Analysis\n### Aliases: sipsanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nsipsanalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"temkin.LM","snippet":"### Name: temkin.LM\n### Title: Temkin Isotherm Linear Analysis\n### Aliases: temkin.LM\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nTemp <- 298.15\ntemkin.LM(Ce,Qe,Temp)\n\n\n"} {"package":"PUPAIM","topic":"temkinanalysis","snippet":"### Name: temkinanalysis\n### Title: Temkin Isotherm Nonlinear Analysis\n### Aliases: temkinanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nTemp <- 298\ntemkinanalysis(Ce, Qe, Temp)\n\n\n"} {"package":"PUPAIM","topic":"tothanalysis","snippet":"### Name: tothanalysis\n### Title: Toth Isotherm Nonlinear Analysis\n### Aliases: tothanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\ntothanalysis(Ce,Qe)\n\n\n"} 
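The *analysis records above fit the isotherm equations in their nonlinear form. Below is a minimal base-R sketch of such a fit for the Langmuir equation Qe = Qmax*KL*Ce/(1 + KL*Ce), using nls() with starting values taken from the standard Ce/Qe-versus-Ce linearization; Qmax and KL are illustrative names, and this is not the package's internal routine, which also reports error measures.

Ce <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)
Qe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)
lin <- lm(I(Ce / Qe) ~ Ce)                    # linearization: Ce/Qe = 1/(Qmax*KL) + Ce/Qmax
Qmax0 <- unname(1 / coef(lin)[2])             # starting value for the capacity
KL0   <- unname(coef(lin)[2] / coef(lin)[1])  # starting value for the affinity
fit_nl <- nls(Qe ~ Qmax * KL * Ce / (1 + KL * Ce),
              start = list(Qmax = Qmax0, KL = KL0))
coef(fit_nl)
sum(residuals(fit_nl)^2)                      # SSE, comparable across isotherm models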
{"package":"PUPAIM","topic":"volmeranalysis","snippet":"### Name: volmeranalysis\n### Title: Volmer Isotherm Non-Linear Analysis\n### Aliases: volmeranalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nvolmeranalysis(Ce,Qe)\n\n\n"} {"package":"PUPAIM","topic":"webervanvlietanalysis","snippet":"### Name: webervanvlietanalysis\n### Title: Weber-Van Vliet Isotherm Nonlinear Analysis\n### Aliases: webervanvlietanalysis\n\n### ** Examples\n\nCe <- c(0.01353, 0.04648, 0.13239, 0.27714, 0.41600, 0.63607, 0.80435, 1.10327, 1.58223)\nQe <- c(0.03409, 0.06025, 0.10622, 0.12842, 0.15299, 0.15379, 0.15735, 0.15735, 0.16607)\nwebervanvlietanalysis(Ce,Qe)\n\n\n"} {"package":"clusterCons","topic":"auc-class","snippet":"### Name: auc-class\n### Title: Class \"auc\"\n### Aliases: auc-class\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"auc\")\n\n\n"} {"package":"clusterCons","topic":"auc","snippet":"### Name: auc\n### Title: Calculate area under the curve statistics\n### Aliases: auc aucs\n\n### ** Examples\n\n#load up a test cluscomp result\ndata('testcmr');\n\n#look at the result structure\nsummary(testcmr);\n\n#calculate an individual AUC value for a consensus matrix\nac <- auc(testcmr$e1_kmeans_k2@cm);\n\n#calculate all of the AUC values from the \\code{cluscomp} result for algorithm 'kmeans'\nkmeanscmr <- testcmr[grep('kmeans',names(testcmr))];\nacs <- aucs(kmeanscmr);\n\n\n"} {"package":"clusterCons","topic":"aucplot","snippet":"### Name: aucplot\n### Title: Generate an area under the curve plot using lattice graphics\n### Aliases: aucplot\n\n### ** Examples\n\n#load up a test cluscomp result\ndata('testcmr');\n\n#look at the result structure\nsummary(testcmr);\n\n#calculate all of the AUC values from the \\code{cluscomp} result for algorithm 'kmeans'\nkmeanscmr <- testcmr[grep('kmeans',names(testcmr))];\nacs <- aucs(kmeanscmr);\n\n#plot the AUC curve\naucplot(acs);\n\n\n"} {"package":"clusterCons","topic":"checks","snippet":"### Name: checks\n### Title: Functions to check the integrity of various objects\n### Aliases: checks data_check validConsMatrixObject validMemRobListObject\n### validMemRobMatrixObject validMergeMatrixObject validAUCObject\n### validDkObject\n\n### ** Examples\n\n#load data\ndata(sim_profile);\n\n#check if this can be used by cluscomp\ndata_check(sim_profile);\n\n#perform a clusomp run\ncmr <- cluscomp(sim_profile,clmin=2,clmax=2,rep=10);\n\n#check one of the consensus matrices\nvalidConsMatrixObject(cmr$e1_kmeans_k2)\n\n\n\n"} {"package":"clusterCons","topic":"clrob","snippet":"### Name: clrob\n### Title: Calculate the cluster robustness from consensus clustering\n### results\n### Aliases: clrob\n\n### ** Examples\n\n#load cmr (consensus clustering result produced by cluscomp)\ndata(testcmr);\n\n#calculate the cluster robustness of the consensus matrix for pam where k=4\nclrob(testcmr$e1_kmeans_k4);\n\n#calculate the cluster robustness of the merge matrix in reference\n#to the clustering structrure of pam where k=4\nclrob(testcmr$merge_k4,testcmr$e1_kmeans_k4@rm);\n\n\n"} {"package":"clusterCons","topic":"cluscomp","snippet":"### Name: cluscomp\n### Title: Perform consensus clustering with the option of using multiple\n### algorithms and parameters and merging\n### Aliases: cluscomp\n\n### ** Examples\n\n#load test data\ndata(sim_profile);\n\n#perform a group of re-sampling clustering experiments accepting 
default parameters \n#for the clustering algorithms\ncmr <- cluscomp(\n sim_profile,\n algorithms=list('kmeans','pam'),\n merge=1,\n clmin=2,\n clmax=5,\n reps=5\n)\n\n#display resulting matrices contained in the consensus result list\nsummary(cmr);\n\n#display the cluster robustness for the pam k=4 consensus matrix\nclrob(cmr$e2_pam_k4);\n\n#plot a heatmap of the consensus matrix, note you access the cluster matrix object \n#through the cm slot\n#heatmap(cmr$e2_pam_k4@cm);\n\n#display the membership robustness for pam k=4 cluster 1\nmemrob(cmr$e2_pam_k4)$cluster1;\n\n#merged consensus example\n#data(testcmr);\n\n#calculate the membership robustness for the merge matrix when cluster number k=4,\n#in reference to the kmeans scaffold. (see memrob for more details). \n#mr <- memrob(testcmr$merge_k4,testcmr$e1_kmeans_k4@rm);\n\n#show the membership robustness for cluster 1\n#mr$cluster1;\n\n\n"} {"package":"clusterCons","topic":"consmatrix-class","snippet":"### Name: consmatrix-class\n### Title: Class \"consmatrix\"\n### Aliases: consmatrix-class consmatrix\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"consmatrix\");\n\n#you can access the slots in useful ways\n\n#load a cmr\ndata(testcmr);\n\n#get a consensus clustering matrix via the 'cm' slot\ncm <- testcmr$e1_kmeans_k4@cm;\n\n#this can be used as a distance matrix, e.g. for a heatmap\nheatmap(cm);\n\n#or as a new distance matrix\ndm <- data.frame(cm) #first convert to a data.frame\n#make sure names are the same for rows and columns\nnames(dm) <- row.names(dm);\n\n#you need to explicitly tell cluscomp that you are passing a distance matrix\ncmr2 <- cluscomp(dm,diss=TRUE,clmin=2,clmax=4,rep=2);\n\n#for merge consensus clustering you take advantage of the reference matrix (rm) slot\n#cluster robustness for kmeans with cluster number (k) = 3\nclrob(testcmr$merge_k3,testcmr$e1_kmeans_k3@rm);\n#membership robustness for cluster 1\nmemrob(testcmr$merge_k3,testcmr$e1_kmeans_k3@rm)$cluster1;\n\n\n"} {"package":"clusterCons","topic":"golub","snippet":"### Name: data\n### Title: Data sets for the clusterCons package\n### Aliases: golub sim_class sim_profile testcmr\n\n### ** Examples\n\n#cluster by class\ndata(sim_class);\ncutree(agnes(t(sim_class)),4);\n\n#cluster by profile\ndata(sim_profile);\ncutree(agnes(sim_profile),4);\n\n\n"} {"package":"clusterCons","topic":"deltak","snippet":"### Name: deltak\n### Title: Function to calculate the change in the area under the curve\n### (AUC) across a range of cluster number values\n### Aliases: deltak\n\n### ** Examples\n\n#load a test cluscomp result set\ndata(testcmr)\n\n#calculate all of the AUC values from the \\code{cluscomp} result for algorithm 'kmeans'\nkmeanscmr <- testcmr[grep('kmeans',names(testcmr))];\nacs <- aucs(kmeanscmr);\n\n#calculate the delta-K values\ndks <- deltak(acs);\n\n\n"} {"package":"clusterCons","topic":"dk-class","snippet":"### Name: dk-class\n### Title: Class \"dk\"\n### Aliases: dk-class\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"dk\")\n\n\n"} {"package":"clusterCons","topic":"dkplot","snippet":"### Name: dkplot\n### Title: Generate a delta-K plot from area under the curve (AUC) values\n### across multiple cluster numbers.\n### Aliases: dkplot\n\n### ** Examples\n\n#load up a test cluscomp result\ndata('testcmr');\n\n#look at the result structure\nsummary(testcmr);\n\n#calculate all of the AUC values from the \\code{cluscomp} result for algorithm 'kmeans'\nkmeanscmr <- testcmr[grep('kmeans',names(testcmr))];\nacs <- aucs(kmeanscmr);\n\n#calculate 
all of the delta-K values\ndks <- deltak(acs);\n\n#plot the delta-K curve\ndkplot(dks);\n\n\n"} {"package":"clusterCons","topic":"expressionPlot","snippet":"### Name: expressionPlot\n### Title: Generate a profile plot for the data partitioned by cluster\n### membership.\n### Aliases: expressionPlot\n\n### ** Examples\n\n#load up the data set\ndata(sim_profile);\n\n#load up an example cluscomp result with this data\ndata('testcmr');\n\n#plot the expression profiles\nexpressionPlot(sim_profile,testcmr$e1_kmeans_k4);\n\n\n\n"} {"package":"clusterCons","topic":"membBoxPlot","snippet":"### Name: membBoxPlot\n### Title: Generate a box and whisker plot of membership robustness for all\n### clusters\n### Aliases: membBoxPlot\n\n### ** Examples\n\n#load up a test cluscomp result\ndata('testcmr');\n\n#calculate the membership robustness for one of the clustering results\nmr <- memrob(testcmr$e1_kmeans_k5);\n\n#plot the bwplot\nmembBoxPlot(mr);\n\n\n"} {"package":"clusterCons","topic":"memrob","snippet":"### Name: memrob\n### Title: Calculate the membership robustness from consensus clustering\n### results\n### Aliases: memrob\n\n### ** Examples\n\n#load cmr (consensus clustering result produced by cluscomp)\ndata(testcmr);\n\n#calculate the membership robustness of the consensus matrix for kmeans where k=4\nmr1 <- memrob(testcmr$e1_kmeans_k4);\n\n#show the membership robustness of cluster 1\nmr1$cluster1;\n\n#calculate the membership robustness of the merge matrix in reference\n#to the clustering structure of kmeans where k=4\nmr2 <- memrob(testcmr$merge_k4,testcmr$e1_kmeans_k4@rm);\n\n#plot a heatmap of the full membership robustness matrix\nheatmap(mr2$resultmatrix@mrm)\n\n\n"} {"package":"clusterCons","topic":"memroblist-class","snippet":"### Name: memroblist-class\n### Title: Class \"memroblist\"\n### Aliases: memroblist-class memroblist\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"memroblist\")\n\n#load a cmr\ndata(testcmr);\n\n#calculate the membership robustness for agnes, k=4\nmr <- memrob(testcmr$e2_agnes_k4);\n\n#get a membership robustness list\nmrl <- mr$cluster1;\n\n\n\n"} {"package":"clusterCons","topic":"memrobmatrix-class","snippet":"### Name: memrobmatrix-class\n### Title: Class \"memrobmatrix\"\n### Aliases: memrobmatrix-class memrobmatrix\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"memrobmatrix\")\n\n#load cmr\ndata(testcmr);\n\n#calculate membership robustness\nmr <- memrob(testcmr$e1_kmeans_k3)\n\n#get the full membership robustness matrix (matrix itself held in slot 'mrm')\nmrm <- mr$resultmatrix@mrm;\n\n\n\n"} {"package":"clusterCons","topic":"mergematrix-class","snippet":"### Name: mergematrix-class\n### Title: Class \"mergematrix\"\n### Aliases: mergematrix-class mergematrix\n### Keywords: classes\n\n### ** Examples\n\nshowClass(\"mergematrix\")\n\n#load the cmr\ndata(testcmr);\n\n#get a merge matrix object\nmm <- testcmr$merge_k4;\n\n#plot a heatmap of the merge matrix\nheatmap(mm@cm);\n\n\n"} {"package":"clusterCons","topic":"wrappers","snippet":"### Name: wrappers\n### Title: Functions to wrap command calls to clustering functions\n### Aliases: wrappers agnes_clmem pam_clmem hclust_clmem diana_clmem\n### kmeans_clmem apcluster_clmem\n\n### ** Examples\n\n#load some data\ndata(sim_profile);\n\n#run a basic agnes clustering with 3 clusters\ncm <- agnes_clmem(sim_profile,3);\n\n#pass some more complex parameters\nagnes_params = list(metric='manhattan',method='single');\ncm <- agnes_clmem(sim_profile, 3,params=agnes_params);\n\n\n\n"} 
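Taken together, the clusterCons records above describe one cluster-number selection workflow. The sketch below simply chains the documented calls, and assumes, as those examples do, that testcmr holds kmeans consensus results over a range of k.

library(clusterCons)
data('testcmr');
kmeanscmr <- testcmr[grep('kmeans',names(testcmr))];  # keep the kmeans results only
acs <- aucs(kmeanscmr);   # area under the consensus CDF for each cluster number k
dks <- deltak(acs);       # change in AUC as k increases
aucplot(acs);             # inspect the AUC curves
dkplot(dks);              # a pronounced peak suggests a plausible cluster number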
{"package":"pdR","topic":"HEGY.test","snippet":"### Name: HEGY.test\n### Title: Seasonal unit root test based on Hylleberg et al. (1990)\n### Aliases: HEGY.test\n### Keywords: Seasonal unit root test\n\n### ** Examples\n\ndata(inf_Q)\ny<-inf_Q[,1]\nhegy.out<-HEGY.test(wts=y, itsd=c(1,0,c(1:3)),regvar=0, selectlags=list(mode=\"aic\", Pmax=12))\n\nhegy.out$stats #HEGY test statistics\nnames(hegy.out) # HEGY objects, which can be called by using $, see below.\nhegy.out$hegycoefs\nhegy.out$regvarcoefs\n\n\n"} {"package":"pdR","topic":"IGF","snippet":"### Name: IGF\n### Title: Unit root test based on Change(2002)\n### Aliases: IGF\n### Keywords: instrument generating functions unit root\n\n### ** Examples\n\ndata(inf19)\ny <- inf19[,1]\nIGF(y,maxp=35,ic=\"BIC\",spec=2)$tstat.IGF\n\n\n\n"} {"package":"pdR","topic":"SMPLSplit_est","snippet":"### Name: SMPLSplit_est\n### Title: Estimation of sub-sampled data\n### Aliases: SMPLSplit_est\n\n### ** Examples\n\n## Not run, becasue of bootstrap replicaiton takes time. Users may unmark # and run. \ndata(\"dur_john\")\nrep <- 500\ntrim_per <- 0.15\ndep <- \"gdpGrowth\"\nindep <- colnames(dur_john)[c(2,3,4,5)]\n\nSMPLSplit_est(data=dur_john,dep,indep,th=\"GDP60\",plot=0,h=1,nonpar=2)\n\n\n\n\n"} {"package":"pdR","topic":"SMPLSplit_example","snippet":"### Name: SMPLSplit_example\n### Title: Example code for sample splitting\n### Aliases: SMPLSplit_example\n\n### ** Examples\n\n## Not run, becasue of bootstrap replicaiton takes time. Users may unmark # and run. \ndata(\"dur_john\")\n#rep <- 500\n#trim_per <- 0.15\n#dep <- \"gdpGrowth\"\n#indep <- colnames(dur_john)[c(2,3,4,5)]\n#th1 <- \"GDP60\"\n#th2 <- \"Literacy\"\n#OUT=SMPLSplit_est(data=dur_john,dep,indep,th=th1,plot=0,h=1,nonpar=2)\n#OUT$TEST\n#OUT$Hypothesis\n#OUT$Threshold\n#stat=matrix(as.numeric(OUT$TEST),byrow = TRUE,8,2)\n#colnames(stat)=c(\"F-Stat\",\"P-value\")\n#rownames(stat)=OUT$Hypothesis\n#stat\n\n\n\n"} {"package":"pdR","topic":"SeasComponent","snippet":"### Name: SeasComponent\n### Title: Generate a data matrix of seasonal components\n### Aliases: SeasComponent\n\n### ** Examples\n\ndata(inf_Q)\ny=inf_Q[,2]\nSeasComponent(y,type=\"dummyCycle\")\nSeasComponent(y,type=\"trgCycle\")\n\n\n"} {"package":"pdR","topic":"bank_income","snippet":"### Name: bank_income\n### Title: Panel data of bank,2001Q1~2010Q1\n### Aliases: bank_income\n\n### ** Examples\n\ndata(bank_income)\n\n\n"} {"package":"pdR","topic":"cigaretts","snippet":"### Name: cigaretts\n### Title: Cigaretts consumption of US states\n### Aliases: cigaretts\n\n### ** Examples\n\ndata(cigaretts)\nhead(cigaretts)\n\n\n"} {"package":"pdR","topic":"contts","snippet":"### Name: contts\n### Title: Function for extracting components from a lm object\n### Aliases: contts\n\n### ** Examples\n\nx=rnorm(100)\ny=1+0.2*x+rnorm(100)\nLMout=lm(y~x)\ncontts(LMout,1)\n\n#$se.coef\n#[1] 0.1081023\n\n#$t.stat\n#(Intercept) \n# 10.60401 \n\n\n\n"} {"package":"pdR","topic":"dur_john","snippet":"### Name: dur_john\n### Title: The cross-country growth data in Durlauf and Johnson(1995)\n### Aliases: dur_john\n\n### ** Examples\n\ndata(dur_john)\nhead(dur_john)\n\n\n"} {"package":"pdR","topic":"hegy.reg","snippet":"### Name: hegy.reg\n### Title: Generate the HEGY regressors.\n### Aliases: hegy.reg\n\n### ** Examples\n\ndata(inf_Q)\ny=inf_Q[,1]\nhegy.reg(y)\n\n\n"} {"package":"pdR","topic":"inf19","snippet":"### Name: inf19\n### Title: Monthly inflation time series of 19 countries\n### Aliases: inf19\n\n### ** Examples\n\ndata(inf19)\nhead(inf19)\n\n\n"} 
{"package":"pdR","topic":"inf_M","snippet":"### Name: inf_M\n### Title: Monthly inflation time series of 20 countries\n### Aliases: inf_M\n\n### ** Examples\n\ndata(inf_M)\nhead(inf_M)\n\n\n"} {"package":"pdR","topic":"inf_Q","snippet":"### Name: inf_Q\n### Title: Quarterly inflation time series of 20 countries\n### Aliases: inf_Q\n\n### ** Examples\n\ndata(inf_Q)\nhead(inf_Q)\n\n\n"} {"package":"pdR","topic":"invest","snippet":"### Name: invest\n### Title: investment data of 565 listed companies, 1973-1987\n### Aliases: invest\n\n### ** Examples\n\n#data(invest)\n#head(invest)\n\n\n"} {"package":"pdR","topic":"ipsHEGY","snippet":"### Name: ipsHEGY\n### Title: IPS-HEGY seasonal unit root test in panel data, Otero et\n### al.(2007).\n### Aliases: ipsHEGY\n\n### ** Examples\n\ndata(inf_Q)\ndataz<-inf_Q\nitsd<-c(1,0,c(1:3))\n#Seasonal dummy only takes quarters 1:3, \n#becasue of the presence of common intercept.\nSel<-\"bic\" # \"aic\",\"bic\", \"signf\".\npmax<-12\n\nOUT<-ipsHEGY(dataz,itsd,Sel,pmax,CIPS=FALSE)\nOUT$P_HEGY\nOUT$U_HEGY\n\n# Simulation of critical values\n\n\n\n\n\n\n\n"} {"package":"pdR","topic":"lagSelect","snippet":"### Name: lagSelect\n### Title: Select the optimal number of lags, given criteria\n### Aliases: lagSelect\n\n### ** Examples\n\n#library(pdR)\n#data(inf19)\n#y<-inf19[,1]\n#lagSelect(y,maxp=25,ic=\"BIC\")\n\n\n"} {"package":"pdR","topic":"pIGF","snippet":"### Name: pIGF\n### Title: Panel unit root test of Chang(2002)\n### Aliases: pIGF\n\n### ** Examples\n\ndata(inf19)\ndatam <- inf19\npIGF(datam,maxp=25,ic=\"BIC\",spec=2)\n\n\n"} {"package":"pdR","topic":"productivity","snippet":"### Name: productivity\n### Title: Productivity data of 48 US state,1970-1986\n### Aliases: productivity\n\n### ** Examples\n\ndata(productivity)\nhead(productivity)\n\n\n"} {"package":"pdR","topic":"ptm","snippet":"### Name: ptm\n### Title: Threshold specification of panel data\n### Aliases: ptm\n\n### ** Examples\n\n# library(pdR)\n#data(invest)\n#dat<-invest[1:1500,] # subsetting the first 1500 obs., #for simplicity\n#t <- 15 #Length of time period\n#nt <- nrow(dat)\n#n <- nt/t # number of cross-section units\n\n#dep<- as.matrix(dat[,1]) # investment/assets\n#th1<- as.matrix(dat[,2]) #Tobin's Q\n#th2<- as.matrix(dat[,3]) # cash-flow/assets\n#ind1<- cbind(th1,th2) #regime-dep covariates \n#d <- as.matrix(dat[,4]) # Threshold variable \n#ind2 <- cbind((th1^2),(th1^3),(th1*d)) # regime-indep covariates:\n#bootn<-c(100,200,300) # bootstrapping replications for each threshold esitmation\n#trimn<-c(0.05,0.05,0.05) #trimmed percentage for each threshold esitmation\n\n#qn<-400\n#conf_lev<-0.95\n\n#Output=ptm(dep,ind1,ind2,d,bootn,trimn,qn,conf_lev,t,n)\n#Output[[1]] #Formatted output of 1st threshold, 2 regimes\n#Output[[2]] #Formatted output of 2nd threshold, 3 regimes\n#Output[[3]] #Formatted output of 3rd threshold, 4 regimes\n\n# In the output, the Regime-dependent Coefficients matrix\n# is, from top to bottom, regime-wise.\n\n\n"} {"package":"pdR","topic":"ret","snippet":"### Name: ret\n### Title: Returns a data.frame of sequential lag matrix.\n### Aliases: ret\n\n### ** Examples\n\ndata(inf_Q)\ny=inf_Q[,2]\nret(y,3)\n\n\n"} {"package":"pdR","topic":"selPabic","snippet":"### Name: selPabic\n### Title: Selection of lags.\n### Aliases: selPabic\n\n### ** Examples\n\ndata(inf_Q)\ny=inf_Q[,1]\nhegy.out<-HEGY.test(wts=y, itsd=c(1,0,c(1:3)),regvar=0, selectlags=list(mode=\"aic\", Pmax=12))\nhegy.out$lagsorder\nhegy.out$lagcoefs\n\n\n"} {"package":"pdR","topic":"selPsignf","snippet":"### 
Name: selPsignf\n### Title: Selection of lags.\n### Aliases: selPsignf\n\n### ** Examples\n\ndata(inf_Q)\ny=inf_Q[,1]\nhegy.out<-HEGY.test(wts=y, itsd=c(1,0,c(1:3)),regvar=0, selectlags=list(mode=\"signf\", Pmax=12))\nhegy.out$lagsorder\nhegy.out$lagcoefs\n\n\n"} {"package":"pdR","topic":"tbar","snippet":"### Name: tbar\n### Title: Compute the recursive mean\n### Aliases: tbar\n\n### ** Examples\n\ndata(inf19)\ny <- inf19[,1]\ntbar(y)\n\n\n\n"} {"package":"xaringan","topic":"decktape","snippet":"### Name: decktape\n### Title: Convert HTML presentations to PDF via DeckTape\n### Aliases: decktape\n\n### ** Examples\n## Don't show: \nif (interactive()) (if (getRversion() >= \"3.4\") withAutoprint else force)({ # examplesIf\n## End(Don't show)\nxaringan::decktape(\"https://slides.yihui.org/xaringan\", \"xaringan.pdf\", docker = FALSE)\n## Don't show: \n}) # examplesIf\n## End(Don't show)\n\n\n"} {"package":"xaringan","topic":"moon_reader","snippet":"### Name: moon_reader\n### Title: An R Markdown output format for remark.js slides\n### Aliases: moon_reader tsukuyomi\n\n### ** Examples\n\n# rmarkdown::render('foo.Rmd', 'xaringan::moon_reader')\n\n\n"} {"package":"DBfit","topic":"dbfit.default","snippet":"### Name: dbfit\n### Title: The main function for the double bootstrap method\n### Aliases: dbfit.default dbfit.formula dbfit\n\n### ** Examples\n\n# make sure the dependent package Rfit is installed\n# To save users time, we set both bootstrap sizes to be 100 in this example. \n# Defaults are both 500. \n\n# data(testdata)\n# This data is generated by a two-phase design, with autoregressive order being one, \n# autoregressive coefficient being 0.6 and all regression coefficients being 0. \n# Both the first and second phase have 20 observations.\n\n# y <- testdata[,5]\n# x <- testdata[,1:4]\n# fit1 <- dbfit(x,y,1, nbs = 100, nbscov = 100) # OLS fit, default\n# summary(fit1) \n# Note that the CIs of the autoregressive coef are not shown in the summary.\n# Instead, they are attributes of the model fit.\n# fit1$rho_CI_1\n\n# fit2 <- dbfit(x,y,1, nbs = 100, nbscov = 100 ,method=\"RANK\") # rank-based fit\n\n# When fitting with autoregressive order 2, \n# the estimate of the second order autoregressive coefficient should not be significant,\n# since this data is generated with order 1.\n\n# fit3 <- dbfit(x,y,2, nbs = 100, nbscov = 100)\n# fit3$rho_CI_1 # The first row is lower bounds, and second row is upper bounds\n\n\n\n"} {"package":"DBfit","topic":"hmdesign2","snippet":"### Name: hmdesign2\n### Title: The Two-Phase Design Matrix\n### Aliases: hmdesign2\n\n### ** Examples\n\nn1 <- 15\nn2 <- 15\nhmdesign2(n1, n2)\n\n\n"} {"package":"DBfit","topic":"hmmat","snippet":"### Name: hmmat\n### Title: K-Phase Design Matrix\n### Aliases: hmmat\n\n### ** Examples\n\n# a three-phase design matrix\nhmmat(c(10,10,10),3)\n\n\n"} {"package":"DBfit","topic":"hypothmat","snippet":"### Name: hypothmat\n### Title: General Linear Tests of the regression coefficients\n### Aliases: hypothmat\n\n### ** Examples\n\n# data(testdata)\n# y<-testdata[,5]\n# x<-testdata[,1:4]\n# fit1<-dbfit(x,y,1) # OLS fit, default\n# a test that H0: b1 = b3 vs HA: b1 != b3\n# mat<-matrix(c(1,0,0,-1),nrow=1) \n# hypothmat(sfit=fit1,mmat=mat,n=40,p=4)\n\n\n"} {"package":"DBfit","topic":"simpgen1hm2","snippet":"### Name: simpgen1hm2\n### Title: Simulation Data Generating Function\n### Aliases: simpgen1hm2\n\n### ** Examples\n\n n1 <- 15\n n2 <- 15\n rho <- 0.6\n beta <- c(0,0,0,0)\n dat <- simpgen1hm2(n1, n2, rho, beta)\n dat\n\n\n"} 
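The DBfit examples above are shipped fully commented out. A hedged, runnable version of the same workflow, assuming simpgen1hm2() returns a matrix laid out like testdata (design in columns 1-4, response in column 5); the calls and arguments mirror the commented examples:

# simulate two-phase data, then fit and test as in the commented examples
dat <- simpgen1hm2(15, 15, rho = 0.6, beta = c(0, 0, 0, 0))
y <- dat[, 5]   # assumed response column, as in testdata
x <- dat[, 1:4] # assumed design columns, as in testdata
fit1 <- dbfit(x, y, 1, nbs = 100, nbscov = 100) # OLS fit, default
summary(fit1)
fit1$rho_CI_1 # bootstrap CI for the autoregressive coefficient
# general linear test with a single contrast, as in the hypothmat example
mat <- matrix(c(1, 0, 0, -1), nrow = 1)
hypothmat(sfit = fit1, mmat = mat, n = 30, p = 4) # n = 15 + 15 observations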
{"package":"DBfit","topic":"summary.dbfit","snippet":"### Name: summary.dbfit\n### Title: Summarize the double bootstrap (DB) fit\n### Aliases: summary.dbfit\n\n### ** Examples\n\n# data(testdata)\n# y<-testdata[,5]\n# x<-testdata[,1:4]\n# fit1<-dbfit(x,y,1) # OLS fit, default\n# summary(fit1)\n\n\n"} {"package":"DBfit","topic":"testdata","snippet":"### Name: testdata\n### Title: testdata\n### Aliases: testdata\n### Keywords: datasets\n\n### ** Examples\n\ndata(testdata)\n\n\n"} {"package":"sgstar","topic":"plot_sgstar","snippet":"### Name: plot_sgstar\n### Title: Timeseries Plot for Model\n### Aliases: plot_sgstar\n\n### ** Examples\n\nlibrary(sgstar)\ndata(\"coords\")\ndata(\"simulatedata\")\n\n#create weight matrix using distance inverse matrix\n\nz<-dist(coords,method = \"euclidean\")\nz <- as.matrix(z)\n\nmatriksd <- 1/z\nmatriksd[is.infinite(matriksd)] <- 0\n\nmatriksd_w <- matriksd / rowSums(as.data.frame(matriksd))\n\nfit <- sgstar(data = simulatedata, w = matriksd_w, p = 2,ps = 1, s =4)\nplot1 <- plot_sgstar(fit)\n\n\n\n\n\n"} {"package":"sgstar","topic":"predict_sgstar","snippet":"### Name: predict_sgstar\n### Title: Predict for Seasonal GSTAR model.\n### Aliases: predict_sgstar\n\n### ** Examples\n\nlibrary(sgstar)\ndata(\"coords\")\ndata(\"simulatedata\")\n\n#create weight matrix using distance inverse matrix\nz<-dist(coords,method = \"euclidean\")\nz <- as.matrix(z)\n\nmatriksd <- 1/z\nmatriksd[is.infinite(matriksd)] <- 0\n\nmatriksd_w <- matriksd / rowSums(as.data.frame(matriksd))\n\n\nfit <- sgstar(data = simulatedata, w = matriksd_w, p = 2,ps = 1, s =4)\n\n#predicting for 12 time ahead\npredict.fit <-predict_sgstar(fit,12)\n\n\n"} {"package":"sgstar","topic":"sgstar","snippet":"### Name: sgstar\n### Title: Fit Seasonal Generalized Space Time Autoregressive Model\n### Aliases: sgstar\n\n### ** Examples\n\nlibrary(sgstar)\ndata(\"coords\")\ndata(\"simulatedata\")\n\n#create weight matrix using distance inverse matrix\n\nz<-dist(coords,method = \"euclidean\")\nz <- as.matrix(z)\n\nmatriksd <- 1/z\nmatriksd[is.infinite(matriksd)] <- 0\n\nmatriksd_w <- matriksd / rowSums(as.data.frame(matriksd))\n\nfit <- sgstar(data = simulatedata, w = matriksd_w, p = 2,ps = 1, s =4)\nfit\n\n\n\n\n\n\n\n\n"} {"package":"Ternary","topic":"AddToHoldridge","snippet":"### Name: AddToHoldridge\n### Title: Add elements to ternary or Holdridge plot\n### Aliases: AddToHoldridge HoldridgeArrows HoldridgeLines HoldridgePoints\n### HoldridgePolygon HoldridgeText AddToTernary TernarySegments\n### TernaryArrows TernaryLines TernaryPoints TernaryPolygon TernaryText\n### JoinTheDots\n\n### ** Examples\n\n# Data to plot\ncoords <- list(\n A = c(1, 0, 2),\n B = c(1, 1, 1),\n C = c(1.5, 1.5, 0),\n D = c(0.5, 1.5, 1)\n)\n\n# Set up plot\noPar <- par(mar = rep(0, 4), xpd = NA) # reduce margins and write in them\nTernaryPlot()\n\n# Add elements to ternary diagram\nAddToTernary(lines, coords, col = \"darkgreen\", lty = \"dotted\", lwd = 3)\nTernaryLines(coords, col = \"darkgreen\")\nTernaryArrows(coords[1], coords[2:4], col = \"orange\", length = 0.2, lwd = 1)\nTernaryText(coords, cex = 0.8, col = \"red\", font = 2)\nTernaryPoints(coords, pch = 1, cex = 2, col = \"blue\")\nAddToTernary(graphics::points, coords, pch = 1, cex = 3)\n\n# An equivalent syntax applies to Holdridge plots:\nHoldridgePlot()\npet <- c(0.8, 2, 0.42)\nprec <- c(250, 400, 1337)\nHoldridgeText(pet, prec, c(\"A\", \"B\", \"C\"))\nAddToHoldridge(graphics::points, pet, prec, cex = 3)\n\n# Restore original plotting parameters\npar(oPar)\n\n\n"} 
{"package":"Ternary","topic":"Annotate","snippet":"### Name: Annotate\n### Title: Annotate points on a ternary plot\n### Aliases: Annotate\n\n### ** Examples\n\n# Load some data\ndata(\"Seatbelts\")\nseats <- c(\"drivers\", \"front\", \"rear\")\nseat <- Seatbelts[month.abb %in% \"Oct\", seats]\nlaw <- Seatbelts[month.abb %in% \"Oct\", \"law\"]\n\n# Set up plot\noPar <- par(mar = c(2, 0, 0, 0))\nTernaryPlot(alab = seats[1], blab = seats[2], clab = seats[3])\nTernaryPoints(seat, cex = 0.8, col = 2 + law)\n\n# Annotate points by year\nAnnotate(seat, labels = 1969:1984, col = 2 + law)\n\n# Restore original graphical parameters\npar(oPar) \n\n\n"} {"package":"Ternary","topic":"ColourTernary","snippet":"### Name: ColourTernary\n### Title: Colour a ternary plot according to the output of a function\n### Aliases: ColourTernary ColorTernary\n\n### ** Examples\n\nTernaryPlot(alab = \"a\", blab = \"b\", clab = \"c\")\n \nFunctionToContour <- function (a, b, c) {\n a - c + (4 * a * b) + (27 * a * b * c)\n}\n\nvalues <- TernaryPointValues(FunctionToContour, resolution = 24L)\nColourTernary(\n values,\n x = \"topleft\",\n bty = \"n\", # No box\n legend = signif(seq(max(values), min(values), length.out = 4), 3)\n)\nTernaryContour(FunctionToContour, resolution = 36L)\n\n\nTernaryPlot()\nvalues <- TernaryPointValues(rgb, resolution = 20)\nColourTernary(values, spectrum = NULL)\n\n# Create a helper function to place white centrally:\nrgbWhite <- function (r, g, b) {\n highest <- apply(rbind(r, g, b), 2L, max)\n rgb(r/highest, g/highest, b/highest)\n}\n\nTernaryPlot()\nvalues <- TernaryPointValues(rgbWhite, resolution = 20)\nColourTernary(values, spectrum = NULL)\n\n\n\n\n"} {"package":"Ternary","topic":"HoldridgeHypsometricCol","snippet":"### Name: HoldridgeHypsometricCol\n### Title: Convert a point in evapotranspiration-precipitation space to an\n### appropriate cross-blended hypsometric colour\n### Aliases: HoldridgeHypsometricCol\n\n### ** Examples\n\nHoldridgePlot(hex.col = HoldridgeHypsometricCol)\nVeryTransparent <- function(...) 
HoldridgeHypsometricCol(..., opacity = 0.3)\nHoldridgePlot(hex.col = VeryTransparent)\npet <- holdridge$PET\nprec <- holdridge$Precipitation\nptCol <- HoldridgeHypsometricCol(pet, prec)\nHoldridgePoints(pet, prec, pch = 21, bg = ptCol)\n\n\n"} {"package":"Ternary","topic":"HoldridgePlot","snippet":"### Name: HoldridgePlot\n### Title: Plot life zones on a Holdridge plot\n### Aliases: HoldridgePlot HoldridgeBelts HoldridgeHexagons\n\n### ** Examples\n\ndata(holdridgeLifeZonesUp, package = \"Ternary\")\nHoldridgePlot(hex.labels = holdridgeLifeZonesUp)\nHoldridgeBelts()\n\n\n"} {"package":"Ternary","topic":"OutsidePlot","snippet":"### Name: OutsidePlot\n### Title: Is a point in the plotting area?\n### Aliases: OutsidePlot\n\n### ** Examples\n\n\nTernaryPlot()\npoints(0.5, 0.5, col = \"darkgreen\")\nOutsidePlot(0.5, 0.5)\n\npoints(0.1, 0.5, col = \"red\")\nOutsidePlot(0.1, 0.5)\n\nOutsidePlot(c(0.5, 0.1), 0.5)\n\n\n"} {"package":"Ternary","topic":"Polygon-Geometry","snippet":"### Name: Polygon-Geometry\n### Title: Polygon geometry\n### Aliases: Polygon-Geometry PolygonArea PolygonCentre PolygonCenter\n### GrowPolygon\n\n### ** Examples\n\nx <- c(-3, -1, 6, 3, -4)\ny <- c(-2, 4, 1, 10, 9)\nplot(x, y, frame.plot = FALSE)\npolygon(x, y)\nPolygonArea(x, y)\npoints(PolygonCentre(x, y), pch = 3, cex = 2)\npolygon(GrowPolygon(x, y, 1), border = \"darkgreen\",\n xpd = NA # Allow drawing beyond plot border\n )\n\n# Negative values shrink the polygon\npolygon(GrowPolygon(x, y, -1), border = \"red\")\n\n\n"} {"package":"Ternary","topic":"ReflectedEquivalents","snippet":"### Name: ReflectedEquivalents\n### Title: Reflected equivalents of points outside the ternary plot\n### Aliases: ReflectedEquivalents\n\n### ** Examples\n\nTernaryPlot(axis.labels = FALSE, point = 4)\n\nxy <- cbind(\n TernaryCoords(0.9, 0.08, 0.02),\n TernaryCoords(0.15, 0.8, 0.05),\n TernaryCoords(0.05, 0.1, 0.85)\n)\nx <- xy[1, ]\ny <- xy[2, ]\n\npoints(x, y, col = \"red\", pch = 1:3)\nref <- ReflectedEquivalents(x, y)\npoints(ref[[1]][, 1], ref[[1]][, 2], col = \"blue\", pch = 1)\npoints(ref[[2]][, 1], ref[[2]][, 2], col = \"green\", pch = 2)\npoints(ref[[3]][, 1], ref[[3]][, 2], col = \"orange\", pch = 3)\n\n\n"} {"package":"Ternary","topic":"TernaryContour","snippet":"### Name: TernaryContour\n### Title: Add contours to a ternary plot\n### Aliases: TernaryContour\n\n### ** Examples\n\nFunctionToContour <- function (a, b, c) {\n a - c + (4 * a * b) + (27 * a * b * c)\n}\n\n# Set up plot\noriginalPar <- par(mar = rep(0, 4))\nTernaryPlot(alab = \"a\", blab = \"b\", clab = \"c\")\nvalues <- TernaryPointValues(FunctionToContour, resolution = 24L)\nColourTernary(values,\n legend = signif(seq(max(values), min(values), length.out = 4), 2),\n bty = \"n\")\nTernaryContour(FunctionToContour, resolution = 36L)\n\n# Note that FunctionToContour is sent a vector.\n# Instead of\nBadMax <- function (a, b, c) {\n max(a, b, c) \n}\n\n# Use\nGoodMax <- function (a, b, c) {\n pmax(a, b, c)\n}\nTernaryPlot(alab = \"a\", blab = \"b\", clab = \"c\")\nColourTernary(TernaryPointValues(GoodMax))\nTernaryContour(GoodMax)\n\n# Or, for a generalizable example,\nGeneralMax <- function (a, b, c) {\n apply(rbind(a, b, c), 2, max)\n}\nTernaryPlot(alab = \"a\", blab = \"b\", clab = \"c\")\n# Fill the contour areas, rather than using tiles\nTernaryContour(GeneralMax, filled = TRUE,\n legend = c(\"Max\", \"Min\"), bty = \"n\",\n fill.col = viridisLite::viridis(14, alpha = 0.6))\n# Re-draw edges of plot triangle over fill\nTernaryPolygon(diag(3))\n\n# Restore plotting 
parameters\npar(originalPar)\n\n\n"} {"package":"Ternary","topic":"TernaryCoords","snippet":"### Name: TernaryCoords\n### Title: Convert ternary coordinates to Cartesian space\n### Aliases: TernaryCoords TernaryToXY.matrix TernaryToXY.numeric\n### TernaryToXY\n\n### ** Examples\n\nTernaryCoords(100, 0, 0)\nTernaryCoords(c(0, 100, 0))\n\ncoords <- matrix(1:12, nrow = 3)\nTernaryToXY(coords)\n\n\n"} {"package":"Ternary","topic":"TernaryDensityContour","snippet":"### Name: TernaryDensityContour\n### Title: Add contours of estimated point density to a ternary plot\n### Aliases: TernaryDensityContour\n\n### ** Examples\n\n# Generate some example data\nnPoints <- 400L\ncoordinates <- cbind(abs(rnorm(nPoints, 2, 3)),\n abs(rnorm(nPoints, 1, 1.5)),\n abs(rnorm(nPoints, 1, 0.5)))\n# Set up plot\noPar <- par(mar = rep(0, 4))\nTernaryPlot(axis.labels = seq(0, 10, by = 1))\n\n# Colour background by density\nColourTernary(TernaryDensity(coordinates, resolution = 10L),\n legend = TRUE, bty = \"n\", title = \"Density\")\n\n# Plot points\nTernaryPoints(coordinates, col = \"red\", pch = \".\")\n\n# Contour by density\nTernaryDensityContour(coordinates, resolution = 30L)\n\n# Reset plotting parameters\npar(oPar)\n\n\n"} {"package":"Ternary","topic":"TernaryPlot","snippet":"### Name: TernaryPlot\n### Title: Create a ternary plot\n### Aliases: TernaryPlot HorizontalGrid\n\n### ** Examples\n\nTernaryPlot(\n atip = \"Top\", btip = \"Bottom\", ctip = \"Right\", axis.col = \"red\",\n col = rgb(0.8, 0.8, 0.8)\n)\nHorizontalGrid(grid.lines = 2, grid.col = \"blue\", grid.lty = 1)\n# the second line corresponds to the base of the triangle, and is not drawn\n\n\n"} {"package":"Ternary","topic":"TernaryPointValues","snippet":"### Name: TernaryPointValues\n### Title: Value of a function at regularly spaced points\n### Aliases: TernaryPointValues TernaryDensity\n\n### ** Examples\n\nTernaryPointValues(function (a, b, c) a * b * c, resolution = 2)\n\nTernaryPlot(grid.lines = 4)\ncols <- TernaryPointValues(rgb, resolution = 4)\ntext(as.numeric(cols[\"x\", ]), as.numeric(cols[\"y\", ]),\n labels = ifelse(cols[\"down\", ] == \"1\", \"v\", \"^\"),\n col = cols[\"z\", ])\n\nTernaryPlot(axis.labels = seq(0, 10, by = 1))\n\nnPoints <- 4000L\ncoordinates <- cbind(abs(rnorm(nPoints, 2, 3)),\n abs(rnorm(nPoints, 1, 1.5)),\n abs(rnorm(nPoints, 1, 0.5)))\n\ndensity <- TernaryDensity(coordinates, resolution = 10L)\nColourTernary(density, legend = TRUE, bty = \"n\", title = \"Density\")\nTernaryPoints(coordinates, col = \"red\", pch = \".\")\n\n\n"} {"package":"Ternary","topic":"TernaryTiles","snippet":"### Name: TernaryTiles\n### Title: Paint tiles on ternary plot\n### Aliases: TernaryTiles TernaryUpTiles TernaryDownTiles TernaryLeftTiles\n### TernaryRightTiles\n\n### ** Examples\n\nTernaryPlot()\nTernaryXRange()\nTernaryYRange()\n\nTernaryTiles(0, 0.5, TRUE, 10, \"red\")\nxy <- TernaryCoords(c(4, 3, 3))\nTernaryTiles(xy[1], xy[2], FALSE, 5, \"darkblue\")\n\n\n"} {"package":"Ternary","topic":"TriangleCentres","snippet":"### Name: TriangleCentres\n### Title: Coordinates of triangle mid-points\n### Aliases: TriangleCentres\n\n### ** Examples\n\nTernaryPlot(grid.lines = 4)\ncentres <- TriangleCentres(4)\ntext(centres[\"x\", ], centres[\"y\", ], ifelse(centres[\"triDown\", ], \"v\", \"^\"))\n\n\n\n"} {"package":"Ternary","topic":"TriangleInHull","snippet":"### Name: TriangleInHull\n### Title: Does triangle overlap convex hull of points?\n### Aliases: TriangleInHull\n\n### ** Examples\n\nset.seed(0)\nnPts <- 50\na <- runif(nPts, 0.3, 
0.7)\nb <- 0.15 + runif(nPts, 0, 0.7 - a)\nc <- 1 - a - b\ncoordinates <- rbind(a, b, c)\n\nTernaryPlot(grid.lines = 5)\nTernaryPoints(coordinates, pch = 3, col = 4)\ntriangles <- TriangleCentres(resolution = 5)\ninHull <- TriangleInHull(triangles, coordinates)\npolygon(inHull$hull, border = 4)\nvalues <- rbind(triangles,\n z = ifelse(inHull$inside, \"#33cc3333\", \"#cc333333\"))\npoints(triangles[\"x\", ], triangles[\"y\", ],\n pch = ifelse(triangles[\"triDown\", ], 6, 2),\n col = ifelse(inHull$inside, \"#33cc33\", \"#cc3333\"))\nColourTernary(values)\n\n\n"} {"package":"Ternary","topic":"XYToTernary","snippet":"### Name: XYToTernary\n### Title: Cartesian coordinates to ternary point\n### Aliases: XYToTernary XYToHoldridge XYToPetPrec\n\n### ** Examples\n\nXYToTernary(c(0.1, 0.2), 0.5)\n\n\n"} {"package":"Ternary","topic":"cbPalettes","snippet":"### Name: cbPalettes\n### Title: Palettes compatible with colour blindness\n### Aliases: cbPalettes cbPalette8 cbPalette13 cbPalette15\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"cbPalette8\")\nplot.new()\nplot.window(xlim = c(1, 16), ylim = c(0, 3))\ntext(1:8 * 2, 3, 1:8, col = cbPalette8)\npoints(1:8 * 2, rep(2, 8), col = cbPalette8, pch = 15)\n\ndata(\"cbPalette15\")\ntext(1:15, 1, col = cbPalette15)\ntext(c(4, 7), 1, \"[ ]\")\npoints(1:15, rep(0, 15), col = cbPalette15, pch = 15)\n\n\n"} {"package":"Ternary","topic":"holdridge","snippet":"### Name: holdridge\n### Title: Random sample of points for Holdridge plotting\n### Aliases: holdridge\n### Keywords: datasets\n\n### ** Examples\n\ndata(\"holdridge\", package = \"Ternary\")\nhead(holdridge)\n\n\n"} {"package":"sbo","topic":"as_sbo_dictionary","snippet":"### Name: as_sbo_dictionary\n### Title: Coerce to dictionary\n### Aliases: as_sbo_dictionary as_sbo_dictionary.character\n\n### ** Examples\n\ndict <- as_sbo_dictionary(c(\"a\",\"b\",\"c\"), .preprocess = tolower, EOS = \".\")\n\n\n"} {"package":"sbo","topic":"babble","snippet":"### Name: babble\n### Title: Babble!\n### Aliases: babble\n\n### ** Examples\n\n# Babble!\np <- sbo_predictor(twitter_predtable)\nset.seed(840) # Set seed for reproducibility\nbabble(p)\n\n\n"} {"package":"sbo","topic":"eval_sbo_predictor","snippet":"### Name: eval_sbo_predictor\n### Title: Evaluate Stupid Back-off next-word predictions\n### Aliases: eval_sbo_predictor\n\n### ** Examples\n\n## No test: \n# Evaluating next-word predictions from a Stupid Back-off N-gram model\nif (suppressMessages(require(dplyr) && require(ggplot2))) {\n p <- sbo_predictor(twitter_predtable)\n set.seed(840) # Set seed for reproducibility\n test <- sample(twitter_test, 500)\n eval <- eval_sbo_predictor(p, test)\n \n ## Compute three-word accuracies\n eval %>% summarise(accuracy = sum(correct)/n()) # Overall accuracy\n eval %>% # Accuracy for in-sentence predictions\n filter(true != \"\") %>%\n summarise(accuracy = sum(correct) / n())\n \n ## Make histogram of word-rank distribution for correct predictions\n dict <- attr(twitter_predtable, \"dict\")\n eval %>%\n filter(correct, true != \"\") %>%\n transmute(rank = match(true, table = dict)) %>%\n ggplot(aes(x = rank)) + geom_histogram(binwidth = 30)\n}\n## End(No test)\n\n\n"} {"package":"sbo","topic":"kgram_freqs","snippet":"### Name: kgram_freqs\n### Title: k-gram frequency tables\n### Aliases: kgram_freqs sbo_kgram_freqs kgram_freqs_fast\n### sbo_kgram_freqs_fast\n\n### ** Examples\n\n## No test: \n# Obtain k-gram frequency table from corpus\n## Get k-gram frequencies, for k <= N = 3.\n## The dictionary is built on the fly, 
using the most frequent 1000 words.\nfreqs <- kgram_freqs(corpus = twitter_train, N = 3, dict = max_size ~ 1000,\n .preprocess = preprocess, EOS = \".?!:;\")\nfreqs\n## Using a predefined dictionary\nfreqs <- kgram_freqs_fast(twitter_train, N = 3, dict = twitter_dict,\n erase = \"[^.?!:;'\\\\w\\\\s]\", lower_case = TRUE,\n EOS = \".?!:;\")\nfreqs\n## 2-grams, no preprocessing, use a dictionary covering 50% of corpus\nfreqs <- kgram_freqs(corpus = twitter_train, N = 2, dict = target ~ 0.5,\n EOS = \".?!:;\")\nfreqs\n## End(No test)\n## No test: \n# Obtain k-gram frequency table from corpus\nfreqs <- kgram_freqs_fast(twitter_train, N = 3, dict = twitter_dict)\n## Print result\nfreqs\n## End(No test)\n\n\n"} {"package":"sbo","topic":"plot.word_coverage","snippet":"### Name: plot.word_coverage\n### Title: Plot method for word_coverage objects\n### Aliases: plot.word_coverage\n\n### ** Examples\n\n## No test: \nc <- word_coverage(twitter_dict, twitter_test)\nplot(c)\n## End(No test)\n\n\n"} {"package":"sbo","topic":"predict.sbo_kgram_freqs","snippet":"### Name: predict.sbo_kgram_freqs\n### Title: Predict method for k-gram frequency tables\n### Aliases: predict.sbo_kgram_freqs\n\n### ** Examples\n\npredict(twitter_freqs, \"i love\")\n\n\n"} {"package":"sbo","topic":"predict.sbo_predictor","snippet":"### Name: predict.sbo_predictor\n### Title: Predict method for Stupid Back-off text predictor\n### Aliases: predict.sbo_predictor\n\n### ** Examples\n\np <- sbo_predictor(twitter_predtable)\nx <- predict(p, \"i love\")\nx\nx <- predict(p, \"you love\")\nx\n#N.B. the top predictions here are x[1], followed by x[2] and x[3].\npredict(p, c(\"i love\", \"you love\")) # Behaviour with length()>1 input.\n\n\n"} {"package":"sbo","topic":"preprocess","snippet":"### Name: preprocess\n### Title: Preprocess text corpus\n### Aliases: preprocess\n\n### ** Examples\n\npreprocess(\"Hi @ there! 
I'm using `sbo`.\")\n\n\n"} {"package":"sbo","topic":"prune","snippet":"### Name: prune\n### Title: Prune k-gram objects\n### Aliases: prune prune.sbo_kgram_freqs prune.sbo_predtable\n\n### ** Examples\n\n# Drop k-gram frequencies for k > 2 \nfreqs <- twitter_freqs\nsummary(freqs)\nfreqs <- prune(freqs, N = 2)\nsummary(freqs)\n# Extract a 2-gram model from a larger 3-gram model \npt <- twitter_predtable\nsummary(pt)\npt <- prune(pt, N = 2)\nsummary(pt)\n\n\n"} {"package":"sbo","topic":"sbo_dictionary","snippet":"### Name: sbo_dictionary\n### Title: Dictionaries\n### Aliases: sbo_dictionary dictionary\n\n### ** Examples\n\n## No test: \n# Extract dictionary from `twitter_train` corpus (all words)\ndict <- sbo_dictionary(twitter_train)\n# Extract dictionary from `twitter_train` corpus (top 1000 words)\ndict <- sbo_dictionary(twitter_train, max_size = 1000)\n# Extract dictionary from `twitter_train` corpus (coverage target = 50%)\ndict <- sbo_dictionary(twitter_train, target = 0.5)\n## End(No test)\n\n\n"} {"package":"sbo","topic":"sbo_predictions","snippet":"### Name: sbo_predictions\n### Title: Stupid Back-off text predictions\n### Aliases: sbo_predictions sbo_predictor predictor\n### sbo_predictor.character sbo_predictor.sbo_kgram_freqs\n### sbo_predictor.sbo_predtable sbo_predtable predtable\n### sbo_predtable.character sbo_predtable.sbo_kgram_freqs\n\n### ** Examples\n\n## No test: \n# Train a text predictor directly from corpus\np <- sbo_predictor(twitter_train, N = 3, dict = max_size ~ 1000,\n .preprocess = preprocess, EOS = \".?!:;\")\n## End(No test)\n## No test: \n# Train a text predictor from previously computed 'kgram_freqs' object\np <- sbo_predictor(twitter_freqs)\n## End(No test)\n## No test: \n# Load a text predictor from a Stupid Back-Off prediction table\np <- sbo_predictor(twitter_predtable)\n## End(No test)\n## No test: \n# Predict from Stupid Back-Off text predictor\np <- sbo_predictor(twitter_predtable)\npredict(p, \"i love\")\n## End(No test)\n## No test: \n# Build Stupid Back-Off prediction tables directly from corpus\nt <- sbo_predtable(twitter_train, N = 3, dict = max_size ~ 1000, \n .preprocess = preprocess, EOS = \".?!:;\")\n## End(No test)\n## No test: \n# Build Stupid Back-Off prediction tables from kgram_freqs object\nt <- sbo_predtable(twitter_freqs)\n## End(No test)\n## Not run: \n##D # Save and reload a 'sbo_predtable' object with base::save()\n##D save(t)\n##D load(\"t.rda\")\n## End(Not run)\n\n\n"} {"package":"sbo","topic":"tokenize_sentences","snippet":"### Name: tokenize_sentences\n### Title: Sentence tokenizer\n### Aliases: tokenize_sentences\n\n### ** Examples\n\ntokenize_sentences(\"Hi there! 
I'm using `sbo`.\")\n\n\n"} {"package":"sbo","topic":"twitter_dict","snippet":"### Name: twitter_dict\n### Title: Top 1000 dictionary from Twitter training set\n### Aliases: twitter_dict\n### Keywords: datasets\n\n### ** Examples\n\nhead(twitter_dict, 10)\n\n\n"} {"package":"sbo","topic":"twitter_test","snippet":"### Name: twitter_test\n### Title: Twitter test set\n### Aliases: twitter_test\n### Keywords: datasets\n\n### ** Examples\n\nhead(twitter_test)\n\n\n"} {"package":"sbo","topic":"twitter_train","snippet":"### Name: twitter_train\n### Title: Twitter training set\n### Aliases: twitter_train\n### Keywords: datasets\n\n### ** Examples\n\nhead(twitter_train)\n\n\n"} {"package":"sbo","topic":"word_coverage","snippet":"### Name: word_coverage\n### Title: Word coverage fraction\n### Aliases: word_coverage word_coverage.sbo_dictionary\n### word_coverage.character word_coverage.sbo_kgram_freqs\n### word_coverage.sbo_predictions\n\n### ** Examples\n\n## No test: \nc <- word_coverage(twitter_dict, twitter_train)\nprint(c)\nsummary(c)\n# Plot coverage fraction, including the End-Of-Sentence in word counts.\nplot(c, include_EOS = TRUE)\n## End(No test)\n\n\n"} {"package":"santoku","topic":"brk_default","snippet":"### Name: brk_default\n### Title: Create a standard set of breaks\n### Aliases: brk_default\n\n### ** Examples\n\n\nchop(1:10, c(2, 5, 8))\nchop(1:10, brk_default(c(2, 5, 8)))\n\n\n\n"} {"package":"santoku","topic":"brk_manual","snippet":"### Name: brk_manual\n### Title: Create a 'breaks' object manually\n### Aliases: brk_manual\n\n### ** Examples\n\nlbrks <- brk_manual(1:3, rep(TRUE, 3))\nchop(1:3, lbrks, extend = FALSE)\n\nrbrks <- brk_manual(1:3, rep(FALSE, 3))\nchop(1:3, rbrks, extend = FALSE)\n\nbrks_singleton <- brk_manual(\n c(1, 2, 2, 3),\n c(TRUE, TRUE, FALSE, TRUE))\n\nchop(1:3, brks_singleton, extend = FALSE)\n\n\n\n"} {"package":"santoku","topic":"brk_width-for-datetime","snippet":"### Name: brk_width-for-datetime\n### Title: Equal-width intervals for dates or datetimes\n### Aliases: brk_width-for-datetime brk_width.Duration\n\n### ** Examples\n\n\nif (requireNamespace(\"lubridate\")) {\n year2001 <- as.Date(\"2001-01-01\") + 0:364\n tab_width(year2001, months(1),\n labels = lbl_discrete(\" to \", fmt = \"%e %b %y\"))\n}\n\n\n\n"} {"package":"santoku","topic":"chop","snippet":"### Name: chop\n### Title: Cut data into intervals\n### Aliases: chop kiru tab\n\n### ** Examples\n\n\nchop(1:7, c(2, 4, 6))\n\nchop(1:7, c(2, 4, 6), extend = FALSE)\n\n# Repeat a number for a singleton break:\nchop(1:7, c(2, 4, 4, 6))\n\nchop(1:7, c(2, 4, 6), left = FALSE)\n\nchop(1:7, c(2, 4, 6), close_end = FALSE)\n\nchop(1:7, brk_quantiles(c(0.25, 0.75)))\n\n# A single break is fine if `extend` is not `FALSE`:\nchop(1:7, 4)\n\n# Floating point inaccuracy:\nchop(0.3/3, c(0, 0.1, 0.1, 1), labels = c(\"< 0.1\", \"0.1\", \"> 0.1\"))\n\n# -- Labels --\n\nchop(1:7, c(Lowest = 1, Low = 2, Mid = 4, High = 6))\n\nchop(1:7, c(2, 4, 6), labels = c(\"Lowest\", \"Low\", \"Mid\", \"High\"))\n\nchop(1:7, c(2, 4, 6), labels = lbl_dash())\n\n# Mixing names and other labels:\nchop(1:7, c(\"<2\" = 1, 2, 4, \">=6\" = 6), labels = lbl_dash())\n\n# -- Non-standard types --\n\nchop(as.Date(\"2001-01-01\") + 1:7, as.Date(\"2001-01-04\"))\n\nsuppressWarnings(chop(LETTERS[1:7], \"D\"))\n\n\ntab(1:10, c(2, 5, 8))\n\n\n\n"} {"package":"santoku","topic":"chop_equally","snippet":"### Name: chop_equally\n### Title: Chop equal-sized groups\n### Aliases: chop_equally brk_equally tab_equally\n\n### ** Examples\n\nchop_equally(1:10, 
5)\n\n# You can't always guarantee `groups` groups:\ndupes <- c(1, 1, 1, 2, 3, 4, 4, 4)\nquantile(dupes, 0:4/4)\nchop_equally(dupes, 4)\n\n\n"} {"package":"santoku","topic":"chop_evenly","snippet":"### Name: chop_evenly\n### Title: Chop into equal-width intervals\n### Aliases: chop_evenly brk_evenly tab_evenly\n\n### ** Examples\n\nchop_evenly(0:10, 5)\n\n\n\n"} {"package":"santoku","topic":"chop_fn","snippet":"### Name: chop_fn\n### Title: Chop using an existing function\n### Aliases: chop_fn brk_fn tab_fn\n\n### ** Examples\n\n\nif (requireNamespace(\"scales\")) {\n chop_fn(rlnorm(10), scales::breaks_log(5))\n # same as\n # x <- rlnorm(10)\n # chop(x, scales::breaks_log(5)(x))\n}\n\n\n\n"} {"package":"santoku","topic":"chop_mean_sd","snippet":"### Name: chop_mean_sd\n### Title: Chop by standard deviations\n### Aliases: chop_mean_sd brk_mean_sd tab_mean_sd\n\n### ** Examples\n\nchop_mean_sd(1:10)\n\nchop(1:10, brk_mean_sd())\n\ntab_mean_sd(1:10)\n\n\n\n"} {"package":"santoku","topic":"chop_n","snippet":"### Name: chop_n\n### Title: Chop into fixed-sized groups\n### Aliases: chop_n brk_n tab_n\n\n### ** Examples\n\nchop_n(1:10, 5)\n\nchop_n(1:5, 2)\nchop_n(1:5, 2, tail = \"merge\")\n\n# too many duplicates\nx <- rep(1:2, each = 3)\nchop_n(x, 2)\n\ntab_n(1:10, 5)\n\n# fewer elements in one group\ntab_n(1:10, 4)\n\n\n\n"} {"package":"santoku","topic":"chop_pretty","snippet":"### Name: chop_pretty\n### Title: Chop using pretty breakpoints\n### Aliases: chop_pretty brk_pretty tab_pretty\n\n### ** Examples\n\nchop_pretty(1:10)\n\nchop(1:10, brk_pretty(n = 5, high.u.bias = 0))\n\ntab_pretty(1:10)\n\n\n\n"} {"package":"santoku","topic":"chop_proportions","snippet":"### Name: chop_proportions\n### Title: Chop into proportions of the range of x\n### Aliases: chop_proportions brk_proportions tab_proportions\n\n### ** Examples\n\nchop_proportions(0:10, c(0.2, 0.8))\nchop_proportions(0:10, c(Low = 0, Mid = 0.2, High = 0.8))\n\n\n\n"} {"package":"santoku","topic":"chop_quantiles","snippet":"### Name: chop_quantiles\n### Title: Chop by quantiles\n### Aliases: chop_quantiles chop_deciles brk_quantiles tab_quantiles\n### tab_deciles\n\n### ** Examples\n\nchop_quantiles(1:10, 1:3/4)\n\nchop_quantiles(1:10, c(Q1 = 0, Q2 = 0.25, Q3 = 0.5, Q4 = 0.75))\n\nchop(1:10, brk_quantiles(1:3/4))\n\nchop_deciles(1:10)\n\n# to label by the quantiles themselves:\nchop_quantiles(1:10, 1:3/4, raw = TRUE)\n\nset.seed(42)\ntab_quantiles(rnorm(100), probs = 1:3/4, raw = TRUE)\n\n\n\n"} {"package":"santoku","topic":"chop_width","snippet":"### Name: chop_width\n### Title: Chop into fixed-width intervals\n### Aliases: chop_width brk_width brk_width.default tab_width\n\n### ** Examples\n\nchop_width(1:10, 2)\n\nchop_width(1:10, 2, start = 0)\n\nchop_width(1:9, -2)\n\nchop(1:10, brk_width(2, 0))\n\ntab_width(1:10, 2, start = 0)\n\n\n\n"} {"package":"santoku","topic":"exactly","snippet":"### Name: exactly\n### Title: Define singleton intervals explicitly\n### Aliases: exactly\n\n### ** Examples\n\nchop(1:10, c(2, exactly(5), 8))\n\n# same:\nchop(1:10, c(2, 5, 5, 8))\n\n\n"} {"package":"santoku","topic":"fillet","snippet":"### Name: fillet\n### Title: Chop data precisely (for programmers)\n### Aliases: fillet\n\n### ** Examples\n\nfillet(1:10, c(2, 5, 8))\n\n\n"} {"package":"santoku","topic":"lbl_dash","snippet":"### Name: lbl_dash\n### Title: Label chopped intervals like 1-4, 4-5, ...\n### Aliases: lbl_dash\n\n### ** Examples\n\nchop(1:10, c(2, 5, 8), lbl_dash())\n\nchop(1:10, c(2, 5, 8), lbl_dash(\" to \", fmt = 
\"%.1f\"))\n\nchop(1:10, c(2, 5, 8), lbl_dash(first = \"<{r}\"))\n\npretty <- function (x) prettyNum(x, big.mark = \",\", digits = 1)\nchop(runif(10) * 10000, c(3000, 7000), lbl_dash(\" to \", fmt = pretty))\n\n\n"} {"package":"santoku","topic":"lbl_discrete","snippet":"### Name: lbl_discrete\n### Title: Label discrete data\n### Aliases: lbl_discrete\n\n### ** Examples\n\ntab(1:7, c(1, 3, 5), lbl_discrete())\n\ntab(1:7, c(3, 5), lbl_discrete(first = \"<= {r}\"))\n\ntab(1:7 * 1000, c(1, 3, 5) * 1000, lbl_discrete(unit = 1000))\n\n# Misleading labels for non-integer data\nchop(2.5, c(1, 3, 5), lbl_discrete())\n\n\n\n"} {"package":"santoku","topic":"lbl_endpoints","snippet":"### Name: lbl_endpoints\n### Title: Label chopped intervals by their left or right endpoints\n### Aliases: lbl_endpoints lbl_endpoint\n\n### ** Examples\n\nchop(1:10, c(2, 5, 8), lbl_endpoints(left = TRUE))\nchop(1:10, c(2, 5, 8), lbl_endpoints(left = FALSE))\nif (requireNamespace(\"lubridate\")) {\n tab_width(\n as.Date(\"2000-01-01\") + 0:365,\n months(1),\n labels = lbl_endpoints(fmt = \"%b\")\n )\n}\n\n## Not run: \n##D # This gives breaks `[1, 2) [2, 3) {3}` which lead to\n##D # duplicate labels `\"2\", \"3\", \"3\"`:\n##D chop(1:3, 1:3, lbl_endpoints(left = FALSE))\n## End(Not run)\n\n\n"} {"package":"santoku","topic":"lbl_glue","snippet":"### Name: lbl_glue\n### Title: Label chopped intervals using the 'glue' package\n### Aliases: lbl_glue\n\n### ** Examples\n\ntab(1:10, c(1, 3, 3, 7),\n labels = lbl_glue(\"{l} to {r}\", single = \"Exactly {l}\"))\n\ntab(1:10 * 1000, c(1, 3, 5, 7) * 1000,\n labels = lbl_glue(\"{l}-{r}\",\n fmt = function(x) prettyNum(x, big.mark=',')))\n\n# reproducing lbl_intervals():\ninterval_left <- \"{ifelse(l_closed, '[', '(')}\"\ninterval_right <- \"{ifelse(r_closed, ']', ')')}\"\nglue_string <- paste0(interval_left, \"{l}\", \", \", \"{r}\", interval_right)\ntab(1:10, c(1, 3, 3, 7), labels = lbl_glue(glue_string, single = \"{{{l}}}\"))\n\n\n\n"} {"package":"santoku","topic":"lbl_intervals","snippet":"### Name: lbl_intervals\n### Title: Label chopped intervals using set notation\n### Aliases: lbl_intervals\n\n### ** Examples\n\n\ntab(-10:10, c(-3, 0, 0, 3),\n labels = lbl_intervals())\n\ntab(-10:10, c(-3, 0, 0, 3),\n labels = lbl_intervals(fmt = list(nsmall = 1)))\n\ntab_evenly(runif(20), 10,\n labels = lbl_intervals(fmt = percent))\n\n\n\n"} {"package":"santoku","topic":"lbl_manual","snippet":"### Name: lbl_manual\n### Title: Label chopped intervals in a user-defined sequence\n### Aliases: lbl_manual\n### Keywords: internal\n\n### ** Examples\n\nchop(1:10, c(2, 5, 8), lbl_manual(c(\"w\", \"x\", \"y\", \"z\")))\n# ->\nchop(1:10, c(2, 5, 8), labels = c(\"w\", \"x\", \"y\", \"z\"))\n\n\n"} {"package":"santoku","topic":"lbl_midpoints","snippet":"### Name: lbl_midpoints\n### Title: Label chopped intervals by their midpoints\n### Aliases: lbl_midpoints\n\n### ** Examples\n\nchop(1:10, c(2, 5, 8), lbl_midpoints())\n\n\n"} {"package":"santoku","topic":"lbl_seq","snippet":"### Name: lbl_seq\n### Title: Label chopped intervals in sequence\n### Aliases: lbl_seq\n\n### ** Examples\n\nchop(1:10, c(2, 5, 8), lbl_seq())\n\nchop(1:10, c(2, 5, 8), lbl_seq(\"i.\"))\n\nchop(1:10, c(2, 5, 8), lbl_seq(\"(A)\"))\n\n\n"} {"package":"santoku","topic":"percent","snippet":"### Name: percent\n### Title: Simple percentage formatter\n### Aliases: percent\n\n### ** Examples\n\npercent(0.5)\n\n\n"} {"package":"discretefit","topic":"chisq_gof","snippet":"### Name: chisq_gof\n### Title: Simulated Chi-squared 
goodness-of-fit test\n### Aliases: chisq_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\nchisq_gof(x, p)\n\n\n\n"} {"package":"discretefit","topic":"cvm_gof","snippet":"### Name: cvm_gof\n### Title: Simulated Cramer-von Mises goodness-of-fit test\n### Aliases: cvm_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\ncvm_gof(x, p)\n\n\n\n"} {"package":"discretefit","topic":"ft_gof","snippet":"### Name: ft_gof\n### Title: Simulated Freeman-Tukey (Hellinger-distance) goodness-of-fit\n### test\n### Aliases: ft_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\nft_gof(x, p)\n\n\n\n"} {"package":"discretefit","topic":"g_gof","snippet":"### Name: g_gof\n### Title: Simulated log-likelihood-ratio (G^2) goodness-of-fit test\n### Aliases: g_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\ng_gof(x, p)\n\n\n\n"} {"package":"discretefit","topic":"ks_gof","snippet":"### Name: ks_gof\n### Title: Simulated Kolmogorov-Smirnov goodness-of-fit test\n### Aliases: ks_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\nks_gof(x, p)\n\n\n\n"} {"package":"discretefit","topic":"rms_gof","snippet":"### Name: rms_gof\n### Title: Simulated root-mean-square goodness-of-fit test\n### Aliases: rms_gof\n\n### ** Examples\n\nx <- c(15, 36, 17)\np <- c(0.25, 0.5, 0.25)\n\nrms_gof(x, p)\n\n\n\n"} {"package":"readsdmx","topic":"read_sdmx","snippet":"### Name: read_sdmx\n### Title: Read SDMX data\n### Aliases: read_sdmx\n\n### ** Examples\n\nf <- system.file(\"extdata/compact_2.0.xml\", package = \"readsdmx\")\nd <- readsdmx::read_sdmx(f)\n\n## No test: \nu <-\n \"https://stats.oecd.org/restsdmx/sdmx.ashx/GetData/HH_DASH/..Q/all?format=compact_v2\"\nd <- readsdmx::read_sdmx(u)\n## End(No test)\n\n\n\n"} {"package":"cnaOpt","topic":"cnaOpt","snippet":"### Name: cnaOpt\n### Title: Find atomic solution formulas with optimal consistency and\n### coverage\n### Aliases: cnaOpt\n\n### ** Examples\n\n# Example 1: Real-life crisp-set data, d.educate.\n(res_opt1 <- cnaOpt(d.educate, \"E\"))\n\n# Using the pipe operator (%>%), the steps processed by cnaOpt in the \n# call above can be reproduced as follows:\nlibrary(dplyr)\nconCovOpt(d.educate, \"E\") %>% selectMax %>% DNFbuild(reduce = \"ereduce\") %>% \n paste(\"<-> E\") %>% condTbl(d.educate)\n\n# Example 2: Simulated crisp-set data.\ndat1 <- data.frame(\n A = c(1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0), \n B = c(0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0), \n C = c(0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0), \n D = c(1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1), \n E = c(1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1), \n F = c(0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1)\n)\n\n(res_opt2 <- cnaOpt(dat1, \"E\"))\n\n# Change the maximality criterion.\ncnaOpt(dat1, \"E\", crit = quote(min(con, cov)))\n# Change the selection condition.\ncnaOpt(dat1, \"E\", cond = quote(con >= 0.9))\n# Build all con-cov optima with coverage above 0.9 that maximize min(con, cov).\ncnaOpt(dat1, \"E\", crit = quote(min(con, cov)), cond = quote(cov > 0.9))\n# Different values of the reduce argument.\ncnaOpt(dat1, \"E\", reduce = \"none\") # canonical DNF\ncnaOpt(dat1, \"E\", reduce = \"rreduce\") # one randomly drawn optimal solution\n# Iterate random solution generation 10 times.\ncnaOpt(dat1, \"E\", reduce = \"rreduce\", niter = 10) \n\n# Example 3: All logically possible configurations.\n(res_opt3 <- cnaOpt(full.ct(4), \"D\")) # All combinations are equally bad.\n\n# Example 
4: Real-life multi-value data, d.pban.\ncnaOpt(d.pban, outcome = \"PB=1\")\ncnaOpt(d.pban, outcome = \"PB=1\", crit = quote(0.8*con + 0.2*cov))\ncnaOpt(d.pban, outcome = \"PB=1\", cond = quote(con > 0.9))\ncnaOpt(d.pban, outcome = \"PB=0\")\ncnaOpt(d.pban, outcome = \"PB=0\", cond = quote(con > 0.9))\ncnaOpt(d.pban, outcome = \"F=2\")\ncnaOpt(d.pban, outcome = \"F=2\", crit = quote(0.8*con + 0.2*cov))\n\n# Example 5: High computational demand.\ndat2 <- configTable(d.performance[,1:8], frequency = d.performance$frequency)\ntry(cnaOpt(dat2, outcome = \"SP\")) # error because too computationally demanding\n# The following call does not terminate because of reduce = \"ereduce\".\ntry(cnaOpt(dat2, outcome = \"SP\", approx = TRUE))\n# We could increase maxCombs, as in the line below\n## Not run: cnaOpt(dat2, outcome = \"SP\", approx = TRUE, maxCombs = 1.08e+09) \n# but this takes very long to terminate.\n# Alternative approach: Produce one (randomly selected) optimal solution using reduce = \"rreduce\".\ncnaOpt(dat2, outcome = \"SP\", approx = TRUE, reduce = \"rreduce\")\n# Iterate the previous call 10 times.\n## No test: \ncnaOpt(dat2, outcome = \"SP\", approx = TRUE, reduce = \"rreduce\", niter = 10)\n## End(No test)\n# Another alternative: Use ereduce for minimization but introduce a case.cutoff.\ncnaOpt(dat2, outcome = \"SP\", case.cutoff = 10)\n\n\n"} {"package":"cnaOpt","topic":"conCovOpt","snippet":"### Name: conCovOpt\n### Title: Find consistency and coverage optima for configurational data\n### Aliases: conCovOpt print.conCovOpt plot.conCovOpt\n\n### ** Examples\n\n(cco.irrigate <- conCovOpt(d.irrigate))\nconCovOpt(d.irrigate, outcome = c(\"R\",\"W\"))\n# Plot method.\nplot(cco.irrigate)\nplot(cco.irrigate, con = .8, cov = .8)\n\ndat1 <- d.autonomy[15:30, c(\"EM\",\"SP\",\"CO\",\"AU\")]\n(cco1 <- conCovOpt(dat1, outcome = \"AU\"))\n\nprint(cco1, digits = 3, row.names = TRUE)\nplot(cco1)\n\n# Exo-groups (configurations with constant values in all factors other than the outcome).\nattr(cco1$A, \"exoGroups\")\n\n# Rep-list (list of values optimally reproducing the outcome).\nattr(cco1$A, \"reprodList\")\n\ndat2 <- d.pacts\n# Maximal number of combinations exceeds maxCombs.\n(cco2 <- conCovOpt(dat2, outcome = \"PACT\")) # Generates a warning\n# Increase maxCombs.\n## No test: \n(cco2_full <- try(conCovOpt(dat2, outcome = \"PACT\", \n maxCombs=1e+08))) # Takes a long time to terminate\n## End(No test)\n# Approximate an exhaustive search.\n(cco2_approx1 <- conCovOpt(dat2, outcome = \"PACT\", approx = TRUE))\nselectMax(cco2_approx1)\n# The search space can also be reduced by means of a case cutoff.\n(cco2_approx2 <- conCovOpt(dat2, outcome = \"PACT\", case.cutoff=2))\nselectMax(cco2_approx2)\n\n\n\n"} {"package":"cnaOpt","topic":"reprodAssign","snippet":"### Name: conCovOpt_utils\n### Title: Build disjunctive normal forms realizing con-cov optima\n### Aliases: reprodAssign DNFbuild\n\n### ** Examples\n\n# CS data, d.educate\ncco1 <- conCovOpt(d.educate)\nbest1 <- selectMax(cco1)\nreprodAssign(best1, outcome = \"E\")\nDNFbuild(best1, outcome = \"E\")\nDNFbuild(best1, outcome = \"E\", reduce = FALSE) # canonical DNF\nDNFbuild(best1, outcome = \"E\", reduce = \"ereduce\") # all redundancy-free DNFs\nDNFbuild(best1, outcome = \"E\", reduce = \"rreduce\") # one redundancy-free DNF\nDNFbuild(best1, outcome = \"E\", reduce = \"none\") # canonical DNF\n\n# Simulated mv data\ndatMV <- data.frame(\n A = c(3,2,1,1,2,3,2,2,2,1,1,2,3,2,2,2,1,2,3,3,3,1,1,1,3,1,2,1,2,3,3,2,2,2,1,2,2,3,2,1,2,1,3,3),\n B = 
c(1,2,3,2,1,1,2,1,2,2,3,1,1,1,2,3,1,3,3,3,1,1,3,2,2,1,1,3,3,2,3,1,2,1,2,2,1,1,2,2,3,3,3,3),\n C = c(1,3,3,3,1,1,1,2,2,3,3,1,1,2,2,2,3,1,1,2,1,2,2,3,3,1,2,2,2,3,2,1,1,2,2,2,1,1,1,2,2,1,1,2),\n D = c(3,1,2,2,1,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,1,1,1,1,1,2,2,2,2,2,3,1,1,1,1,1,2,2,2,2,2,3,3,3),\n E = c(3,2,2,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3)\n)\n\n# Apply conCovOpt and selectMax.\n(cco2 <- conCovOpt(datMV))\n(best2 <- selectMax(cco2))\n\n# Apply DNFbuild to build the redundancy-free DNFs reaching best2.\n(formula1 <- DNFbuild(best2, outcome = \"D=3\"))\n# Both DNFs in formula1 reach the con-cov score stored in best2 for outcome \"D=3\".\ncondTbl(paste0(formula1, \"<-> D=3\"), datMV)\n# Build only one redundancy-free DNF reaching best2.\nDNFbuild(best2, outcome = \"D=3\", reduce = \"rreduce\")\n# Any factor value in datMV can be treated as an outcome.\n(formula2 <- DNFbuild(best2, outcome = \"E=3\", reduce = \"rreduce\"))\ncondTbl(paste0(formula2, \"<-> E=3\"), datMV)\n# Any con-cov optimum in cco2 can be targeted via its identifier.\n(formula3 <- DNFbuild(best2, outcome = \"E=3\", id = 508))\ncondTbl(paste0(formula3, \"<-> E=3\"), datMV)\n\n# Simulated fs data\ndatFS <- data.frame(\n A = c(.73, .85, .94, .36, .73, .79, .39, .82, .15, .12, .67, .27, .3), \n B = c(.21, .03, .91, .64, .39, .12, .06, .7, .73, .15, .88, .73, .36), \n C = c(.61, 0, .61, 1, .94, .15, .88, .27, .12, .12, .27, .15, .15), \n D = c(.64, .67, .3, .06, .33, .03, .76, .94, .67, .76, .18, .27, .36), \n E = c(.91, .94, .67, .85, .73, .79, .24, .09, .03, .21, .33, .36, .27)\n)\n\n# Apply conCovOpt and selectMax.\n(cco3 <- conCovOpt(datFS, outcome = \"E\"))\n(best3 <- selectMax(cco3))\n\n# Apply reprodAssign.\nreprodAssign(best3, outcome = \"E\")\n# Select a con-cov optimum in cco3 via its identifier.\nreprodAssign(best3, outcome = \"E\", id = 252)\n\n# DNFbuild does not work for fs data; it generates an error.\ntry(DNFbuild(best3, outcome = \"E\"))\n\n\n"} {"package":"cnaOpt","topic":"ereduce","snippet":"### Name: ereduce\n### Title: Find all minimal disjunctive normal forms (DNF) of an input DNF\n### Aliases: ereduce\n\n### ** Examples\n\n# Logical redundancies.\ncond1 <- \"A*b + a*B + A*C + B*C\"\nereduce(cond1)\nrreduce(cond1) # repeated calls generate different outputs\ncond2 <- \"A*b + a*B + A*B + a*b\"\nereduce(cond2)\nereduce(cond2, simplify2constant = FALSE)\n\n# Redundancy elimination relative to simulated cs data.\ndat1 <- data.frame(\n A = c(0, 0, 0, 0, 1, 1, 0, 1), \n B = c(0, 1, 0, 1, 1, 0, 0, 0), \n C = c(1, 1, 0, 1, 1, 0, 1, 1), \n D = c(0, 0, 0, 0, 0, 1, 1, 1))\ncco1 <- conCovOpt(dat1, \"D\")\nbest1 <- selectMax(cco1)\n(formula1 <- DNFbuild(best1, outcome = \"D\", reduce = FALSE))\n# ereduce\nereduce(formula1, dat1, full = FALSE)\n# rreduce\nrreduce(formula1, dat1, full = FALSE)\n\n# Redundancy elimination relative to simulated mv data.\ndat2 <- data.frame(\n A = c(3,2,1,1,2,3,2,2,2,1,1,2,3,2,2,2,1,2,3,3,3,1,1,1,3,1,2,1,2,3,3,2,2,2,1,2,2,3,2,1,2,1,3,3),\n B = c(1,2,3,2,1,1,2,1,2,2,3,1,1,1,2,3,1,3,3,3,1,1,3,2,2,1,1,3,3,2,3,1,2,1,2,2,1,1,2,2,3,3,3,3),\n C = c(1,3,3,3,1,1,1,2,2,3,3,1,1,2,2,2,3,1,1,2,1,2,2,3,3,1,2,2,2,3,2,1,1,2,2,2,1,1,1,2,2,1,1,2),\n D = c(3,1,2,2,1,1,1,1,1,1,1,2,2,2,2,2,2,3,3,3,1,1,1,1,1,2,2,2,2,2,3,1,1,1,1,1,2,2,2,2,2,3,3,3),\n E = c(3,2,2,3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,2,2,2,2,2,2,2,2,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3)\n)\ncco2 <- conCovOpt(dat2, \"D=3\")\nbest2 <- selectMax(cco2)\n(formula2 <- DNFbuild(best2, outcome = \"D=3\", reduce = FALSE))\n# 
ereduce\nereduce(formula2, dat2, full = FALSE)\n# rreduce\nrreduce(formula2, dat2, full = FALSE)\n\n# Any Boolean expressions.\ncond <- \"!(A*B*C)*!(a*b*c)\" # or \"A + B*!(D + e) <-> C\" \nx <- selectCases(cond) \n(cond <- cna:::getCond(x)) # returns a DNF equivalent to cond, but with many redundancies\nereduce(cond)\nrreduce(cond)\n\n\n"} {"package":"cnaOpt","topic":"findOutcomes","snippet":"### Name: findOutcomes\n### Title: Identify the factors that can possibly be modeled as outcomes\n### prior to running CNA\n### Aliases: findOutcomes\n\n### ** Examples\n\n# Crisp-set data.\nfindOutcomes(d.educate)\nfindOutcomes(d.educate, con = 0.75, cov = 0.75)\nx <- configTable(d.performance[,1:8], frequency = d.performance$frequency)\nfindOutcomes(x, con = .7, cov = .7) # too computationally demanding\n# Approximate by passing approx = TRUE to conCovOpt().\nfindOutcomes(x, con = .7, cov = .7, approx = TRUE) \n# Approximate by passing a case cutoff to configTable().\nfindOutcomes(x, con = .7, cov = .7, case.cutoff = 10)\n\n# A causal chain.\ntarget1 <- \"(A + B <-> C)*(C + D <-> E)\"\ndat1 <- selectCases(target1)\nfindOutcomes(dat1)\n\n# A causal cycle.\ntarget2 <- \"(A + Y1 <-> B)*(B + Y2 <-> A)*(A + Y3 <-> C)\" \ndat2 <- selectCases(target2, full.ct(target2))\nfindOutcomes(dat2)\n\n# Multi-value data.\nfindOutcomes(d.pban) # no possible outcomes at con = cov = 1 \nfindOutcomes(d.pban, con = 0.8) \nfindOutcomes(d.pban, con = 0.8, cov= 0.8) \n\n# Fuzzy-set data.\nfindOutcomes(d.jobsecurity) # no possible outcomes at con = cov = 1 \nfindOutcomes(d.jobsecurity, con = 0.86) \n\n\n"} {"package":"cnaOpt","topic":"selectMax","snippet":"### Name: selectMax\n### Title: Select the con-cov optima from a \"conCovOpt\" object that\n### maximize a specified optimality criterion\n### Aliases: selectMax multipleMax\n\n### ** Examples\n\ndat1 <- d.autonomy[15:30, c(\"EM\",\"SP\",\"CO\",\"AU\")]\n(cco1 <- conCovOpt(dat1, outcome = \"AU\"))\nselectMax(cco1)\nselectMax(cco1, cond = quote(con > 0.95))\nselectMax(cco1, cond = quote(cov > 0.98))\nselectMax(cco1, crit = quote(min(con, cov)))\nselectMax(cco1, crit = quote(max(con, cov)), cond = quote(cov > 0.9))\n\n# Multiple equally good maxima.\n(cco2 <- conCovOpt(dat1, outcome = \"AU\")) \n(sm2 <- selectMax(cco2, cond = quote(con > 0.93)))\n# Each maximum corresponds to a different rep-assignment, which can be selected\n# using the id argument.\nreprodAssign(sm2, \"AU\", id = 10)\nreprodAssign(sm2, \"AU\", id = 11)\nreprodAssign(sm2, \"AU\", id = 13)\n\n\n"} {"package":"adw","topic":"adw","snippet":"### Name: adw\n### Title: Angular Distance Weighting Interpolation.\n### Aliases: adw\n\n### ** Examples\n\nset.seed(2)\ndd <- data.frame(lon = runif(100, min = 110, max = 117),\n lat = runif(100, min = 31, max = 37),\n value = runif(100, min = -10, max = 10))\nhead(dd)\n\n# example 1\ngrd <- adw(dd, extent = c(110, 117, 31, 37), gridsize = 0.5, cdd = 500)\nhead(grd)\n\n# example 2\nurlmap <- \"https://geo.datav.aliyun.com/areas_v3/bound/410000.json\"\nhmap <- sf::read_sf(urlmap, as_tibble = FALSE) |> sf::st_make_valid() # return a 'sf' object.\ngrd <- adw_sf(dd, extent = hmap, gridsize = 0.5, cdd = 500)\nhead(grd)\n\n# example 3\nurlmap <- \"https://geo.datav.aliyun.com/areas_v3/bound/410000.json\"\nhmap <- terra::vect(urlmap) # return a 'SpatVector' object.\ngrd <- adw(dd, extent = hmap, gridsize = 0.5, cdd = 500)\nhead(grd)\n\n\n"} {"package":"adw","topic":"adw_sf","snippet":"### Name: adw_sf\n### Title: Angular Distance Weighting Interpolation, adw_sf.\n### Aliases: 
adw_sf\n\n### ** Examples\n\nset.seed(2)\ndd <- data.frame(lon = runif(100, min = 110, max = 117),\n lat = runif(100, min = 31, max = 37),\n value = runif(100, min = -10, max = 10))\nhead(dd)\nurlmap <- \"https://geo.datav.aliyun.com/areas_v3/bound/410000.json\"\nhmap <- sf::read_sf(urlmap, as_tibble = FALSE) |> sf::st_make_valid() # return a 'sf' object.\ngrd <- adw_sf(dd, extent = hmap, gridsize = 0.5, cdd = 500)\nhead(grd)\n\n\n"} {"package":"adw","topic":"adw_terra","snippet":"### Name: adw_terra\n### Title: Angular Distance Weighting Interpolation, adw_terra.\n### Aliases: adw_terra\n\n### ** Examples\n\nset.seed(2)\ndd <- data.frame(lon = runif(100, min = 110, max = 117),\n lat = runif(100, min = 31, max = 37),\n value = runif(100, min = -10, max = 10))\nhead(dd)\n# example\nurlmap <- \"https://geo.datav.aliyun.com/areas_v3/bound/410000.json\"\nhmap <- terra::vect(urlmap) # return a 'SpatVector' object.\ngrd <- adw(dd, extent = hmap, gridsize = 0.5, cdd = 500)\nhead(grd)\n\n\n"} {"package":"adw","topic":"adw_vector","snippet":"### Name: adw_vector\n### Title: Angular Distance Weighting Interpolation, adw_vector.\n### Aliases: adw_vector\n\n### ** Examples\n\nset.seed(2)\ndd <- data.frame(lon = runif(100, min = 110, max = 117),\n lat = runif(100, min = 31, max = 37),\n value = runif(100, min = -10, max = 10))\nhead(dd)\n# example\ngrd <- adw(dd, extent = c(110, 117, 31, 37), gridsize = 0.5, cdd = 500)\nhead(grd)\n\n\n"} {"package":"jetset","topic":"jmap","snippet":"### Name: jmap\n### Title: Retrieve jetset mapped probe sets\n### Aliases: jmap\n### Keywords: misc\n\n### ** Examples\n\n genes <- c('MKI67', 'CHD5', 'ESR1', 'FGF19', 'ERBB2', 'NoSuchGene')\n\n # This generates several informative warnings\n jmap('hgu133a', symbol = genes)\n\n\n\n"} {"package":"jetset","topic":"jscores","snippet":"### Name: jscores\n### Title: Retrieve jetset scores for probe sets\n### Aliases: jscores\n### Keywords: misc\n\n### ** Examples\n\n genes <- c('MKI67', 'CHD5', 'ESR1', 'FGF19', 'ERBB2', 'NoSuchGene')\n\n # This generates several informative warnings\n jscores('hgu133a', symbol = genes)\n\n\n\n"} {"package":"jetset","topic":"scores.hgu95av2","snippet":"### Name: scores\n### Title: Data: Probe set quality scores\n### Aliases: scores.hgu95av2 scores.hgu133a scores.hgu133plus2\n### scores.u133x3p\n### Keywords: datasets\n\n### ** Examples\n\n ## Here is the EntrezID for the ESR1 gene\n id <- \"2099\"\n \n ## Extract the scores for all probe sets detecting ESR1\n scores.hgu95av2[which(scores.hgu95av2$EntrezID == id), ]\n\n ## Compare to the recommended function 'jscores'\n jscores(\"hgu95av2\", eg = \"2099\")\n\n\n\n"} {"package":"wkb","topic":"hex2raw","snippet":"### Name: hex2raw\n### Title: Convert String Hex Representation to Raw Vector\n### Aliases: hex2raw\n\n### ** Examples\n\n# create a character string containing a hexadecimal representation\nhex <- \"0101000000000000000000f03f0000000000000840\"\n\n# convert to raw vector\nwkb <- hex2raw(hex)\n\n\n# create a character vector containing a hexadecimal representation\nhex <- c(\"01\", \"01\", \"00\", \"00\", \"00\", \"00\", \"00\", \"00\", \"00\", \"00\", \"00\",\n \"f0\", \"3f\", \"00\", \"00\", \"00\", \"00\", \"00\", \"00\", \"08\", \"40\")\n\n# convert to raw vector\nwkb <- hex2raw(hex)\n\n\n# create vector of two character strings each containing a hex representation\nhex <- c(\"0101000000000000000000f03f0000000000000840\",\n \"010100000000000000000000400000000000000040\")\n\n# convert to list of two raw vectors\nwkb <- 
hex2raw(hex)\n\n\n"} {"package":"wkb","topic":"readWKB","snippet":"### Name: readWKB\n### Title: Convert WKB to Spatial Objects\n### Aliases: readWKB\n### Keywords: wkb\n\n### ** Examples\n\n# create a list of WKB geometry representations of type Point\nwkb <- list(\n as.raw(c(0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40)),\n as.raw(c(0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40))\n)\n\n# convert to object of class SpatialPoints\nobj <- readWKB(wkb)\n\n\n# create a list of WKB geometry representations of type MultiPoint\nwkb <- list(\n as.raw(c(0x01, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x3f,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x40)),\n as.raw(c(0x01, 0x04, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x01,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40)))\n\n# convert to list of objects of class SpatialPoints\nobj <- readWKB(wkb)\n\n\n# create a list of WKB geometry representations of type MultiLineString\nwkb <- list(\n as.raw(c(0x01, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x02,\n 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08,\n 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0x00, 0x40)),\n as.raw(c(0x01, 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x02,\n 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\n 0x00, 0x00, 0xf0, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0,\n 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,\n 0x00, 0x00, 0x00, 0x00, 0xf8, 0x3f)))\n\n# convert to object of class SpatialLines\nobj <- readWKB(wkb)\n\n\n# create a list of WKB geometry representations of type Polygon\nwkb <- list(\n as.raw(c(0x01, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00,\n 0x00, 0x00, 0x34, 0x03, 0xf0, 0xac, 0xce, 0x66, 0x5d, 0xc0, 0x8f,\n 0x27, 0x95, 0x21, 0xab, 0xa6, 0x44, 0x40, 0xa0, 0x32, 0x81, 0x18,\n 0x78, 0x83, 0x5d, 0xc0, 0xc8, 0xd2, 0xa0, 0xee, 0x23, 0x0b, 0x41,\n 0x40, 0x80, 0xec, 0x72, 0x54, 0xde, 0xb1, 0x5f, 0xc0, 0xc8, 0xd2,\n 0xa0, 0xee, 0x23, 0x0b, 0x41, 0x40, 0xec, 0x1b, 0x04, 0xc0, 0x87,\n 0xce, 0x5f, 0xc0, 0x8f, 0x27, 0x95, 0x21, 0xab, 0xa6, 0x44, 0x40,\n 0x34, 0x03, 0xf0, 0xac, 0xce, 0x66, 0x5d, 0xc0, 0x8f, 0x27, 0x95,\n 0x21, 0xab, 0xa6, 0x44, 0x40)),\n as.raw(c(0x01, 0x03, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x05, 0x00,\n 0x00, 0x00, 0x08, 0x36, 0xdc, 0x8b, 0x9f, 0x3d, 0x51, 0xc0, 0x0f,\n 0xb3, 0x2a, 0x6a, 0x3f, 0x1c, 0x46, 0x40, 0x47, 0xcb, 0x54, 0xe7,\n 0xcb, 0x5e, 0x51, 0xc0, 0x45, 0x81, 0x50, 0x31, 0xfa, 0x80, 0x42,\n 0x40, 0xa9, 0xba, 0x74, 0x6d, 0xf5, 0xa1, 0x53, 0xc0, 0x45, 0x81,\n 0x50, 0x31, 0xfa, 0x80, 0x42, 0x40, 0xe8, 0x4f, 0xed, 0xc8, 0x21,\n 0xc3, 0x53, 0xc0, 0x0f, 0xb3, 0x2a, 0x6a, 0x3f, 0x1c, 0x46, 0x40,\n 0x08, 0x36, 0xdc, 0x8b, 0x9f, 0x3d, 0x51, 0xc0, 0x0f, 0xb3, 0x2a,\n 0x6a, 0x3f, 0x1c, 0x46, 0x40)))\n\n# convert to object of class SpatialPolygons\nobj <- readWKB(wkb)\n\n\n# specify id and proj4string\nobj <- readWKB(\n wkb,\n id = c(\"San Francisco\", \"New York\"),\n proj4string = sp::CRS(\"+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs\")\n)\n\n\n"} {"package":"wkb","topic":"writeWKB","snippet":"### Name: 
writeWKB\n### Title: Convert Spatial Objects to WKB\n### Aliases: writeWKB\n### Keywords: wkb\n\n### ** Examples\n\n# load package sp\nlibrary(sp)\n\n# create an object of class SpatialPoints\nx = c(1, 2)\ny = c(3, 2)\nobj <- SpatialPoints(data.frame(x, y))\n\n# convert to WKB Point\nwkb <- writeWKB(obj)\n\n\n# create a list of objects of class SpatialPoints\nx1 = c(1, 2, 3, 4, 5)\ny1 = c(3, 2, 5, 1, 4)\nx2 <- c(9, 10, 11, 12, 13)\ny2 <- c(-1, -2, -3, -4, -5)\nSp1 <- SpatialPoints(data.frame(x1, y1))\nSp2 <- SpatialPoints(data.frame(x2, y2))\nobj <- list(\"a\"=Sp1, \"b\"=Sp2)\n\n# convert to WKB MultiPoint\nwkb <- writeWKB(obj)\n\n\n# create an object of class SpatialLines\nl1 <- data.frame(x = c(1, 2, 3), y = c(3, 2, 2))\nl1a <- data.frame(x = l1[, 1] + .05, y = l1[, 2] + .05)\nl2 <- data.frame(x = c(1, 2, 3), y = c(1, 1.5, 1))\nSl1 <- Line(l1)\nSl1a <- Line(l1a)\nSl2 <- Line(l2)\nS1 <- Lines(list(Sl1, Sl1a), ID = \"a\")\nS2 <- Lines(list(Sl2), ID = \"b\")\nobj <- SpatialLines(list(S1, S2))\n\n# convert to WKB MultiLineString\nwkb <- writeWKB(obj)\n\n\n# create an object of class SpatialPolygons\ntriangle <- Polygons(\n list(\n Polygon(data.frame(x = c(2, 2.5, 3, 2), y = c(2, 3, 2, 2)))\n ), \"triangle\")\nrectangles <- Polygons(\n list(\n Polygon(data.frame(x = c(0, 0, 1, 1, 0), y = c(0, 1, 1, 0, 0))),\n Polygon(data.frame(x = c(0, 0, 2, 2, 0), y = c(-2, -1, -1, -2, -2)))\n ), \"rectangles\")\nobj <- SpatialPolygons(list(triangle, rectangles))\n\n# convert to WKB MultiPolygon\nwkb <- writeWKB(obj)\n\n\n# use the WKB as a column in a data frame\nds <- data.frame(ID = c(\"a\",\"b\"), Geometry = wkb)\n\n# calculate envelope columns and cbind to the data frame\ncoords <- writeEnvelope(obj)\nds <- cbind(ds, coords)\n\n\n"} {"package":"LSX","topic":"as.textmodel_lss","snippet":"### Name: as.textmodel_lss\n### Title: Create a dummy textmodel_lss object from external objects\n### Aliases: as.textmodel_lss\n### Keywords: internal\n\n### ** Examples\n\nv <- c(\"a\" = 0.1, \"z\" = -0.2, \"d\" = 0.3, \"h\" = -0.05)\nlss <- as.textmodel_lss(v)\n\n\n\n"} {"package":"LSX","topic":"data_dictionary_ideology","snippet":"### Name: data_dictionary_ideology\n### Title: Seed words for analysis of left-right political ideology\n### Aliases: data_dictionary_ideology\n\n### ** Examples\n\nas.seedwords(data_dictionary_ideology)\n\n\n"} {"package":"LSX","topic":"data_dictionary_sentiment","snippet":"### Name: data_dictionary_sentiment\n### Title: Seed words for analysis of positive-negative sentiment\n### Aliases: data_dictionary_sentiment\n\n### ** Examples\n\nas.seedwords(data_dictionary_sentiment)\n\n\n"} {"package":"LSX","topic":"seedwords","snippet":"### Name: seedwords\n### Title: Seed words for Latent Semantic Analysis\n### Aliases: seedwords\n\n### ** Examples\n\nseedwords('sentiment')\n\n\n"}
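The LSX snippets above show as.seedwords() turning a bundled dictionary into signed seed words and as.textmodel_lss() wrapping an external named vector as a dummy model. A short sketch combining the two, using only the functions shown here; the polarity vector is made up for illustration:

library(LSX)
# signed (+1/-1) seed words from the bundled sentiment dictionary
seed <- as.seedwords(data_dictionary_sentiment)
# wrap hypothetical word polarity scores as a dummy textmodel_lss object
v <- c("good" = 0.2, "nice" = 0.1, "bad" = -0.3, "poor" = -0.1)
lss <- as.textmodel_lss(v)
# document scores could then be obtained with predict(lss, newdata = x),
# where x is a quanteda dfm of the target corpus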