# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("def-model.RDS", "validate-clean.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The validation file name
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The extrinsic evaluation is performed
stats <- me$extrinsic_evaluation(lc = 100, fn = vfn)
# The evaluation stats are printed
print(stats)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/extrinsic-evaluation.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used.
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("input.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve)
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The names of the data files that will be generated
fns <- c("train", "test", "validate")
# An object of class DataSampler is created
ds <- DataSampler$new(dir = ed, ve = ve)
# The train, test and validation files are generated
ds$generate_data(
fn = "input.txt",
percs = list(
"train" = 0.8,
"test" = 0.1,
"validate" = 0.1
)
)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/generate-data.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("input.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# ModelGenerator class object is created
mg <- ModelGenerator$new(
name = "default-model",
desc = "1 MB size and default options",
fn = "def-model.RDS",
df = "input.txt",
n = 4,
ssize = 0.99,
dir = ed,
dc_opts = list(),
tg_opts = list(),
ve = ve
)
# The n-gram model is generated
mg$generate_model()
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/generate-model.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("input.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The sample file name
sfn <- paste0(ed, "/sample.txt")
# An object of class DataSampler is created
ds <- DataSampler$new(dir = ed, ve = ve)
# The sample file is generated
ds$generate_sample(
fn = "input.txt",
ss = 0.5,
ic = FALSE,
ir = FALSE,
ofn = "sample.txt",
is = TRUE
)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/generate-sample.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("test-clean.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The n-gram size
n <- 4
# The test file name
tfn <- paste0(ed, "/test-clean.txt")
# The ngram number is set
tg_opts <- list("n" = n, "save_ngrams" = TRUE, "dir" = ed)
# The TokenGenerator object is created
tg <- TokenGenerator$new(tfn, tg_opts, ve = ve)
# The ngram tokens are generated
tg$generate_tokens()
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/generate-tokens.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("n1.RDS", "n2.RDS", "n3.RDS", "n4.RDS")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The list of output files
fns <- c("words", "model-4", "tp2", "tp3", "tp4")
# The TPGenerator object is created
tp <- TPGenerator$new(opts = list(n = 4, dir = ed), ve = ve)
# The combined transition probabilities are generated
tp$generate_tp()
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/generate-tp.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("test.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The test file name
cfn <- paste0(ed, "/test.txt")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(ve = ve)
# The file info is fetched
fi <- da$get_file_info(cfn)
# The file information is printed
print(fi)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/get-file-info.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("n2.RDS")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The n-gram file name
nfn <- paste0(ed, "/n2.RDS")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(nfn, ve = ve)
# Bi-grams starting with "and_" are returned
df <- da$get_ngrams(fn = nfn, c = 10, pre = "^and_*")
# The data frame is sorted by frequency
df <- df[order(df$freq, decreasing = TRUE), ]
# The data frame is printed
print(df)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/get-n-grams.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("def-model.RDS")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve)
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# ModelPredictor class object is created
mp <- ModelPredictor$new(mf = mfn, ve = ve)
# The probability that the next word is "you" given the prev words "how" and
# "are"
prob <- mp$get_word_prob(word = "you", pw = c("how", "are"))
# The probability is printed
print(prob)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/get-word-prob.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("def-model.RDS", "validate-clean.txt")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The validation file name
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The intrinsic evaluation is performed
stats <- me$intrinsic_evaluation(lc = 20, fn = vfn)
# The evaluation stats are printed
print(stats)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/intrinsic-evaluation.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL value implies tempdir will be used.
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("n2.RDS")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, rp = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The n-gram file name
nfn <- paste0(ed, "/n2.RDS")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(nfn, ve = ve)
# The top features plot is generated
df <- da$plot_n_gram_stats(opts = list(
"type" = "top_features",
"n" = 10,
"save_to" = NULL,
"dir" = ed
))
# N-gram statistics are displayed
print(df)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/plot-n-gram-stats.R
|
# Start of environment setup code
# The level of detail in the information messages
ve <- 2
# The name of the folder that will contain all the files. It will be created in
# the current directory. NULL implies tempdir will be used
fn <- NULL
# The required files. They are default files that are part of the package
rf <- c("def-model.RDS")
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve, "rp" = "./")
# The required files are downloaded
ed <- em$setup_env(rf, fn)
# End of environment setup code
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# ModelPredictor class object is created
mp <- ModelPredictor$new(mf = mfn, ve = ve)
# The next word is predicted
nws <- mp$predict_word("today is", count = 10)
# The predicted next words are printed
print(nws)
# The test environment is removed. Comment the below line, so the files
# generated by the function can be viewed
em$td_env()
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/inst/examples/predict-word.R
|
---
title: "Features"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Features}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r echo=FALSE, results='hide'}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.path = "reference/figures/"
)
```
```{r setup, echo=FALSE, results='hide', message=FALSE}
library(wordpredictor)
# The level of verbosity in the information messages
ve <- 0
#' @description
#' Used to set up the test environment
#' @param rf The required files.
#' @param ve The verbosity level.
#' @return The list of directories in the test environment
setup_env <- function(rf, ve) {
# An object of class EnvManager is created
em <- EnvManager$new(rp = "../", ve = ve)
# The required files are downloaded
ed <- em$setup_env(rf)
return(ed)
}
#' @description
#' Used to clean up the test environment
clean_up <- function(ve) {
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve)
# The test environment is removed
em$td_env(T)
}
```
## Introduction
This document describes all the features provided by the **wordpredictor** package. It first describes how to generate n-gram models. Next, it describes how to evaluate the performance of those models. Finally, it describes how to make word predictions using an n-gram model.
## Model Generation
The **wordpredictor** package provides several classes that can be used to generate n-gram models. These classes may be used to generate n-gram models step by step. An alternative is to use the **ModelGenerator** class which combines all the steps and provides a single method for generating n-gram models.
The following steps are involved in generating n-gram models:
### Data Exploration
The first step in generating an n-gram model is data exploration. This involves determining the type of textual content and various text-related statistics. The type of text may be news content, blog posts, Twitter feeds, product reviews, customer chat history etc. Examples of text-related statistics are line count, word count, average line length and input file size.
It is also important to identify unwanted words and symbols in the data, such as vulgar words, punctuation symbols and non-alphabetical symbols. The **wordpredictor** package provides the **DataAnalyzer** class, which can be used to compute statistics about the input data. The following example shows how to get statistics on all text files within a folder:
```{r data-exploration, cache=FALSE}
# The required files
rf <- c(
"test.txt",
"validate.txt",
"validate-clean.txt",
"test-clean.txt"
)
# The test environment is setup
ed <- setup_env(rf, ve)
# The DataAnalyzer object is created
da <- DataAnalyzer$new(ve = ve)
# Information on all text files in the ed folder is returned
fi <- da$get_file_info(ed)
# The file information is printed
print(fi)
# The test environment is cleaned up
clean_up(ve)
```
The word count of a text file can be fetched using the command `cat file-name | wc -w`. This command should work on all Unix-based systems.
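For a platform-independent alternative, the same count can be approximated in base R. This is a minimal sketch, assuming the file fits in memory; `file-name.txt` is a placeholder path:

```r
# Approximate word count of a text file in base R
lines <- readLines("file-name.txt")
word_count <- sum(lengths(strsplit(lines, "\\s+")))
print(word_count)
```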
### Data Sampling
The next step is to generate training, testing and validation samples from the input text file. If there are several input text files, they can be combined into a single file using the command `cat file-1 file-2 file-3 > output-file`. The contents of the combined text file may need to be randomized.
The **wordpredictor** package provides the **DataSampler** class, which can be used to generate a random sample containing a given number of lines. The following example shows how to generate a random sample of a given size (here 0.1 MB) from an input text file:
```{r data-sampling-1, cache=FALSE}
# The required files
rf <- c("input.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The target sample size in MB
ssize <- 0.1
# The data file path
dfp <- paste0(ed, "/input.txt")
# The size of the input file in MB
obj_size <- file.size(dfp)/10^6
# The proportion of data to sample
prop <- (ssize/obj_size)
# An object of class DataSampler is created
ds <- DataSampler$new(dir = ed, ve = ve)
# The sample file is generated.
# The randomized sample is saved to the file train.txt in the ed folder
ds$generate_sample(
  fn = "input.txt",
  ss = prop,
  ic = FALSE,
  ir = TRUE,
  ofn = "train.txt",
  is = TRUE
)
# The test environment is cleaned up
clean_up(ve)
```
Usually we need a training data set for generating the n-gram model, a test data set for testing the model and a validation data set for evaluating its performance. The following example shows how to generate the train, test and validation files. The train file contains the first 80% of the lines, the test file contains the next 10% and the remaining lines go to the validation file.
The data in the validation file must be different from the data in the train file. Otherwise the model may be over-fitted. When a model is over-fitted, the model evaluation results will be exaggerated, overly optimistic and unreliable. So care should be taken to ensure that the data in the validation and train files differ.
```{r data-sampling-2, cache=FALSE}
# The required files
rf <- c("input.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# An object of class DataSampler is created
ds <- DataSampler$new(dir = ed, ve = ve)
# The train, test and validation files are generated
ds$generate_data(
fn = "input.txt",
percs = list(
"train" = 0.8,
"test" = 0.1,
"validate" = 0.1
)
)
# The test environment is cleaned up
clean_up(ve)
```
In the above example, the **dir** parameter is the directory containing the **input.txt** file and the generated test, validation and train data files.
### Data Cleaning
The next step is to remove unwanted symbols and words from the input text file. This reduces the memory requirement of the n-gram model and makes it more efficient. Examples of unwanted words are vulgar words, words that are not part of the vocabulary, punctuation, numbers, non-printable characters and extra spaces.
The **wordpredictor** package provides the **DataCleaner** class which can be used to remove unwanted words and symbols from text files. The following example shows how to clean a given text file:
```{r data-cleaning, cache=FALSE}
# The required files
rf <- c("input.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The data file path
fn <- paste0(ed, "/input.txt")
# The clean file path
cfn <- paste0(ed, "/input-clean.txt")
# The data cleaning options
dc_opts <- list(
  "min_words" = 2,
  "to_lower" = TRUE,
  "remove_stop" = FALSE,
  "remove_punct" = TRUE,
  "remove_non_dict" = TRUE,
  "remove_non_alpha" = TRUE,
  "remove_extra_space" = TRUE,
  "remove_bad" = FALSE,
  "output_file" = cfn
)
# The data cleaner object is created
dc <- DataCleaner$new(fn, dc_opts, ve = ve)
# The sample file is cleaned and saved as input-clean.txt in the ed dir
dc$clean_file()
# The test environment is cleaned up
clean_up(ve)
```
The **clean_file** method reads a certain number of lines at a time, cleans the lines of text and saves them to an output text file. It can be used for cleaning large text files.
### Tokenization
The next step is to generate n-gram tokens from the cleaned text file. The **TokenGenerator** class allows generating n-gram tokens of a given size from a given input text file. The following example shows how to generate n-gram tokens of size 1, 2, 3 and 4:
```{r tokenization-1, cache=FALSE}
# The required files
rf <- c("test-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The test file path
fn <- paste0(ed, "/test-clean.txt")
# The n-grams are generated
for (n in 1:4) {
  # The n-gram size is set
  tg_opts <- list("n" = n, "save_ngrams" = TRUE, "dir" = ed)
  # The TokenGenerator object is created
  tg <- TokenGenerator$new(fn, tg_opts, ve = ve)
  # The n-gram tokens are generated
  tg$generate_tokens()
}
# The test environment is cleaned up
clean_up(ve)
```
The above code generates the files **n1.RDS, n2.RDS, n3.RDS and n4.RDS** in the data directory. These files contain the n-gram tokens along with their frequencies. N-grams of larger size provide more context. Usually n-grams of size 4 are generated.
Two important customization options supported by the **TokenGenerator** class are **min_freq** and **stem_words**. **min_freq** sets the minimum frequency for n-gram tokens; all n-gram tokens with a frequency less than **min_freq** are excluded.
The **stem_words** option transforms the n-gram prefix components to their stems. The next word is not transformed.
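As a sketch of how these two options might be passed (the option names come from the text above; the values here are arbitrary examples, not defaults):

```r
# Example tokenization options: drop rare n-grams and stem the prefix words
tg_opts <- list(
  "n" = 3,
  "save_ngrams" = TRUE,
  "dir" = ed,
  "min_freq" = 4,      # exclude n-grams occurring fewer than 4 times
  "stem_words" = TRUE  # stem the n-gram prefix components
)
```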
The n-gram token frequencies may be analyzed using the **DataAnalyzer** class. The following example displays the most frequently occurring 2-gram tokens:
```{r tokenization-2, cache=FALSE, out.width="70%", out.height="70%"}
# The required files
rf <- c("n2.RDS")
# The test environment is setup
ed <- setup_env(rf, ve)
# The ngram file name
fn <- paste0(ed, "/n2.RDS")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(fn, ve = ve)
# The top features plot is generated and saved
df <- da$plot_n_gram_stats(opts = list(
"type" = "top_features",
"n" = 10,
"save_to" = "png",
"dir" = "./reference/figures"
))
# The output file path
fn <- paste0("./reference/figures/top_features.png")
knitr::include_graphics(fn)
# The test environment is cleaned up
clean_up(ve)
```
The following example shows the distribution of word frequencies:
```{r tokenization-3, cache=FALSE, out.width="70%", out.height="70%"}
# The required files
rf <- c("n2.RDS")
# The test environment is setup
ed <- setup_env(rf, ve)
# The ngram file name
fn <- paste0(ed, "/n2.RDS")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(fn, ve = ve)
# The coverage plot is generated and saved
df <- da$plot_n_gram_stats(opts = list(
"type" = "coverage",
"n" = 10,
"save_to" = "png",
"dir" = "./reference/figures"
))
# The output file path
fn <- paste0("./reference/figures/coverage.png")
knitr::include_graphics(fn)
# The test environment is cleaned up
clean_up(ve)
```
The following example returns the top 10 2-gram tokens that start with **and_**:
```{r tokenization-4, cache=FALSE}
# The required files
rf <- c("n2.RDS")
# The test environment is setup
ed <- setup_env(rf, ve)
# The ngram file name
fn <- paste0(ed, "/n2.RDS")
# The DataAnalyzer object is created
da <- DataAnalyzer$new(ve = ve)
# Bi-grams starting with "and_" are returned
df <- da$get_ngrams(fn = fn, c = 10, pre = "^and_*")
# The data frame is sorted by frequency
df <- df[order(df$freq, decreasing = TRUE), ]
# The first 10 rows of the data frame are printed
knitr::kable(df[1:10,], col.names = c("Prefix", "Frequency"))
# The test environment is cleaned up
clean_up(ve)
```
### Transition Probabilities
The next step in generating the n-gram model is to generate transition probabilities (tp) from the n-gram files. The **TPGenerator** class is used to generate the tps. For each n-gram token file a corresponding tp file is generated.
The tp files are then combined into a single file containing tp data for n-grams of size 1, 2, 3, 4 etc.
The following example shows how to generate combined tps for n-grams of size 1, 2, 3 and 4:
```{r transition-probabilities, cache=FALSE}
# The required files
rf <- c("n1.RDS", "n2.RDS", "n3.RDS", "n4.RDS")
# The test environment is setup
ed <- setup_env(rf, ve)
# The TPGenerator object is created
tp <- TPGenerator$new(opts = list(n = 4, dir = ed), ve = ve)
# The combined transition probabilities are generated
tp$generate_tp()
# The test environment is cleaned up
clean_up(ve)
```
The above code produces the file **model-4.RDS**.
### The model file
The final step is to generate an n-gram model file from the files generated in the previous steps. The **Model** class contains the method **load_model**, which reads the combined tp file and the other files used by the model. An instance of the **Model** class represents the n-gram model.
### Generating the model in one step
All the previous steps may be combined into a single step. The **ModelGenerator** class allows generating the final n-gram model with a single method call. The following example generates an n-gram model using default data cleaning and tokenization options:
```{r generate-model, results='hide', cache=FALSE}
# The required files
rf <- c("input.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The following code generates an n-gram model using default options for data
# cleaning and tokenization. See the following section on how to customize
# these options. Note that input.txt is the name of the input data file. It
# should be present in the data directory. dir is the directory containing the
# input and output files. It is set to the path of the environment directory,
# ed.
# ModelGenerator class object is created
mg <- ModelGenerator$new(
name = "def-model",
desc = "N-gram model generating using default options",
fn = "def-model.RDS",
df = "input.txt",
n = 4,
ssize = 0.1,
dir = ed,
dc_opts = list(),
tg_opts = list(),
ve = ve
)
# Generates n-gram model. The output is the file def-model.RDS
mg$generate_model()
# The test environment is cleaned up
clean_up(ve)
```
## Evaluating the model performance
The **wordpredictor** package provides the **ModelEvaluator** class for evaluating the performance of the generated n-gram model. Both intrinsic and extrinsic evaluation are supported. The performance of several n-gram models may also be compared.
The following example performs intrinsic evaluation. It measures the Perplexity score for each sentence in the cleaned validation file (**validate-clean.txt**) that was generated in the data sampling step. It returns the minimum, mean and maximum Perplexity scores over the evaluated lines.
```{r model-evaluation-1, cache=FALSE}
# The required files
rf <- c("def-model.RDS", "validate-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The path to the cleaned validation file
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The intrinsic evaluation is performed on the first 20 lines
stats <- me$intrinsic_evaluation(lc = 20, fn = vfn)
# The test environment is cleaned up
clean_up(ve)
```
The following example performs extrinsic evaluation. It measures the accuracy score over the sentences in the cleaned validation file. For each sentence the model is used to predict the last word given the previous words. If the last word was correctly predicted, the prediction is considered accurate.
```{r model-evaluation-2, cache=FALSE}
# The required files
rf <- c("def-model.RDS", "validate-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The path to the cleaned validation file
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The extrinsic evaluation is performed on the first 100 lines
stats <- me$extrinsic_evaluation(lc = 100, fn = vfn)
# The test environment is cleaned up
clean_up(ve)
```
## Making word predictions
The n-gram model generated in the previous step can be used to predict the next word given a set of words. The following example shows how to predict the next word. It returns the 3 most likely next words along with their probabilities.
```{r predict-word, cache=FALSE}
# The required files
rf <- c("def-model.RDS")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# An object of class ModelPredictor is created. The mf parameter is the name of
# the model file that was generated in the previous example.
mp <- ModelPredictor$new(mf = mfn, ve = ve)
# Given the words: "how are", the next word is predicted. The top 3 most likely
# next words are returned along with their respective probabilities.
res <- mp$predict_word(words = "how are", 3)
# The test environment is cleaned up
clean_up(ve)
```
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/vignettes/features.Rmd
|
---
title: "Overview"
output: rmarkdown::html_vignette
vignette: >
%\VignetteIndexEntry{Overview}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
bibliography: references.bib
nocite: '@*'
---
```{r echo=FALSE, results='hide'}
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
fig.path = "reference/figures/"
)
```
```{r setup, echo=FALSE, results='hide', message=FALSE}
library(wordpredictor)
# The level of verbosity in the information messages
ve <- 0
#' @description
#' Used to set up the test environment
#' @param rf The required files.
#' @param ve The verbosity level.
#' @return The list of directories in the test environment
setup_env <- function(rf, ve) {
# An object of class EnvManager is created
em <- EnvManager$new(rp = "../", ve = ve)
# The required files are downloaded
ed <- em$setup_env(rf)
return(ed)
}
#' @description
#' Used to clean up the test environment
clean_up <- function(ve) {
# An object of class EnvManager is created
em <- EnvManager$new(ve = ve)
# The test environment is removed
em$td_env(T)
}
```
## Introduction
This document describes the theory behind the n-gram models generated by the **wordpredictor** package. It also provides code examples that describe how to use the package.
The goal of the **wordpredictor** package is to provide a flexible and easy-to-use framework for generating [n-gram models](https://en.wikipedia.org/wiki/N-gram) for word prediction.
Besides generating n-gram models, the package allows exploring n-gram frequencies using plots. Additionally it provides methods for measuring n-gram model performance using [Perplexity](https://en.wikipedia.org/wiki/Perplexity) and accuracy.
The n-gram model may be customized using several options, such as n-gram size, data cleaning options and options for converting text to tokens.
## How the model works
The n-gram model generated by the **wordpredictor** package uses a [Markov model](https://en.wikipedia.org/wiki/Markov_chain) to approximate the language model. This means that the probability of a word depends only on the n-1 previous words.
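In symbols, the full conditional probability is approximated by an order n-1 Markov assumption:

$$P(w_i \mid w_1, \ldots, w_{i-1}) \approx P(w_i \mid w_{i-n+1}, \ldots, w_{i-1})$$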
Maximum Likelihood Estimation (MLE) is used to calculate the probability of a word. The word is regarded as the last component of an n-gram, and the total number of occurrences of the n-gram is divided by the total number of occurrences of the (n-1)-gram prefix. This gives the probability of the word.
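Written as a formula, with $C$ denoting a frequency count:

$$P(w_i \mid w_{i-n+1}, \ldots, w_{i-1}) = \frac{C(w_{i-n+1} \ldots w_i)}{C(w_{i-n+1} \ldots w_{i-1})}$$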
The n-gram model is generated in steps. In the first step, the input data is cleaned. Unwanted symbols and words are removed from the input data.
In the next step, the cleaned data file is read. N-grams are extracted from the file, starting from 1-grams up to the configured n-gram size. The 1-gram, 2-gram, 3-gram etc. tokens are saved in separate files along with their frequencies. So the 3-gram file contains all extracted 3-grams and their respective frequencies.
The next step is to generate transition probability tables for each n-gram file. For the 1-gram file the transition probability table is simply the list of unique words along with the word frequencies. For the other n-gram files, the transition probability table is a data frame with 3 columns: the hash of the n-gram prefix, the next word id and the next word probability.
The n-gram prefix is the set of n-1 components before the last component. The n-1 components are joined with "_" and converted to a numeric hash value using the **digest2int** function of the [digest](https://cran.r-project.org/package=digest) package.
The next word id is the numeric index of the next word in the list of 1-grams. The next word probability is the probability of the next word given the previous n-1 words. It is calculated using Maximum Likelihood Estimation (MLE) as described above.
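As an illustration, a couple of rows of such a table might look like the following (the hash, id and probability values are made up for this example):

```r
# Hypothetical rows of a transition probability table:
# prefix_hash encodes the n-1 word prefix, nw_id indexes the 1-gram list
data.frame(
  prefix_hash = c(-1483742005, 902117743),
  nw_id       = c(21L, 305L),
  prob        = c(0.42, 0.07)
)
```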
Instead of storing the n-gram prefix strings, a single number is saved. Also instead of storing the next word, the numeric index of the next word is saved. This saves a lot of memory and allows more data to be stored, which improves the n-gram model's efficiency.
In R, a number requires a fixed amount of storage, about 56 bytes. In contrast, the memory required to store a string increases with the number of characters in the string.
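This can be checked with `utils::object.size`; the byte counts in the comments below are typical for a 64-bit build of R and may vary by platform:

```r
object.size(1)                 # a numeric scalar: 56 bytes
object.size("a")               # a short string already needs more memory
object.size(strrep("a", 1000)) # memory grows with string length
```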
The data frames that represent each transition probability table are combined into a single data frame. The combined transition probability table is used to make word predictions.
## Using the model to predict words
To predict a word, the n-1 previous words are used as input. The model computes the hash of the previous words and looks up the hash in the combined transition probabilities table. If the hash is found, the model extracts the top 3 next word ids that have the highest probabilities.
The model looks up the next word text that corresponds to the next word ids. The result is the top 3 most likely next words along with their probabilities.
If the hash was not found, then the hash of the n-2 previous words is calculated and looked up in the combined transition probabilities table.
This process is repeated until there are no previous words left. When this happens, the model returns a "word not found" message.
This method of checking the transition probabilities of lower-order n-grams is called **back-off**. An alternative method of predicting a word is **interpolation**, which involves weighting and summing the probabilities for each n-gram size.
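A minimal sketch of the back-off loop is shown below. This is not the package's internal code; `lookup_tp` is a hypothetical helper standing in for the lookup in the combined transition probabilities table:

```r
predict_next <- function(prev_words, count = 3) {
  while (length(prev_words) > 0) {
    # Hash the prefix the same way the model does
    hash <- digest::digest2int(paste(prev_words, collapse = "_"))
    # Hypothetical helper: rows of the combined tp table matching the hash
    hits <- lookup_tp(hash)
    if (nrow(hits) > 0) {
      # Return the `count` next words with the highest probabilities
      return(head(hits[order(hits$prob, decreasing = TRUE), ], count))
    }
    # Back off: drop the earliest word and retry with a shorter prefix
    prev_words <- prev_words[-1]
  }
  "word not found"
}
```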
## Predicting the model performance
The **wordpredictor** package provides methods for performing **intrinsic** and **extrinsic** evaluation of the n-gram model.
The **wordpredictor** package performs intrinsic evaluation by calculating the mean Perplexity score over all sentences in a validation text file. The Perplexity of a sentence is calculated by taking the N-th root of the inverse of the product of the probabilities of all words in the sentence, where N is the number of words in the sentence.
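Written out, the Perplexity of a sentence with words $w_1, \ldots, w_N$ is:

$$PP = \left( \prod_{i=1}^{N} \frac{1}{P(w_i \mid w_{i-n+1}, \ldots, w_{i-1})} \right)^{1/N}$$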
The probability of a word is calculated by considering all n-1 words before that word. If the word was not found in the transition probabilities table, then the n-2 words are looked up. This process is repeated until there are no previous words.
If the word was found in the 1-gram list, then the probability of the word is calculated by simply dividing the number of times the word occurs by the total number of words.
If the word was not found in the 1-gram list, then the model uses a default probability as the probability of the word. The default probability is calculated using [Laplace Smoothing](https://towardsdatascience.com/n-gram-language-models-af6085435eeb#:~:text=Laplace%20Smoothing,algorithm%20is%20called%20Laplace%20smoothing.).
Laplace Smoothing involves adding 1 to the frequency count of each word in the vocabulary. Essentially this means that the total number of words in the data set is increased by vc, where vc is the number of words in the vocabulary.
In Laplace Smoothing 1 is added to the word count. Since an unknown word occurs zero times, after Laplace Smoothing it will have a count of 1. So the default probability is calculated as: **P(unk) = 1/(N+VC)**, where **N** is the total number of words in the data set and **VC** is the number of words in the vocabulary. This default probability is assigned to unknown words. Alternative methods to Laplace Smoothing are **Add-k smoothing**, **Kneser-Ney smoothing** and **Good-Turing Smoothing**.
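As a worked example with made-up counts: if the data set contains $N = 10^6$ words and the vocabulary contains $VC = 10^4$ words, then $P(unk) = 1/(10^6 + 10^4) \approx 9.9 \times 10^{-7}$.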
The **wordpredictor** package uses the file **/usr/share/dict/cracklib-small** as the dictionary file. This file is pre-installed in most Linux distributions.
Extrinsic evaluation involves calculating the accuracy score. The model tries to predict the last word of a sentence. If the actual last word was one of the 3 words predicted by the model, then the prediction is considered to be accurate. The accuracy score is the number of sentences that were correctly predicted.
## Generating the model
The **ModelGenerator** class allows generating the final n-gram model using a single method call. The following example generates an n-gram model using default data cleaning and tokenization options:
```{r generate-model, results='hide', cache=FALSE}
# The required files
rf <- c("input.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The following code generates n-gram model using default options for data
# cleaning and tokenization. See the following section on how to customize these
# options. Note that input.txt is the name of the input data file. It should be
# present in the ed directory. The generated model file is also placed in this
# directory.
# ModelGenerator class object is created
mg <- ModelGenerator$new(
name = "def-model",
desc = "N-gram model generating using default options",
fn = "def-model.RDS",
df = "input.txt",
n = 4,
ssize = 0.1,
dir = ed,
dc_opts = list(),
tg_opts = list(),
ve = ve
)
# Generates the n-gram model. The output is the file def-model.RDS in the
# ed directory
mg$generate_model()
# The test environment is cleaned up
clean_up(ve)
```
## Evaluating the model performance
The **wordpredictor** package provides the **ModelEvaluator** class for evaluating the performance of the generated n-gram model.
The following example performs intrinsic evaluation. It measures the Perplexity score for each sentence in the cleaned validation file (**validate-clean.txt**). It returns the minimum, mean and maximum Perplexity scores over the evaluated lines.
```{r model-evaluation-1, cache=FALSE}
# The required files
rf <- c("def-model.RDS", "validate-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The path to the cleaned validation file
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The intrinsic evaluation is performed on the first 20 lines
stats <- me$intrinsic_evaluation(lc = 20, fn = vfn)
# The test environment is cleaned up
clean_up(ve)
```
The following example performs extrinsic evaluation. It measures the accuracy score over the sentences in the cleaned validation file. For each sentence the model is used to predict the last word given the previous words. If the last word was correctly predicted, the prediction is considered accurate. The extrinsic evaluation returns the number of correct and incorrect predictions.
```{r model-evaluation-2, cache=FALSE}
# The required files
rf <- c("def-model.RDS", "validate-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# The path to the cleaned validation file
vfn <- paste0(ed, "/validate-clean.txt")
# ModelEvaluator class object is created
me <- ModelEvaluator$new(mf = mfn, ve = ve)
# The extrinsic evaluation is performed on the first 100 lines
stats <- me$extrinsic_evaluation(lc = 100, fn = vfn)
# The test environment is cleaned up
clean_up(ve)
```
## How to predict a word
The following example shows how to predict the next word. It returns the 3 most likely next words along with their probabilities.
```{r predict-word, cache=FALSE}
# The required files
rf <- c("def-model.RDS", "validate-clean.txt")
# The test environment is setup
ed <- setup_env(rf, ve)
# The model file name
mfn <- paste0(ed, "/def-model.RDS")
# An object of class ModelPredictor is created. The mf parameter is the name of
# the model file that was generated in the previous example.
mp <- ModelPredictor$new(mf = mfn, ve = ve)
# Given the words: "how are", the next word is predicted. The top 3 most likely
# next words are returned along with their respective probabilities.
res <- mp$predict_word(words = "how are", 3)
# The test environment is cleaned up
clean_up(ve)
```
## Demo
The wordpredictor package includes a demo called "word-predictor". The demo is a Shiny application that displays the ten most likely words for a given set of words. To access the demo, run the following command from the R shell:
**`demo("word-predictor", package = "wordpredictor", ask = FALSE)`**
## Package dependencies
The wordpredictor package uses the following packages: [digest](https://cran.r-project.org/package=digest), [dplyr](https://cran.r-project.org/package=dplyr), [ggplot2](https://cran.r-project.org/package=ggplot2), [R6](https://cran.r-project.org/package=R6), [testthat](https://cran.r-project.org/package=testthat) and [stringr](https://cran.r-project.org/package=stringr).
The following packages were useful during package development: [quanteda](https://cran.r-project.org/package=quanteda), [tm](https://cran.r-project.org/package=tm), [hash](https://cran.r-project.org/package=hash), [lintr](https://cran.r-project.org/package=lintr), [styler](https://cran.r-project.org/package=styler), [pkgdown](https://cran.r-project.org/package=pkgdown) and [pryr](https://cran.r-project.org/package=pryr).
## Useful Links
The following articles and tutorials were very useful:
* [N-Gram Model](https://devopedia.org/n-gram-model)
* [Probability Smoothing for Natural Language Processing](https://lazyprogrammer.me/probability-smoothing-for-natural-language-processing/)
* [Natural Language Processing is Fun!](https://medium.com/@ageitgey/natural-language-processing-is-fun-9a0bff37854e)
* [Quanteda Tutorials](https://tutorials.quanteda.io/)
## Bibliography
|
/scratch/gouwar.j/cran-all/cranData/wordpredictor/vignettes/overview.Rmd
|
#' List of English words
#'
#' A dataset containing a list of English words and their lengths.
#'
#' @format A data frame with 175393 rows and 2 variables:
#' \describe{
#'   \item{word}{an English word}
#'   \item{word_length}{number of letters in the word}
#' ...
#' }
#' @source \url{https://www.wordgamedictionary.com/word-lists/}
"words"
|
/scratch/gouwar.j/cran-all/cranData/words/R/data.R
|
# Tokenize the text, then paste the tokens of each document back together
# using the collapse character, yielding one string per document
pre_tokenize <- function(text, tokenizer, collapse = "\t") {
  tokens <- tokenizer(text)
  vapply(tokens, paste, collapse = collapse, FUN.VALUE = character(1))
}
# Convert a word-vector matrix to the requested output format
composer <- function(x, composition) {
if (composition == "tibble") {
x <- as.data.frame(x)
x <- tibble::rownames_to_column(x, "tokens")
x <- tibble::as_tibble(x)
}
if (composition == "data.frame") {
x <- as.data.frame(x)
}
x
}
|
/scratch/gouwar.j/cran-all/cranData/wordsalad/R/aaa.R
|
#' The text of H.C. Andersen's fairy tales in English
#'
#' A dataset containing 5 of H.C. Andersen's fairy tales
#' translated to English. The UTF-8 plain text was sourced from
#' http://www.andersenstories.com/.
#'
#' This is not representative of the size needed to generate good word vectors.
#' It is just used for examples.
#'
#' @format A character vector with 5 elements.
"fairy_tales"
|
/scratch/gouwar.j/cran-all/cranData/wordsalad/R/data.R
|
#' Extract word vectors from fasttext word embedding
#'
#' The calculations are done with the fastTextR package.
#'
#' @param text Character string.
#' @param tokenizer Function, function to perform tokenization. Defaults to
#' [text2vec::space_tokenizer].
#' @param dim Integer, number of dimensions of the resulting word vectors.
#' @param min_count Integer, number of times a token should appear to be
#' considered in the model. Defaults to 5.
#' @param type Character, the type of algorithm to use, either 'cbow' or
#' 'skip-gram'. Defaults to 'skip-gram'.
#' @param window Integer, skip length between words. Defaults to 5.
#' @param n_iter Integer, number of training iterations. Defaults to 5.
#' @param loss Character, choice of loss function, must be one of "ns", "hs",
#' or "softmax". See details for more information. Defaults to "hs".
#' @param negative Integer, the number of negative samples. Only used when
#' loss = "ns".
#' @param verbose Logical, controls whether progress is reported as operations
#' are executed.
#' @param threads Integer, number of CPU threads to use. Defaults to 1.
#' @param composition Character, either "tibble", "matrix", or "data.frame" for
#' the format of the resulting word vectors.
#'
#' @details
#' The choice of loss functions are one of:
#' * "ns" negative sampling
#' * "hs" hierarchical softmax
#' * "softmax" full softmax
#'
#' @return A [tibble][tibble::tibble-package], data.frame or matrix containing
#' the token in the first column and word vectors in the remaining columns.
#'
#' @source <https://fasttext.cc/>
#' @references Enriching Word Vectors with Subword Information, 2016, P.
#' Bojanowski, E. Grave, A. Joulin, T. Mikolov.
#'
#' @export
#' @examples
#' fasttext(fairy_tales, n_iter = 2)
#'
#' # Custom tokenizer that splits on non-alphanumeric characters
#' fasttext(fairy_tales,
#' n_iter = 2,
#' tokenizer = function(x) strsplit(x, "[^[:alnum:]]+"))
fasttext <- function(text,
tokenizer = text2vec::space_tokenizer,
dim = 10L,
type = c("skip-gram", "cbow"),
window = 5L,
loss = "hs",
negative = 5L,
n_iter = 5L,
min_count = 5L,
threads = 1L,
composition = c("tibble", "data.frame", "matrix"),
verbose = FALSE) {
composition <- match.arg(composition)
type <- match.arg(type)
type <- gsub("-", "", type)
text <- pre_tokenize(text, tokenizer, " ")
tmp_file_txt <- tempfile()
tmp_file_model <- tempfile()
writeLines(text = text, con = tmp_file_txt)
control <- fastTextR::ft_control(
loss = loss,
word_vec_size = dim,
window_size = window,
epoch = n_iter,
neg = negative,
min_count = min_count,
nthreads = threads,
verbose = verbose
)
model <- fastTextR::ft_train(tmp_file_txt, method = type, control = control)
word_vectors <- model$word_vectors(model$words())
res <- composer(word_vectors, composition = composition)
res
}
|
/scratch/gouwar.j/cran-all/cranData/wordsalad/R/fasttext.R
|
#' Extract word vectors from GloVe word embedding
#'
#' The calculations are done with the text2vec package.
#'
#' @param text Character string.
#' @param tokenizer Function, function to perform tokenization. Defaults to
#' [text2vec::space_tokenizer].
#' @param dim Integer, number of dimensions of the resulting word vectors.
#' @param x_max Integer, maximum number of co-occurrences to use in the
#' weighting function. Defaults to 10.
#' @param min_count Integer, number of times a token should appear to be
#' considered in the model. Defaults to 5.
#' @param stopwords Character, a vector of stop words to exclude from training.
#' @param window Integer, skip length between words. Defaults to 5.
#' @param n_iter Integer, number of training iterations. Defaults to 10.
#' @param convergence_tol Numeric, value determining the convergence criteria.
#' \code{numeric = -1} defines early stopping strategy. Stop fitting
#' when one of two following conditions will be satisfied: (a) passed
#' all iterations (b) \code{cost_previous_iter / cost_current_iter - 1 <
#' convergence_tol}. Defaults to -1.
#' @param verbose Logical, controls whether progress is reported as operations
#' are executed.
#' @param threads Integer, number of CPU threads to use. Defaults to 1.
#' @param composition Character, either "tibble", "matrix", or "data.frame" for
#' the format of the resulting word vectors.
#'
#' @return A [tibble][tibble::tibble-package], data.frame or matrix containing
#' the token in the first column and word vectors in the remaining columns.
#' @export
#'
#' @source <https://nlp.stanford.edu/projects/glove/>
#' @references Jeffrey Pennington, Richard Socher, and Christopher D. Manning.
#' 2014. GloVe: Global Vectors for Word Representation.
#'
#' @examples
#' glove(fairy_tales, x_max = 5)
glove <- function(text,
tokenizer = text2vec::space_tokenizer,
dim = 10L,
window = 5L,
min_count = 5L,
n_iter = 10L,
x_max = 10L,
stopwords = character(),
convergence_tol = -1,
threads = 1,
composition = c("tibble", "data.frame", "matrix"),
verbose = FALSE) {
composition <- match.arg(composition)
tokens <- tokenizer(text)
it <- text2vec::itoken(tokens, progressbar = FALSE)
vocab <- text2vec::create_vocabulary(it, stopwords = stopwords)
vocab <- text2vec::prune_vocabulary(vocab, term_count_min = min_count)
vectorizer <- text2vec::vocab_vectorizer(vocab)
tcm <- text2vec::create_tcm(it, vectorizer, skip_grams_window = window)
glove <- text2vec::GlobalVectors$new(rank = dim, x_max = x_max)
if (verbose) {
wv_main <- glove$fit_transform(tcm,
n_iter = n_iter,
convergence_tol = convergence_tol,
n_threads = threads)
} else {
temp <- utils::capture.output(
wv_main <- glove$fit_transform(tcm,
n_iter = n_iter,
convergence_tol = convergence_tol,
n_threads = threads)
)
}
wv_context <- glove$components
word_vectors <- wv_main + t(wv_context)
res <- composer(word_vectors, composition = composition)
res
}
|
/scratch/gouwar.j/cran-all/cranData/wordsalad/R/glove.R
|
#' Extract word vectors from word2vec word embedding
#'
#' The calculations are done with the word2vec package.
#'
#' @param text Character string.
#' @param tokenizer Function, function to perform tokenization. Defaults to
#' [text2vec::space_tokenizer].
#' @param collapse_character Character vector with length 1. Character used to
#' glue together tokens after tokenizing. See details for more information.
#' Defaults to \code{"\\t"}.
#' @param composition Character, either "tibble", "matrix", or "data.frame" for
#' the format of the resulting word vectors.
#' @param n_iter Integer, number of training iterations. Defaults to 5.
#' @param loss Character, choice of loss function, must be one of "ns" or "hs".
#' See details for more information. Defaults to "ns".
#' @inheritParams word2vec::word2vec
#'
#'
#' @details
#' A trade-off has been made to allow for an arbitrary tokenizing function. The
#' text is first passed through the tokenizer. Then it is collapsed back
#' together into strings using \code{collapse_character} as the separator. You
#' need to pick \code{collapse_character} to be a character that will not appear
#' in any of the tokens after tokenizing is done. The default value is a "tab"
#' character. If you pick a character that is present in the tokens then those
#' words will be split.
#'
#' The choice of loss functions are one of:
#' * "ns" negative sampling
#' * "hs" hierarchical softmax
#'
#' @return A [tibble][tibble::tibble-package], data.frame or matrix containing
#' the token in the first column and word vectors in the remaining columns.
#'
#' @source <https://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf>
#' @references Mikolov, Tomas and Sutskever, Ilya and Chen, Kai and Corrado,
#' Greg S and Dean, Jeff. 2013. Distributed Representations of Words and
#' Phrases and their Compositionality
#'
#' @export
#' @examples
#' word2vec(fairy_tales)
#'
#' # Custom tokenizer that splits on non-alphanumeric characters
#' word2vec(fairy_tales, tokenizer = function(x) strsplit(x, "[^[:alnum:]]+"))
word2vec <- function(text,
tokenizer = text2vec::space_tokenizer,
dim = 50,
type = c("cbow", "skip-gram"),
window = 5L,
min_count = 5L,
loss = c("ns", "hs"),
negative = 5L,
n_iter = 5L,
lr = 0.05,
sample = 0.001,
stopwords = character(),
threads = 1L,
collapse_character = "\t",
composition = c("tibble", "data.frame", "matrix")) {
  if (dim <= 0)
    stop("`dim` must be a positive integer.")
composition <- match.arg(composition)
loss <- match.arg(loss)
loss <- ifelse(loss == "hs", TRUE, FALSE)
text <- pre_tokenize(text, tokenizer, collapse_character)
stopwords <- paste(collapse_character,
stopwords,
collapse_character,
sep = "")
model <- word2vec::word2vec(x = text,
type = type,
dim = dim,
window = window,
iter = n_iter,
lr = lr,
hs = loss,
negative = negative,
sample = sample,
min_count = min_count,
split = c(collapse_character, ".\n?!"),
stopwords = stopwords,
threads = threads)
word_vectors <- as.matrix(model)
res <- composer(word_vectors, composition = composition)
res
}
|
/scratch/gouwar.j/cran-all/cranData/wordsalad/R/word2vec.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
CPP_col_dist_dense <- function(x, y, metric_code, param1, symmetric) {
.Call(`_wordspace_CPP_col_dist_dense`, x, y, metric_code, param1, symmetric)
}
CPP_col_dist_sparse <- function(nc1, xp, xrow, x, nc2, yp, yrow, y, metric_code, param1, symmetric) {
.Call(`_wordspace_CPP_col_dist_sparse`, nc1, xp, xrow, x, nc2, yp, yrow, y, metric_code, param1, symmetric)
}
CPP_get_openmp_threads <- function() {
.Call(`_wordspace_CPP_get_openmp_threads`)
}
CPP_set_openmp_threads <- function(n) {
invisible(.Call(`_wordspace_CPP_set_openmp_threads`, n))
}
CPP_random_indexing_sparse <- function(nr, nc, p, row_of, x, n_ri, rate, verbose = TRUE) {
.Call(`_wordspace_CPP_random_indexing_sparse`, nr, nc, p, row_of, x, n_ri, rate, verbose)
}
CPP_row_norms_dense <- function(x, norm_code, p_norm = 2.0) {
.Call(`_wordspace_CPP_row_norms_dense`, x, norm_code, p_norm)
}
CPP_row_norms_sparse <- function(nr, nc, p, row_of, x, norm_code, p_norm = 2.0) {
.Call(`_wordspace_CPP_row_norms_sparse`, nr, nc, p, row_of, x, norm_code, p_norm)
}
CPP_col_norms_dense <- function(x, norm_code, p_norm = 2.0) {
.Call(`_wordspace_CPP_col_norms_dense`, x, norm_code, p_norm)
}
CPP_col_norms_sparse <- function(nr, nc, p, row_of, x, norm_code, p_norm = 2.0) {
.Call(`_wordspace_CPP_col_norms_sparse`, nr, nc, p, row_of, x, norm_code, p_norm)
}
CPP_scale_margins_dense <- function(M, rows, cols, duplicate = TRUE) {
.Call(`_wordspace_CPP_scale_margins_dense`, M, rows, cols, duplicate)
}
CPP_scale_margins_sparse <- function(M, rows, cols, duplicate = TRUE) {
.Call(`_wordspace_CPP_scale_margins_sparse`, M, rows, cols, duplicate)
}
CPP_dsm_score_dense <- function(f, f1, f2, N, am_code, sparse, transform_code) {
.Call(`_wordspace_CPP_dsm_score_dense`, f, f1, f2, N, am_code, sparse, transform_code)
}
CPP_dsm_score_sparse <- function(nr, nc, p, row_of, f, f1, f2, N, am_code, sparse, transform_code) {
.Call(`_wordspace_CPP_dsm_score_sparse`, nr, nc, p, row_of, f, f1, f2, N, am_code, sparse, transform_code)
}
CPP_similarity_to_distance <- function(M, opcode, tol, duplicate = TRUE) {
.Call(`_wordspace_CPP_similarity_to_distance`, M, opcode, tol, duplicate)
}
CPP_signcount <- function(x) {
.Call(`_wordspace_CPP_signcount`, x)
}
CPP_signcount_int <- function(x) {
.Call(`_wordspace_CPP_signcount_int`, x)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/RcppExports.R
|
as.distmat <- function (x, ...) UseMethod("as.distmat")
## method for dense matrix
as.distmat.matrix <- function (x, similarity=FALSE, symmetric=FALSE, ...) {
if (!("dist.matrix" %in% class(x))) class(x) <- c("dist.matrix", class(x))
attr(x, "dist.matrix") <- TRUE
attr(x, "similarity") <- similarity
if (symmetric) {
if (nrow(x) != ncol(x)) stop("non-square matrix cannot be symmetric")
if (!is.null(rownames(x)) && !is.null(colnames(x)) && !all(rownames(x) == colnames(x))) stop("matrix cannot be symmetric because row and column labels differ")
attr(x, "symmetric") <- symmetric
}
x
}
## method for sparse matrix
as.distmat.sparseMatrix <- function (x, similarity=FALSE, symmetric=FALSE, force.dense=FALSE, ...) {
if (force.dense) {
x <- as.matrix(x)
class(x) <- c("dist.matrix", class(x))
}
else {
if (!similarity) stop("only non-negative similarity matrix is supported in sparse format")
x <- dsm.canonical.matrix(x, annotate=TRUE, nonneg.check=TRUE)
if (!isTRUE(attr(x, "nonneg"))) stop("only non-negative similarity matrix is supported in sparse format")
}
attr(x, "dist.matrix") <- TRUE
attr(x, "similarity") <- similarity
if (symmetric) {
if (nrow(x) != ncol(x)) stop("non-square matrix cannot be symmetric")
if (!is.null(rownames(x)) && !is.null(colnames(x)) && !all(rownames(x) == colnames(x))) stop("matrix cannot be symmetric because row and column labels differ")
attr(x, "symmetric") <- symmetric
}
x
}
## method for DSM object
as.distmat.dsm <- function (x, similarity=FALSE, symmetric=FALSE, force.dense=FALSE, ...) {
as.distmat(find.canonical.matrix(x), similarity=similarity, symmetric=symmetric, force.dense=force.dense, ...)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/as_distmat.R
|
as.dsm <- function (obj, ...) UseMethod("as.dsm")
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/as_dsm.R
|
as.dsm.TermDocumentMatrix <- function (obj, ..., verbose=FALSE) {
if (! inherits(obj, c("TermDocumentMatrix", "DocumentTermMatrix"))) stop("argument must be a 'tm' object of class 'TermDocumentMatrix' or 'DocumentTermMatrix'")
if (any(is.na(obj$v))) stop("missing values in 'tm' matrix are not supported by wordspace package")
M <- sparseMatrix(i=obj$i, j=obj$j, x=obj$v, dims=c(obj$nrow, obj$ncol), dimnames=obj$dimnames)
raw.freq <- "tf" %in% attr(obj, "Weighting")
dsm(M, raw.freq=raw.freq, sort=FALSE, verbose=verbose)
}
as.dsm.DocumentTermMatrix <- as.dsm.TermDocumentMatrix
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/as_dsm_tm.R
|
as.matrix.dsm <- function (x, what=c("auto", "M", "S"), ...) {
what <- match.arg(what)
info <- check.dsm(x, validate=TRUE) # ensure that row/column names are correct
if (what == "auto") {
if (info$S$ok) what <- "S"
else if (info$M$ok) what <- "M"
else stop("neither M nor S are available")
}
if (what == "M") {
if (!info$M$ok) stop("co-occurrence matrix M is not available")
dsm.canonical.matrix(x$M)
}
else if (what == "S") {
if (!info$S$ok) stop("score matrix S is not available")
dsm.canonical.matrix(x$S)
}
else stop(sprintf("internal error -- unsupported what = '%s'", what))
}
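## Usage sketch (illustrative): with what="auto", the score matrix S is
## preferred over the raw frequency matrix M if both are available; the
## result is the canonical (possibly sparse) matrix.
model <- dsm(target=c("cat", "dog"), feature=c("pet", "pet"),
             score=c(2, 1), raw.freq=TRUE)
as.matrix(model, what="M") # raw co-occurrence counts
model <- dsm.score(model, score="MI")
as.matrix(model) # "auto" now returns the MI-scored matrix S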
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/as_matrix_dsm.R
|
rbind.dsm <- function (..., term.suffix=NULL, deparse.level=1) {
.Deprecated(msg="The 'rbind' method for DSM objects is experimental. It may be removed or modified in a future release of the 'wordspace' package.")
models <- list(...) # should be one or more objects of class "dsm" if rbind() dispatches here
if (!is.null(term.suffix) && length(term.suffix) != length(models)) stop("term.suffix must provide as many strings as there are DSMs")
models.info <- lapply(models, check.dsm, validate=TRUE) # validate models and extract dimensions etc.
have.M.vec <- sapply(models.info, function (i) i$M$ok) # are raw frequencies available?
have.M <- all(have.M.vec)
if (any(have.M.vec) && !have.M) stop("either all DSM objects must contain raw frequency data (matrix M), or none of them")
have.S.vec <- sapply(models.info, function (i) i$S$ok) # are scored matrices available?
have.S <- all(have.S.vec)
if (any(have.S.vec) && !have.S) warning("some but not all DSM objects contain score matrix S, dropped from result")
if (!(have.M || have.S)) stop("neither raw frequencies M nor score matrices S found consistently across all DSM objects")
any.locked <- any(sapply(models.info, function (i) i$locked)) # are any of the input DSMs locked already?
N.vec <- sapply(models.info, function (i) i$N) # extract sample sizes
cols.merged <- .combine.marginals(lapply(models, function (m) m$cols), margin="column", mode="same") # check feature dimensions, then combine column marginals
marginals.inconsistent <- attr(cols.merged, "adjusted") # if marginals or sample sizes differ between DSMs, they're adjusted to the maximum value to ensure consistency
if (have.M) {
if (diff(range(N.vec)) >= 1) marginals.inconsistent <- TRUE
N <- max(N.vec)
} else {
N <- NA
}
if (marginals.inconsistent && !have.S) warning("DSM objects have inconsistent column marginals / sample sizes, should calculate scores before combining them")
rows.merged <- .bind.marginals(lapply(models, function (m) m$rows), margin="row", term.suffix=term.suffix)
## TODO: rbind is memory-inefficient for sparse matrices because it recursively combines two matrices at a time (via dispatch to rbind2())
## - replace by custom implementation (using triplet representation?), which need not preserve row/col names
## - rbind() for dense matrices seems compact and fast
res <- list(
rows = rows.merged,
cols = cols.merged,
globals = models[[1]]$globals,
locked = marginals.inconsistent || any.locked
)
if (have.M) {
res$globals$N <- N
res$M <- do.call(rbind, lapply(models, function (m) m$M))
dimnames(res$M) <- list(res$rows$term, res$cols$term)
}
if (have.S) {
res$S <- do.call(rbind, lapply(models, function (m) m$S))
dimnames(res$S) <- list(res$rows$term, res$cols$term)
}
class(res) <- c("dsm", "list")
return(res)
}
cbind.dsm <- function (..., term.suffix=NULL, deparse.level=1) {
.Deprecated(msg="The 'cbind' method for DSM objects is experimental. It may be removed or modified in a future release of the 'wordspace' package.")
stop("not yet implemented")
}
merge.dsm <- function (x, y, ..., rows=TRUE, all=FALSE, term.suffix=NULL) {
.Deprecated(msg="The 'merge' method for DSM objects is deprecated. It will be removed in the next release of the 'wordspace' package and may be re-introduced later with different semantics.")
models <- list(x, y, ...)
n.models <- length(models)
if (!is.null(term.suffix) && length(term.suffix) != n.models) stop("term.suffix must provide as many strings as there are DSMs")
models.info <- lapply(models, check.dsm, validate=TRUE) # validate models and extract dimensions etc.
if (all) stop("all=TRUE is not yet implemented")
if (!rows) stop("rows=FALSE is not yet implemented")
have.M.vec <- sapply(models.info, function (i) i$M$ok) # are raw frequencies available?
have.M <- all(have.M.vec)
if (any(have.M.vec) && !have.M) stop("either all DSM objects must contain raw frequency data (matrix M), or none of them")
have.S.vec <- sapply(models.info, function (i) i$S$ok) # are scored matrices available?
have.S <- all(have.S.vec)
if (any(have.S.vec) && !have.S) warning("some but not all DSM objects contain score matrix S, dropped from result")
if (!(have.M || have.S)) stop("neither raw frequencies M nor score matrices S found consistently across all DSM objects")
any.locked <- any(sapply(models.info, function (i) i$locked)) # are any of the input DSMs locked already?
any.sparse <- any(sapply(models.info, function (i) isTRUE(i$M$sparse) || isTRUE(i$S$sparse))) # are there any sparse matrices?
N.vec <- sapply(models.info, function (i) i$N) # extract sample sizes
# bind rows of DSM objects, preserving only terms that are shared by all DSM
cols.merged <- .combine.marginals(lapply(models, function (m) m$cols), margin="column", mode="intersect")
marginals.inconsistent <- attr(cols.merged, "adjusted") # if marginals differ between DSMs, they're adjusted to the maximum value to ensure consistency
if (have.M) {
if (diff(range(N.vec)) >= 1) marginals.inconsistent <- TRUE
N <- max(N.vec)
} else {
N <- NA
}
if (marginals.inconsistent && !have.S) warning("DSM objects have inconsistent column marginals / sample sizes, should calculate scores before combining them")
if (any.sparse) {
## for sparse matrices, extract/reorder columns, then call rbind() to merge the models
## TODO: this wastes huge amounts of memory; needs to be reimplemented in a more sophisticated way!
adjusted.models <- lapply(models, function (.m) subset(.m, select=na.omit( match(cols.merged$term, .m$cols$term) )))
return( do.call(rbind, c(adjusted.models, list(term.suffix=term.suffix))) )
}
else {
## for dense matrices, build merged DSM directly, filling in a pre-allocated matrix (more memory-efficient)
rows.merged <- .bind.marginals(lapply(models, function (m) m$rows), margin="row", term.suffix=term.suffix)
n.rows <- sapply(models.info, function (i) i$nrow)
first.row <- cumsum(c(1, n.rows)) # rows offsets of individual DSMs in combined matrix M
if (have.M) M <- matrix(nrow=nrow(rows.merged), ncol=nrow(cols.merged), dimnames=list(rows.merged$term, cols.merged$term))
if (have.S) S <- matrix(nrow=nrow(rows.merged), ncol=nrow(cols.merged), dimnames=list(rows.merged$term, cols.merged$term))
for (i in 1:n.models) {
model <- models[[i]]
col.idx <- na.omit( match(cols.merged$term, model$cols$term) ) # extract columns of i-th DSM matrix that match the common terms
if (have.M) M[ (first.row[i]):(first.row[i]+n.rows[i]-1), ] <- model$M[ , col.idx]
if (have.S) S[ (first.row[i]):(first.row[i]+n.rows[i]-1), ] <- model$S[ , col.idx]
}
## construct and return merged DSM
res <- list(rows = rows.merged,
cols = cols.merged,
globals = models[[1]]$globals,
locked = marginals.inconsistent || any.locked)
if (have.M) {
res$M <- M
res$globals$N <- res$N <- N
}
if (have.S) res$S <- S
class(res) <- c("dsm", "list")
return(res)
}
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/bind_merge_dsm.R
|
## internal helper functions for [rc]bind.dsm() and merge.dsm()
# merge tables of marginal frequencies (either columns or rows, as indicated by margin argument)
# - all tables must have variables "term" and (optionally) "f"; metadata variables are taken from the first table
# - marginal frequencies are checked for equality; if there are differences, pmax() is assigned to result table with attribute "adjusted" set
# - mode="same" requires that all tables have exactly the same terms in the same order and aborts with error message otherwise
# - mode="intersection" preserves only terms that occur in all tables and reorders them if necessary
# - mode="union" collects terms from all tables (not implemented yet)
.combine.marginals <- function (tbls, margin=c("column", "row"), mode=c("same", "intersection", "union")) {
mode <- match.arg(mode)
if (mode == "union") stop("'union' operation has not been implemented yet")
margin <- match.arg(margin)
stopifnot(length(tbls) >= 1)
have.term.vec <- sapply(tbls, function (.t) "term" %in% colnames(.t))
if (!all(have.term.vec)) stop("all ", margin, " marginal tables must contain variable 'term'")
have.f.vec <- sapply(tbls, function (.t) "f" %in% colnames(.t))
have.f <- all(have.f.vec)
if (any(have.f.vec) && !have.f) stop("either all ", margin, " marginal tables must contain variable 'f', or none of them may")
first.tbl <- tbls[[1]]
other.tbls <- tbls[-1]
terms <- first.tbl$term
if (mode == "same") {
length.ok <- all( sapply(other.tbls, function (.t) nrow(.t) == length(terms)) )
if (!length.ok) stop("all DSM objects must have the same number of ", margin, "s")
terms.ok <- all( sapply(other.tbls, function (.t) all(.t$term == terms)) )
if (!terms.ok) stop("all DSM objects must have the same ", margin, " labels (terms or features)")
} else {
for (.t in other.tbls) terms <- intersect(terms, .t$term)
if (length(terms) < 1) stop("DSM objects have no common ", margin, " labels, cannot merge")
# intersect() keeps original ordering from first.tbl, so don't re-sort terms
}
adjusted <- FALSE
terms.idx <- match(terms, first.tbl$term)
res <- data.frame(term=terms, stringsAsFactors=FALSE)
if (have.f) {
F <- first.tbl$f[terms.idx] # init marginal frequencies from first table
for (.t in other.tbls) {
.idx <- match(terms, .t$term)
.f <- .t$f[.idx]
if (any(.f != F)) {
F <- pmax(F, .f)
adjusted <- TRUE
}
}
res$f <- F
}
other.vars <- setdiff(colnames(first.tbl), c("term", "f")) # metadata variables from first table
if (length(other.vars) > 0) res <- cbind(res, first.tbl[terms.idx, other.vars, drop=FALSE])
attr(res, "adjusted") <- adjusted
return(res)
}
# rbind tables of marginal frequencies (i.e. joining terms from all tables) with some consistency checks
# - all tables must have exactly the same variables, and terms must be unique across all tables
# - margin argument is currently used for error messages only
# - optional term.suffix contains strings to be appended to terms from each table in order to make them unique
.bind.marginals <- function (tbls, margin=c("row", "column"), term.suffix=NULL) {
margin <- match.arg(margin)
stopifnot(length(tbls) >= 1)
have.term.vec <- sapply(tbls, function (.t) "term" %in% colnames(.t))
if (!all(have.term.vec)) stop("all ", margin, " marginal tables must contain variable 'term'")
first.tbl <- tbls[[1]]
other.tbls <- tbls[-1]
vars <- colnames(first.tbl) # check that all tables are compatible, i.e. have the same variables in the same order
vars.same <- all( sapply(other.tbls, function (.t) all( colnames(.t) == vars )) )
if (!vars.same) stop(margin, " information tables of all DSM objects must be compatible (same variables)")
n.items <- sapply(tbls, function (.t) nrow(.t)) # number of entries from each DSM
res <- do.call(rbind, tbls)
if (!is.null(term.suffix)) {
stopifnot(length(term.suffix) == length(tbls))
res$orig.term <- res$term
res$orig.part <- rep(term.suffix, n.items)
res$term <- paste(res$orig.term, res$orig.part, sep="")
}
if (any(duplicated(res$term))) stop(margin, " labels must be unique across all DSM objects")
rownames(res) <- 1:nrow(res)
return(res)
}
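## Usage sketch (illustrative): merging two column-marginal tables that share
## the terms "a" and "b"; differing frequencies are raised to the pairwise
## maximum and the adjustment is flagged via attr(, "adjusted").
t1 <- data.frame(term=c("a", "b", "c"), f=c(10, 5, 2), stringsAsFactors=FALSE)
t2 <- data.frame(term=c("b", "a"), f=c(7, 10), stringsAsFactors=FALSE)
res <- .combine.marginals(list(t1, t2), margin="column", mode="intersection")
res$f # 10 7
attr(res, "adjusted") # TRUE (f for "b" differs: 5 vs. 7)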
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/bind_merge_utils.R
|
check.dsm <- function (model, validate=FALSE, nonneg.check=FALSE) {
stopifnot(inherits(model, "dsm"))
slots <- names(model)
if ("M" %in% slots) {
M <- dsm.is.canonical(model$M, nonneg.check=nonneg.check)
M$ok <- TRUE
} else {
M <- data.frame(ok=FALSE)
}
rownames(M) <- "M"
if ("S" %in% slots) {
S <- dsm.is.canonical(model$S, nonneg.check=nonneg.check)
S$ok <- TRUE
} else {
S <- data.frame(ok=FALSE)
}
rownames(S) <- "S"
stopifnot(M$ok || S$ok) # need to have either frequency matrix or score matrix (or both)
stopifnot(all(c("rows", "cols", "globals") %in% slots))
required <- if (M$ok) c("term", "f") else c("term") # required columns in $rows and $cols
stopifnot(all(required %in% colnames(model$rows)))
stopifnot(all(required %in% colnames(model$cols)))
N <- if ("N" %in% names(model$globals)) model$globals$N else NA
if (M$ok && !is.finite(N)) stop("missing information on sample size N (but frequency matrix M is present)")
is.locked <- if ("locked" %in% names(model$globals)) model$globals$locked else FALSE
n.rows <- nrow(model$rows)
n.cols <- nrow(model$cols)
if (M$ok) {
stopifnot(nrow(model$M) == n.rows)
stopifnot(ncol(model$M) == n.cols)
if (validate) {
stopifnot(all(rownames(model$M) == model$rows$term))
stopifnot(all(colnames(model$M) == model$cols$term))
}
}
if (S$ok) {
stopifnot(nrow(model$S) == n.rows)
stopifnot(ncol(model$S) == n.cols)
if (validate) {
stopifnot(all(rownames(model$S) == model$rows$term))
stopifnot(all(colnames(model$S) == model$cols$term))
}
}
list(nrow=n.rows, ncol=n.cols, N=N, M=M, S=S, locked=is.locked)
}
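## Usage sketch (illustrative): check.dsm() validates a DSM object and
## returns its dimensions, sample size and matrix availability flags.
model <- dsm(target=c("cat", "dog"), feature=c("pet", "pet"),
             score=c(2, 1), raw.freq=TRUE)
info <- check.dsm(model, validate=TRUE)
c(info$nrow, info$ncol, info$N) # 2 1 3
info$M$ok # TRUE, raw frequency matrix is available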
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/check_dsm.R
|
context.vectors <- function (M, contexts, split="\\s+", drop.missing=TRUE, row.names=NULL) {
M <- find.canonical.matrix(M) # ensure that M is a suitable matrix, or extract matrix from DSM
known.terms <- rownames(M)
nR <- nrow(M)
nC <- ncol(M)
if (is.null(row.names)) {
row.names <- if (is.null(names(contexts))) 1:length(contexts) else names(contexts)
} else {
if (length(row.names) != length(contexts)) stop("row.names= must have same length as contexts=")
}
if (is.character(contexts)) {
tokens.list <- strsplit(contexts, split, perl=TRUE)
} else {
if (!is.list(contexts)) stop("contexts= must be either a character vector or a list")
tokens.list <- contexts
}
CM <- t(vapply(tokens.list, function (tokens) {
weights <- NULL
if (is.character(tokens)) {
## context = vector of tokens
idx <- na.omit(match(tokens, known.terms)) # row numbers of known terms in M (possibly repeated))
} else if (is.logical(tokens)) {
## context = logical index vector into M (deprecated)
if (length(tokens) != nR) stop("invalid logical index vector in contexts= (wrong length)")
idx <- which(tokens)
} else if (is.numeric(tokens)) {
terms <- names(tokens)
if (is.character(terms)) {
## context = weighted bag of words = vector of weights labelled with terms
idx <- match(terms, known.terms)
ok <- !is.na(idx)
idx <- idx[ok]
weights <- tokens[ok]
}
else if (is.integer(tokens)) {
## context = index of row numbers into M (deprecated)
idx <- tokens
if (length(idx) > 0 && (min(idx) < 1 || max(idx) > nR)) stop("invalid integer index vector in contexts= (row number out of range)")
}
else stop("invalid numeric vector without labels in contexts=")
} else stop("invalid specification in contexts= (must be character, numeric or logical vector)")
if (length(idx) > 0) {
if (is.null(weights)) {
colMeans(M[idx, , drop=FALSE]) # unweighted centroid vector
}
else {
colSums(scaleMargins(M[idx, , drop=FALSE], rows=weights)) / sum(weights) # weighted centroid vector
}
} else {
if (drop.missing) rep(NA, nC) else rep(0, nC) # return null vector for context without known tokens
}
}, FUN.VALUE=numeric(nC), USE.NAMES=FALSE))
rownames(CM) <- row.names
if (!is.null(colnames(M))) colnames(CM) <- colnames(M)
if (drop.missing) {
idx.miss <- is.na(CM[, 1]) # assuming there were no NAs or NaNs in M
CM[!idx.miss, , drop=FALSE]
} else {
CM
}
}
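## Usage sketch (illustrative): centroid vectors for two tokenised contexts,
## computed over the row vectors of a toy matrix; tokens that are not among
## the row labels of M are silently ignored.
M <- matrix(1:6, nrow=3, dimnames=list(c("cat", "dog", "fish"), c("f1", "f2")))
context.vectors(M, c("the cat and the dog", "a fish"))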
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/context_vectors.R
|
convert.lemma <-
function (lemma, format=c("CWB", "BNC", "DM", "HW", "HWLC"), hw.tolower=FALSE) {
format <- match.arg(format)
lemma <- as.character(lemma)
errors <- grep("_[A-Z.]$", lemma, invert=TRUE, perl=TRUE, value=TRUE)
if (length(errors) > 0) stop("invalid lemma string(s): ", paste(errors, collapse=", "))
len <- nchar(lemma)
hw <- substr(lemma, 1, len - 2) # headword part
pos <- substr(lemma, len, len) # POS code
if (format == "BNC") {
penn2bnc <- c(N="SUBST", Z="SUBST", V="VERB", J="ADJ", R="ADV", D="ART", .="STOP", I="PREP", T="PREP")
bncpos <- penn2bnc[pos]
bncpos[is.na(bncpos)] <- "UNC"
#### old code (should no longer be needed)
## penn <- c("N", "Z", "V", "J", "R", "D", ".", "I", "T")
## bnc <- c("SUBST", "SUBST", "VERB", "ADJ", "ADV", "ART", "STOP", "PREP", "PREP", "UNC")
## pos.idx <- match(pos, penn, nomatch=length(bnc))
## bncpos <- bnc[pos.idx]
}
if (hw.tolower) {
hw <- tolower(hw)
}
switch(format,
CWB = paste(hw, pos, sep="_"),
BNC = paste(tolower(hw), bncpos, sep="_"),
DM = paste(hw, tolower(pos), sep="-"),
HW = hw,
HWLC = tolower(hw)
)
}
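## Usage sketch (illustrative): CWB-style lemmas consist of a headword and a
## single-letter Penn POS code, which can be mapped to other notations.
convert.lemma(c("walk_V", "dog_N"), format="HW")  # "walk" "dog"
convert.lemma(c("walk_V", "dog_N"), format="BNC") # "walk_VERB" "dog_SUBST"
convert.lemma(c("walk_V", "dog_N"), format="DM")  # "walk-v" "dog-n"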
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/convert_lemma.R
|
dim.dsm <- function (x) {
res <- check.dsm(x)
c(res$nrow, res$ncol)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dim_dsm.R
|
dimnames.dsm <- function (x) {
res <- check.dsm(x, validate=TRUE) # row/column names must be consistent
if (res$S$ok) {
dimnames(x$S)
} else if (res$M$ok) {
dimnames(x$M)
} else stop("no co-occurrence matrix available in DSM object")
}
`dimnames<-.dsm` <- function (x, value) {
info <- check.dsm(x) # don't need to check row/column names since they'll be overwritten
if (length(value) != 2) stop("value must be a list of length two with row and column names, respectively")
valR <- value[[1]] # validate new row and column names
if (!is.character(valR)) stop("rownames must be a character vector")
nR <- length(valR)
if (nR != info$nrow) stop(sprintf("rownames should be a vector of length %d, but has %d elements", info$nrow, nR))
valC <- value[[2]]
if (!is.character(valC)) stop("colnames must be a character vector")
nC <- length(valC)
if (nC != info$ncol) stop(sprintf("colnames should be a vector of length %d, but has %d elements", info$ncol, nC))
x$rows$term <- valR # set in row/column info tables
x$cols$term <- valC
if (info$M$ok) dimnames(x$M) <- value # set both dimnames in single call to avoid overhead
if (info$S$ok) dimnames(x$S) <- value
x
}
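## Usage sketch (illustrative): dimnames() reads labels from S or M, and the
## replacement form keeps the row/column info tables in sync.
model <- dsm(target=c("cat", "dog"), feature=c("pet", "pet"),
             score=c(2, 1), raw.freq=TRUE)
dimnames(model) # list of row and column labels
dimnames(model) <- list(c("CAT", "DOG"), "PET")
model$rows$term # "CAT" "DOG"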
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dimnames_dsm.R
|
dist.matrix <- function (M, M2=NULL, method="cosine", p=2, normalized=FALSE, byrow=TRUE, convert=TRUE, as.dist=FALSE, terms=NULL, terms2=terms, skip.missing=FALSE) {
method <- match.arg(method, c("cosine", "euclidean", "maximum", "manhattan", "minkowski", "canberra", "jaccard", "overlap"))
similarity <- (method %in% c("cosine", "jaccard", "overlap")) && !convert
symmetric <- !(method %in% c("overlap")) # FALSE if distance/similarity measure is asymmetric
cross.distance <- !is.null(M2) # TRUE if calculating (rectangular) cross-distance matrix
need.nonneg <- method %in% c("jaccard", "overlap")
if (method == "minkowski") {
if (p == Inf) {
method <- "maximum" # same as in rowNorms()
} else if (p < 0 || !is.finite(p)) {
stop("Minkowski p-distance can only be computed for 0 <= p < Inf")
}
}
if (as.dist && similarity) stop("cannot create 'dist' object from similarity matrix")
if (as.dist && cross.distance) stop("cannot create 'dist' object from cross-distance matrix")
if (as.dist && !symmetric) stop("cannot create 'dist' object for asymmetric distance measure")
ensure.nonneg <- function (M, name="M") {
is.nonneg <- dsm.is.canonical(M)$nonneg
if (is.na(is.nonneg)) is.nonneg <- dsm.is.canonical(M, nonneg.check=TRUE)$nonneg
if (!is.nonneg) stop(sprintf("%s must be a non-negative matrix", name))
}
M <- find.canonical.matrix(M) # extract co-occurrence matrix from DSM object, ensure canonical format
sparse.M <- dsm.is.canonical(M)$sparse
if (need.nonneg) ensure.nonneg(M)
if (cross.distance) {
M2 <- find.canonical.matrix(M2)
sparse.M2 <- dsm.is.canonical(M2)$sparse
if (byrow) {
if (ncol(M) != ncol(M2)) stop("M and M2 are not conformable (must have same number of columns)")
} else {
if (nrow(M) != nrow(M2)) stop("M and M2 are not conformable (must have same number of rows)")
}
if (need.nonneg) ensure.nonneg(M2, name="M2")
}
if (!is.null(terms) || !is.null(terms2)) {
targets.M <- if (byrow) rownames(M) else colnames(M)
targets.M2 <- if (is.null(M2)) targets.M else if (byrow) rownames(M2) else colnames(M2)
if (!missing(terms2)) cross.distance <- TRUE # if different filters are applied, we're always dealing with a cross-distance calculation
## if cross.distance is FALSE, M2 must be NULL and terms2 unspecified (i.e. M2=M and terms2=terms), so leave M2 set to NULL
if (cross.distance) {
if (!is.null(terms2)) {
terms2 <- as.character(terms2) # in case terms2 is a factor
found <- terms2 %in% targets.M2
if (!all(found) && !skip.missing) stop("second term(s) not found in M2: ", paste(terms2[!found], collapse=", "))
terms2 <- terms2[found]
if (is.null(M2)) {
M2 <- if (byrow) M[terms2, , drop=FALSE] else M[ , terms2, drop=FALSE] # need to process terms2 first before overwriting M below
} else {
M2 <- if (byrow) M2[terms2, , drop=FALSE] else M2[ , terms2, drop=FALSE]
}
} else {
if (is.null(M2)) M2 <- M # cross-distances with terms2=NULL -> M2 = copy of M before subsetting
}
sparse.M2 <- dsm.is.canonical(M2)$sparse # define/update sparse.M2
}
if (!is.null(terms)) {
terms <- as.character(terms) # in case terms is a factor
found <- terms %in% targets.M
if (!all(found) && !skip.missing) stop("first term(s) not found in M: ", paste(terms[!found], collapse=", "))
terms <- terms[found]
M <- if (byrow) M[terms, , drop=FALSE] else M[ , terms, drop=FALSE]
}
}
if (method == "cosine") {
## cosine / angular measure is computed as very efficient matrix crossproduct
if (byrow) {
result <- if (is.null(M2)) tcrossprod(M) else tcrossprod(M, M2)
} else {
result <- if (is.null(M2)) crossprod(M) else crossprod(M, M2)
}
result <- as.matrix(result) # ensure that cosine similarity matrix is in dense representation
if (!normalized) {
norms.M <- if (byrow) rowNorms(M, "euclidean") else colNorms(M, "euclidean")
if (is.null(M2)) {
norms.M2 <- norms.M
} else {
norms.M2 <- if (byrow) rowNorms(M2, "euclidean") else colNorms(M2, "euclidean")
}
result <- scaleMargins(result, rows=1/norms.M, cols=1/norms.M2, duplicate=FALSE) # transform in-place (newly allocated above)
}
if (convert) {
transform_code <- 0L # cosine -> angle transformation
tol <- 1e-12
result <- CPP_similarity_to_distance(result, transform_code, tol, duplicate=FALSE) # can operate inplace on <result>
}
rownames(result) <- if (byrow) rownames(M) else colnames(M)
colnames(result) <- if (is.null(M2)) rownames(result) else if (byrow) rownames(M2) else colnames(M2)
} else {
## other distance measures are implemented in C code, working on columns (transposed matrix) for efficiency
.M <- if (byrow) t(M) else M
if (cross.distance) {
if (sparse.M != sparse.M2) stop("M and M2 must either be both in dense format or both in sparse format")
.M2 <- if (byrow) t(M2) else M2
} else {
.M2 <- .M
}
method.code <- switch(method, euclidean=0, maximum=1, manhattan=2, minkowski=3, canberra=4, jaccard=5, overlap=6) # must be kept in sync with C code
param1 <- switch(method, euclidean=0, maximum=0, manhattan=0, minkowski=p, canberra=0, jaccard=0, overlap=0)
if (sparse.M) {
result <- CPP_col_dist_sparse(ncol(.M), .M@p, .M@i, .M@x, ncol(.M2), .M2@p, .M2@i, .M2@x, method.code, param1, symmetric && !cross.distance)
} else {
result <- CPP_col_dist_dense(.M, .M2, method.code, param1, symmetric && !cross.distance)
}
if (method %in% c("jaccard", "overlap")) {
if (method == "overlap" && !normalized) {
## asymmetric overlap relative to x, so values must be normalised with ||x||_1 = sum x_i
norms.M <- if (byrow) rowNorms(M, "manhattan") else colNorms(M, "manhattan")
result <- scaleMargins(result, rows=1/norms.M, duplicate=FALSE) # can operate inplace on <result>
idx <- norms.M == 0 # special case: o(0, x) = 1
if (any(idx)) result[idx, ] <- 1
}
if (convert) {
transform_code <- 1L # d = 1 - sim is a metric (jaccard) or dissimilarity (overlap)
result <- CPP_similarity_to_distance(result, transform_code, 0, duplicate=FALSE) # can operate inplace on <result>
}
}
dimnames(result) <- list(colnames(.M), colnames(.M2))
}
if (as.dist) {
as.dist(result)
} else {
class(result) <- c("dist.matrix", "matrix")
if (similarity) attr(result, "similarity") <- TRUE
if (symmetric && !cross.distance) attr(result, "symmetric") <- TRUE
result
}
}
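## Usage sketch (illustrative): angular distances between the row vectors of
## a small dense matrix; convert=FALSE returns cosine similarities instead.
M <- matrix(c(1, 0, 2,
              0, 1, 2,
              1, 1, 0),
            nrow=3, byrow=TRUE,
            dimnames=list(c("cat", "dog", "fish"), c("f1", "f2", "f3")))
dist.matrix(M, method="cosine") # symmetric dist.matrix of angles
dist.matrix(M, method="cosine", convert=FALSE) # cosine similarities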
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dist_matrix.R
|
.complain.about.words <- function (w, what="words") {
n <- length(w)
if (n >= 10) {
sprintf("%d missing %s: %s, ...", n, what, paste(w[1:6], collapse=", "))
}
else {
sprintf("missing %s: %s", what, paste(w, collapse=", "))
}
}
dsm <- function (M = NULL, target = NULL, feature = NULL, score = NULL, rowinfo = NULL, colinfo = NULL, N = NA, globals = list(), raw.freq = FALSE, sort = FALSE, verbose = FALSE) {
if (!is.null(M)) {
if (!is.null(score)) stop("you must specify either M= or score=, target= and feature=")
flags <- dsm.is.canonical(M)
if (verbose) cat(sprintf("Compiling DSM from %d x %d co-occurrence matrix ...\n", nrow(M), ncol(M)))
if (!flags$canonical) {
if (verbose) cat(sprintf(" - converting %s matrix into canonical format\n", if (flags$sparse) "sparse" else "dense"))
M <- dsm.canonical.matrix(M)
}
if (!is.null(target)) {
if (length(target) != nrow(M)) stop("target= must provide one label for each row of M")
rownames(M) <- target
} else {
if (is.null(rownames(M))) stop("target= must be specified if M doesn't have row labels")
}
if (!is.null(feature)) {
if (length(feature) != ncol(M)) stop("feature= must provide one label for each column of M")
colnames(M) <- feature
} else {
if (is.null(colnames(M))) stop("feature= must be specified if M doesn't have column labels")
}
} else {
if (is.null(score) || is.null(target) || is.null(feature)) stop("you must specify either M= or score=, target= and feature=")
n.items <- length(target)
ok <- (length(score) == n.items || length(score) == 1) && length(feature) == n.items
if (!ok) stop("target= and feature= must have the same length, and score= must have the same length or length 1")
stopifnot(is.numeric(score))
if (is.factor(target)) target <- as.character(target)
if (is.factor(feature)) feature <- as.character(feature)
stopifnot(is.character(target) && is.character(feature))
if (verbose) cat(sprintf("Compiling DSM from %.2fM items in triplet representation ...\n", n.items / 1e6))
if (verbose) cat(" - target & feature terms\n")
t.dict <- unique(target) # compile lists of target and feature types
f.dict <- unique(feature)
if (sort) {
t.dict <- sort(t.dict)
f.dict <- sort(f.dict)
}
row.idx <- match(target, t.dict) # row/column indices of triplets
col.idx <- match(feature, f.dict)
if (verbose) cat(" - building sparse matrix\n")
M <- sparseMatrix(i=row.idx, j=col.idx, x=score, giveCsparse=TRUE) # may either be M or S in the final object
rm(row.idx, col.idx) # free memory
rownames(M) <- t.dict
colnames(M) <- f.dict
flags <- list(sparse=TRUE)
}
if (raw.freq) {
if (verbose) cat(" - checking non-negative frequency counts\n")
if (!signcount(M, "nonneg")) stop("raw frequency counts must be non-negative")
attr(M, "nonneg") <- TRUE
}
if (verbose) cat(" - collecting target and feature information\n")
if (is.null(rowinfo)) {
rowinfo <- data.frame(term=as.character(rownames(M)), stringsAsFactors=FALSE)
} else {
if (!("term" %in% colnames(rowinfo))) stop("rowinfo= must specify target types in column 'term'")
rowinfo$term <- as.character(rowinfo$term)
idx <- match(rownames(M), rowinfo$term)
if (any(is.na(idx))) stop(.complain.about.words(rownames(M)[is.na(idx)], "target types in rowinfo"))
rowinfo <- rowinfo[idx, , drop=FALSE]
}
if (is.null(colinfo)) {
colinfo <- data.frame(term=as.character(colnames(M)), stringsAsFactors=FALSE)
} else {
if (!("term" %in% colnames(colinfo))) stop("colinfo= must specify feature types in column 'term'")
colinfo$term <- as.character(colinfo$term)
idx <- match(colnames(M), colinfo$term)
if (any(is.na(idx))) stop(.complain.about.words(colnames(M)[is.na(idx)], "feature types in colinfo"))
colinfo <- colinfo[idx, , drop=FALSE]
}
if (verbose) cat(" - computing marginal statistics\n")
if (is.na(N) && !is.null(globals$N)) N <- globals$N # use sample size from globals unless specified with N=
if (!("nnzero" %in% colnames(rowinfo))) rowinfo$nnzero <- rowNorms(M, method="minkowski", p=0) # we now have efficient nonzero counts with "Hamming length"
if (!("nnzero" %in% colnames(colinfo))) colinfo$nnzero <- colNorms(M, method="minkowski", p=0)
if (raw.freq) {
if (!("f" %in% colnames(rowinfo))) rowinfo$f <- rowSums(M) # M must be non-negative at this point
if (!("f" %in% colnames(colinfo))) colinfo$f <- colSums(M)
if (is.na(N)) N <- sum(M)
}
if (flags$sparse) {
n.nzero <- signcount(M, "nnzero")
if (verbose) cat(sprintf("%d x %d matrix with %d nonzero entries (= %.1f%%)\n", nrow(M), ncol(M), n.nzero, 100 * n.nzero / prod(dim(M))))
} else {
if (verbose) cat(sprintf("%d x %d matrix with %.2fM cells\n", nrow(M), ncol(M), prod(dim(M)) / 1e6))
}
globals$N <- N
globals$locked <- FALSE
if (raw.freq) {
dsm.obj <- list(M=M, rows=rowinfo, cols=colinfo, globals=globals)
} else {
dsm.obj <- list(S=M, rows=rowinfo, cols=colinfo, globals=globals)
}
structure(dsm.obj, class=c("dsm", "list"))
}
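## Usage sketch (illustrative): compile a small DSM from a triplet
## representation of raw co-occurrence counts; with raw.freq=TRUE, marginal
## frequencies and the sample size N are derived from the matrix itself.
model <- dsm(target=c("cat", "cat", "dog"),
             feature=c("pet", "animal", "animal"),
             score=c(10, 5, 3), raw.freq=TRUE)
model$rows # term, nonzero count and marginal frequency f for each target
model$globals$N # sample size = 18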
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dsm.R
|
dsm.is.canonical <- function (x, nonneg.check = FALSE) {
if (is.matrix(x) && is.numeric(x)) {
sparse <- FALSE # regular dense numeric matrix
canonical <- TRUE
} else {
if (!is(x, "dMatrix")) stop("first argument must be a dense or sparse numeric matrix")
sparse <- if (is(x, "sparseMatrix")) TRUE else FALSE
canonical <- if (sparse && is(x, "dgCMatrix")) TRUE else FALSE
}
if (nonneg.check) {
if (!sparse || is(x, "dgeMatrix") || is(x, "dgCMatrix") || is(x, "dgRMatrix")) {
nonneg <- signcount(x, "nonneg") # efficient non-negativity check is supported for these formats
} else {
nonneg <- !any(x < 0) # caution: all(x >= 0) would result in dense matrix!
}
} else {
nonneg <- attr(x, "nonneg")
if (is.null(nonneg)) nonneg <- NA
}
data.frame(sparse=sparse, canonical=canonical, nonneg=nonneg)
}
dsm.canonical.matrix <- function (x, triplet = FALSE, annotate = FALSE, nonneg.check = FALSE) {
if (nonneg.check && !annotate) {
warning("nonneg.check=TRUE ignored without annotate=TRUE")
nonneg.check <- FALSE
}
flags <- dsm.is.canonical(x, nonneg.check=FALSE) # nonneg check deferred to when we have a canonical matrix
if (flags$sparse) {
if (triplet) {
if (nonneg.check && flags$canonical) {
flags$nonneg <- signcount(x, "nonneg") # efficient non-negativity check possible before conversion to triplet form
nonneg.check <- FALSE
}
if (!is(x, "dgTMatrix")) {
## x must be a dMatrix and a sparseMatrix, so there are only two possible adjustments left to check
if (!is(x, "generalMatrix")) x <- as(x, "generalMatrix")
if (!is(x, "TsparseMatrix")) x <- as(x, "TsparseMatrix")
if (!is(x, "dgTMatrix")) stop(paste0("internal error: conversion to dgTMatrix failed, got '", class(x)[1], "' instead"))
}
} else {
if (!flags$canonical) {
## x must be a dMatrix and a sparseMatrix, so there are only two possible adjustments left to check
if (!is(x, "CsparseMatrix")) x <- as(x, "CsparseMatrix")
if (!is(x, "generalMatrix")) x <- as(x, "generalMatrix")
if (!is(x, "dgCMatrix")) stop(paste0("internal error: conversion to dgCMatrix failed, got '", class(x)[1], "' instead"))
}
}
} else {
if (!flags$canonical) x <- as.matrix(x)
}
if (annotate) {
attr(x, "sparse") <- flags$sparse
attr(x, "nonneg") <- if (nonneg.check) dsm.is.canonical(x, nonneg.check=TRUE)$nonneg else flags$nonneg
}
x
}
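## Usage sketch (illustrative, assumes the Matrix package, a wordspace
## dependency, is attached): convert a triplet-format sparse matrix into the
## canonical dgCMatrix format and annotate its sign.
library(Matrix)
M <- as(Matrix(c(1, -1, 0, 2), 2, 2, sparse=TRUE), "TsparseMatrix")
dsm.is.canonical(M) # sparse=TRUE, canonical=FALSE
M2 <- dsm.canonical.matrix(M, annotate=TRUE, nonneg.check=TRUE)
class(M2) # "dgCMatrix"
attr(M2, "nonneg") # FALSE, because of the negative entry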
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dsm_canonical_matrix.R
|
dsm.projection <- function (model, n, method=c("svd", "rsvd", "asvd", "ri", "ri+svd"), oversampling=NA, q=2, rate=.01, power=1, with.basis=FALSE, verbose=FALSE) {
method <- match.arg(method)
M <- find.canonical.matrix(model)
info <- dsm.is.canonical(M) # to find out whether M is sparse or dense
nR <- nrow(M)
nC <- ncol(M)
if (is.na(n)) n <- min(nR, nC)
if (method == "ri") {
if (2*n > nC) stop(sprintf("random indexing from %d to %d dimensions makes no sense", nC, n))
} else {
if (n > min(nR, nC)) stop("number of target dimensions exceeds dimensionality of DSM matrix")
}
if (is.na(oversampling)) {
oversampling <- switch(method, svd=1, rsvd=2, asvd=10, ri=1, "ri+svd"=20)
}
if (with.basis && method %in% c("ri", "ri+svd")) stop("with.basis=TRUE is not supported for RI-based models")
B <- NULL
R2 <- NULL
sigma <- NULL
if (method == "svd") {
## --- standard SVD algorithm (dense or sparse) ---
if (verbose) cat(sprintf("SVD reduction to %d dimensions:\n", n))
if (verbose) cat(" - SVD decomposition\n")
if (info$sparse) {
SVD <- sparsesvd(M, rank=n) # use SVDLIBC algorithm for truncated SVD of sparse matrix
if (ncol(SVD$u) < n) { # in case poorly conditioned singular components have been removed
if (verbose) cat(sprintf(" - %d poorly conditioned singular components have been dropped\n", n - ncol(SVD$u)))
n <- ncol(SVD$u)
}
} else {
SVD <- svd(M, nu=n, nv=(if (with.basis) n else 0)) # we don't need right singular vectors for the dimensionality reduction
}
if (verbose) cat(" - composing final matrix\n")
sigma <- SVD$d[1:n]
if (power == 1) {
S <- scaleMargins(SVD$u, cols=sigma) # dimensionality-reduced matrix
} else {
S <- scaleMargins(SVD$u, cols=sigma^power)
}
if (with.basis) B <- SVD$v
R2 <- sigma^2 / norm(M, "F")^2 # proportion of "variance" captured by SVD dimensions
rm(SVD)
} else if (method == "asvd") {
## --- approximated SVD based on random sample of rows (DEPRECATED) ---
sub.size <- min(n * oversampling, nR)
if (verbose) cat(sprintf("Approximate SVD reduction to %d dimensions, based on %d rows:\n", n, sub.size))
sub.idx <- sort(sample(1:nR, sub.size, replace=FALSE))
M.sub <- M[sub.idx, ]
if (verbose) cat(" - SVD decomposition\n")
SVD <- svd(M.sub, nu=0, nv=n) # here we only need the right singular vectors
if (verbose) cat(" - composing final matrix\n")
S <- M %*% SVD$v # V projects columns to first n latent dimensions
if (with.basis) B <- SVD$v
sigma <- SVD$d[1:n] # extract singular values before the SVD object is discarded
rm(SVD)
S <- as.matrix(S) # make sure result is an ordinary dense matrix (for efficient further processing)
R2 <- colNorms(M, "euclidean")^2 / norm(M, "F")^2 # this should be the proportion of explained "variance"
if (power != 1) S <- scaleMargins(S, cols=sigma^(power - 1))
} else if (method == "rsvd") {
## --- randomized SVD according to Halko, Martinsson & Tropp (2009, p. 9) ---
## preliminary testing suggests there is no substantial difference between the original and transposed rSVD algorithm, so we currently always use the original version
SVD <- rsvd(M, n=n, q=q, oversampling=oversampling, transpose=FALSE, verbose=verbose)
sigma <- SVD$d
if (power == 1) {
S <- scaleMargins(SVD$u, cols=sigma)
} else {
S <- scaleMargins(SVD$u, cols=sigma^power)
}
if (with.basis) B <- SVD$v
R2 <- sigma^2 / norm(M, "F")^2
rm(SVD)
} else if (method %in% c("ri", "ri+svd")) {
## --- straightforward random indexing (with specified fill rate), optionally followed by rSVD ---
if (rate < 0 || rate > 1) stop("RI rate= must be between 0 and 1")
## TODO: -- check references on statistical guarantees, appropriate fill rates, etc.--
if (method == "ri+svd") {
nRI <- n * oversampling # number of intermediate random dimensions
nRI <- max(2*n, min(nRI, floor(nC / 2)))
} else {
nRI <- n # number of random dimensions
}
if (verbose) cat(sprintf("Random Indexing in %d dimensions:\n", nRI))
if (!info$sparse) {
## if original matrix can be stored in dense representation, RI should not pose any memory problems
n.fill <- as.integer(nC * rate) # number of nonzero entries in each random vector
if (n.fill < 1) n.fill <- 1
scale <- 1 / sqrt(n.fill) # scale random vectors so they are normalised
if (verbose) cat(sprintf(" - generating %d random vectors with %d of %d nonzero elements\n", nRI, n.fill, nC))
Q <- matrix(0, nRI, nC)
for (.d in 1:nRI) {
.idx <- sort(sample.int(nC, n.fill))
Q[.d, .idx] <- scale * (1 - 2*rbinom(n.fill, 1, .5))
}
if (verbose) cat(" - projecting into random subspace\n")
S <- tcrossprod(M, Q)
rm(Q)
} else {
## efficient C implementation of RI for sparse matrix (which is guaranteed to be in canonical format)
S <- CPP_random_indexing_sparse(nR, nC, M@p, M@i, M@x, nRI, rate, verbose)
}
if (method == "ri+svd") {
S <- dsm.projection(S, "svd", n, verbose=verbose, with.basis=FALSE) # use plain SVD since matrix is already dense
}
attr(S, "R2") <- R2 <- NULL # partial R2 not accurate for non-orthogonal projection
} else {
stop("dimensionality reduction method '", method, "' has not been implemented yet")
}
dimnames(S) <- list(rownames(M), paste(method, 1:n, sep=""))
if (with.basis) {
dimnames(B) <- list(colnames(M), colnames(S))
attr(S, "basis") <- B
}
if (!is.null(R2)) attr(S, "R2") <- R2
if (!is.null(sigma)) attr(S, "sigma") <- sigma
return(S)
}
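## Usage sketch (illustrative): project a random dense matrix into 2 latent
## SVD dimensions; attr "R2" gives the proportion of variance captured by
## each dimension, attr "sigma" the corresponding singular values.
set.seed(42)
M <- matrix(rnorm(500), nrow=50,
            dimnames=list(paste0("w", 1:50), paste0("f", 1:10)))
S <- dsm.projection(M, n=2, method="svd")
attr(S, "R2")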
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dsm_projection.R
|
dsm.score <- function (model, score="frequency",
sparse=TRUE, negative.ok=NA,
transform=c("none", "log", "root", "sigmoid"),
scale=c("none", "standardize", "center", "scale"),
normalize=FALSE, method="euclidean", p=2, tol=1e-6,
matrix.only=FALSE, update.nnzero=FALSE,
batchsize=1e6, gc.iter=Inf) {
## validate DSM object
model.info <- check.dsm(model, validate=TRUE)
have.M <- model.info$M$ok
have.S <- model.info$S$ok
## check association score and determine internal code (must be consistent with <score.c>)
force(score) # need to evaluate argument to check whether it is a function or a string
if (is.function(score)) {
AM <- score # callback function implementing user-defined AM
score <- "user" # indicates a user-define association score
if (!have.M) stop("cannot compute association scores: no co-occurrence frequency data available")
if (model.info$locked) stop("marginal frequencies are invalid, cannot compute association scores")
} else {
score <- match.arg(score, c("frequency", "simple-ll", "t-score", "z-score", "Dice", "MI", "tf.idf", "log-likelihood", "chi-squared", "reweight"))
score.code <- switch(score, frequency=0, reweight=0, "simple-ll"=1, "t-score"=2, "z-score"=3, Dice=4, MI=5, tf.idf=6, "log-likelihood"=7, "chi-squared"=8)
if (score.code %in% c(0, 4, 6)) sparse <- TRUE # frequency measure, reweighting, tf.idf and Dice are always sparse
if (score == "reweight" && !have.S) stop("cannot use score='reweight': association scores have not been computed yet")
if (score != "reweight" && !have.M) stop("cannot compute association scores: no co-occurrence frequency data available")
if (model.info$locked && !(score.code %in% c(0, 6))) stop("marginal frequencies are invalid, cannot compute association scores")
}
## check transformation and determine internal code (must be consistent with <score.c>)
transform <- match.arg(transform)
transform.code <- switch(transform, none=0, log=1, root=2, sigmoid=3)
transform.fun <- switch( # for use with user-defined AM
transform,
none = identity,
log = function (x) sign(x) * log(abs(x) + 1),
root = function (x) sign(x) * sqrt(abs(x)),
sigmoid = tanh
)
## check other arguments
scale <- match.arg(scale)
## set up marginal frequencies for different AMs
if (score == "reweight") {
cooc.matrix <- model$S # neat trick: apply "frequency" association measure to S instead of M
f1 <- 0 # we may need dummy entries for marginal frequencies and sample size
f2 <- 0
N <- 0
} else if (score == "tf.idf") {
cooc.matrix <- model$M
f1 <- model$rows$f # dummy, will be ignored
if ("df" %in% colnames(model$cols)) {
f2 <- model$cols$df
N <- 1 # df should contain relative document frequencies -> dummy document count
} else if ("nnzero" %in% colnames(model$cols)) {
f2 <- model$cols$nnzero + 1
N <- nrow(cooc.matrix) + 1 # simulate df by column nonzero counts, relative to number of rows of the matrix
} else {
stop("relative document frequencies ('df') or nonzero counts ('nnzero') for feature terms (= columns) are required in order to compute tf.idf association measure")
}
} else {
if (!have.M) stop("cannot compute association scores: no co-occurrence frequency data available")
cooc.matrix <- model$M
f1 <- model$rows$f
f2 <- model$cols$f
N <- model.info$N
if (is.null(f1)) stop("cannot compute association scores: no marginal frequencies for target terms")
if (is.null(f2)) stop("cannot compute association scores: no marginal frequencies for feature terms")
if (is.na(N)) stop("cannot compute association scores: unknown sample size")
}
## check matrix format and sparse/dense representation
matrix.info <- dsm.is.canonical(cooc.matrix)
cooc.sparse <- matrix.info$sparse
scaling.not.sparse <- scale %in% c("standardize", "center")
if (is.na(negative.ok)) negative.ok <- !cooc.sparse
if (is.character(negative.ok)) negative.ok <- match.arg(negative.ok, "nonzero")
if (negative.ok == "nonzero") {
if (scaling.not.sparse) stop("column scaling would introduce negative values and force dense representation: specify negative.ok=TRUE if you really want to do this")
negative.ok <- !sparse # allow negative values for nonzero cells if sparse=FALSE
}
else {
if (!negative.ok) {
if (!sparse) stop("computation of non-sparse association scores would introduce negative values and force dense representation: specify negative.ok=TRUE if you really want to do this")
if (scaling.not.sparse) stop("column scaling would introduce negative values and force dense representation: specify negative.ok=TRUE if you really want to do this")
}
if (!sparse && cooc.sparse) {
cooc.matrix <- as.matrix(cooc.matrix) # make co-occurrence matrix dense for non-sparse association scores
matrix.info <- list(sparse=FALSE, canonical=TRUE, nonneg=FALSE)
cooc.sparse <- FALSE
}
if (negative.ok && sparse && !scaling.not.sparse) negative.ok <- FALSE # scored matrix will be non-negative, so mark it as such
}
if (!matrix.info$canonical) cooc.matrix <- dsm.canonical.matrix(cooc.matrix)
## compute association scores and apply optional transformation
if (score == "user") {
## (A) user-defined association measure: process large matrix in batches
## wrapper around callback function provides observed and expected frequencies as arguments with lazy evaluation
compute.AM <- function (
AM, f, f1, f2, N, rows, cols,
O=f, E=f1*f2/N,
R1=f1, R2=N-f1, C1=f2, C2=N-f2,
O11=f, O12=f1-f, O21=f2-f, O22=N-f1-f2+f,
E11=f1*f2/N, E12=f1*C2/N, E21=R2*f2/N, E22=R2*C2/N) {
AM(f=f, f1=f1, f2=f2, N=N, rows=rows, cols=cols, O=O, E=E,
R1=R1, R2=R2, C1=C1, C2=C2,
O11=O11, O12=O12, O21=O21, O22=O22, E11=E, E12=E12, E21=E21, E22=E22)
}
if (cooc.sparse) {
## sparse matrix: unpack dgCMatrix into triplet representation (i.row, i.col, f), then process in batches
# i.row <- cooc.matrix@i + 1 # we compute i.row[idx] directly in the loop below
i.col <- rep(seq_len(ncol(cooc.matrix)), times=diff(cooc.matrix@p)) # large vector, but can't be done effectively in batches
n <- length(cooc.matrix@x)
scores.x <- numeric(n) # pre-allocate result vector
i1 <- 1
gc.step <- 1
while (i1 <= n) {
i2 <- min(i1 + batchsize - 1, n)
## cat(sprintf("dsm.score: processing cells #%d .. #%d\n", i1, i2))
idx <- i1:i2 # cells to process in this batch
i.row.idx <- cooc.matrix@i[idx] + 1L
i.col.idx <- i.col[idx]
y <- transform.fun(compute.AM(
AM, f=cooc.matrix@x[idx], f1=f1[i.row.idx], f2=f2[i.col.idx], N=N,
rows=model$rows[i.row.idx, ], cols=model$cols[i.col.idx, ]
))
scores.x[idx] <- if (sparse) pmax(0, y) else y
i1 <- i1 + batchsize
gc.step <- gc.step + 1
if (gc.step > gc.iter) {
gc(verbose=FALSE) # clean up temporary objects so they don't accumulate in RAM
gc.step <- 1
}
}
rm(i.col, idx, y)
if (gc.iter < Inf) gc(verbose=FALSE)
scores <- new("dgCMatrix", Dim=as.integer(c(model.info$nrow, model.info$ncol)), p=cooc.matrix@p, i=cooc.matrix@i, x=scores.x)
rm(scores.x) # no need to re-run gc() because scores.x has been integrated into the new sparseMatrix object
} else {
## dense matrix: divide columns into batches
nR <- nrow(cooc.matrix)
nC <- ncol(cooc.matrix)
batch.cols <- ceiling(batchsize / nC) # number of columns per batch
scores <- matrix(0, nR, nC)
i1 <- 1
gc.step <- 1
while (i1 <= nC) {
i2 <- min(i1 + batch.cols - 1, nC)
## cat(sprintf("dsm.score: processing columns #%d .. #%d\n", i1, i2))
idx <- i1:i2 # columns in batch
i.row <- rep(1:nR, length(idx)) # row index for batch matrix
i.col <- rep(idx, each=nR) # column index for batch matrix
y <- transform.fun(compute.AM(
AM, f=cooc.matrix[, idx, drop=FALSE], f1=f1[i.row], f2=f2[i.col], N=N,
rows=model$rows[i.row, ], cols=model$cols[i.col, ]
))
scores[, idx] <- if (sparse) pmax(0, y) else y
i1 <- i1 + batch.cols
gc.step <- gc.step + 1
if (gc.step > gc.iter) {
gc(verbose=FALSE) # clean up temporary objects so they don't accumulate in RAM
gc.step <- 1
}
}
rm(i.row, i.col, idx, y)
if (gc.iter < Inf) gc(verbose=FALSE)
}
} else {
## (B) built-in association measures: C code for optimal memory-efficiency
if (cooc.sparse) {
## compute association scores for sparse matrix (in canonical dgCMatrix format)
scores.x <- CPP_dsm_score_sparse(model.info$nrow, model.info$ncol, cooc.matrix@p, cooc.matrix@i, cooc.matrix@x, f1, f2, N, score.code, sparse, transform.code)
scores <- new("dgCMatrix", Dim=as.integer(c(model.info$nrow, model.info$ncol)), p=cooc.matrix@p, i=cooc.matrix@i, x=scores.x)
rm(scores.x)
} else {
## compute dense or sparse association scores for dense matrix
scores <- CPP_dsm_score_dense(cooc.matrix, f1, f2, N, score.code, sparse, transform.code)
}
}
if (scale == "standardize") {
scores <- scale(scores, center=TRUE, scale=TRUE)
} else if (scale == "center") {
scores <- scale(scores, center=TRUE, scale=FALSE)
} else if (scale == "scale") {
rms <- colNorms(scores, "euclidean") / sqrt(nrow(scores) - 1) # root mean square according to ?scale
scores <- scaleMargins(scores, cols = 1 / rms, duplicate=FALSE) # transform in-place (scores has been allocated above)
} else {
# no scaling
}
if (normalize) {
## carry out row normalization in-place (because scores has been newly allocated above)
scores <- normalize.rows(scores, method=method, p=p, tol=tol, inplace=TRUE)
}
dimnames(scores) <- dimnames(cooc.matrix) # make sure that row and column names are preserved
if (!negative.ok) attr(scores, "nonneg") <- TRUE # S is known to be non-negative
if (matrix.only) {
return(scores)
} else {
model$S <- scores
if (update.nnzero) {
model$rows$nnzero <- rowNorms(scores, method="minkowski", p=0)
model$cols$nnzero <- colNorms(scores, method="minkowski", p=0)
}
return(model)
}
}
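## Usage sketch (illustrative): built-in association measure with a log
## transformation, and a user-defined PMI callback; with sparse=TRUE,
## negative user-defined scores are clamped to zero.
model <- dsm(target=c("cat", "cat", "dog"),
             feature=c("pet", "animal", "animal"),
             score=c(10, 5, 3), raw.freq=TRUE)
m1 <- dsm.score(model, score="simple-ll", transform="log")
my.pmi <- function (O, E, ...) log2(O / E) # observed vs. expected frequency
m2 <- dsm.score(model, score=my.pmi, sparse=TRUE)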
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/dsm_score.R
|
## determine majority class assignments for data points
majority.class <- function(clusters, gold) {
class.dist <- table(clusters, gold) # class distribution for each cluster
best.class <- colnames(class.dist)[ apply(class.dist, 1, which.max) ] # best class assignment for each cluster (by name)
names(best.class) <- rownames(class.dist)
best.class[ clusters ] # look up best class assignment for each data point
}
## clustering purity is easily computed from best assignments
cluster.purity <- function (clusters, gold) {
stopifnot(length(gold) == length(clusters))
best.class <- majority.class(clusters, gold)
stopifnot(length(best.class) == length(clusters))
100 * sum(as.character(best.class) == as.character(gold)) / length(clusters)
}
## clustering entropy (scaled to range 0..1 if scale=TRUE)
cluster.entropy <- function(clusters, gold, scale=FALSE) {
stopifnot(length(gold) == length(clusters))
class.dist <- table(clusters, gold)
H.by.class <- apply(prop.table(class.dist, 1), 1,
function (p) sum(ifelse(p > 0, -p * log2(p), 0)))
class.p <- apply(class.dist, 1, sum) / length(clusters)
H <- sum(class.p * H.by.class) # weighted average of cluster entropies = mean classification uncertainty
if (scale) {
p <- prop.table(table(gold)) # largest plausible clustering entropy = entropy of gold standard
H <- H / sum(ifelse(p > 0, -p * log2(p), 0))
}
H
}
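## Usage sketch (illustrative): purity and entropy of a toy clustering; each
## cluster is labelled with its majority class before computing purity.
clusters <- c("c1", "c1", "c2", "c2", "c2")
gold <- c("A", "A", "B", "B", "A")
cluster.purity(clusters, gold) # 80 (4 of 5 points carry the majority label)
cluster.entropy(clusters, gold, scale=TRUE) # 0 = perfect, 1 = uninformative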
eval.clustering <- function (task, M, dist.fnc=pair.distances, ..., details=FALSE, format=NA, taskname=NA, scale.entropy=FALSE, n.clusters=NA, word.name="word", class.name="class") {
if (is.na(taskname)) taskname <- deparse(substitute(task))
if (!(word.name %in% colnames(task))) stop(sprintf("gold standard does not have a column labelled '%s'", word.name))
if (!(class.name %in% colnames(task))) stop(sprintf("gold standard does not have a column labelled '%s'", class.name))
orig.words <- as.character(task[, word.name])
words <- if (is.na(format)) orig.words else convert.lemma(orig.words, format)
gold <- as.factor(task[, class.name])
n.gold <- length(unique(gold))
if (is.na(n.clusters)) n.clusters <- n.gold
ok <- words %in% rownames(M) # check for missing words
known.words <- words[ok]
clusters <- rep("n/a", length(words)) # unknown words are assigned to a single cluster "n/a"
names(clusters) <- words
if (any(ok)) {
## create a dissimilarity structure for known words using pair.distances
pairs <- t(combn(known.words, 2))
distances <- dist.fnc(pairs[, 1], pairs[, 2], M, ...)
class(distances) <- "dist"
attr(distances, "Size") <- length(known.words)
attr(distances, "Labels") <- known.words
## cluster assignments for known words, using the PAM algorithm
known.clusters <- pam(distances, n.clusters, diss=TRUE, cluster.only=TRUE)
clusters[known.words] <- known.clusters # fill in cluster assignments for known words
}
best.class <- majority.class(clusters, gold) # best label for each cluster
purity <- cluster.purity(clusters, gold)
entropy <- cluster.entropy(clusters, gold, scale=scale.entropy)
if (details) {
res <- data.frame(
word = orig.words,
cluster = as.factor(clusters),
label = factor(best.class, levels=levels(gold)),
gold = gold,
correct = best.class == gold,
missing = !ok,
stringsAsFactors = FALSE)
attr(res, "purity") <- purity
attr(res, "entropy") <- entropy
attr(res, "taskname") <- taskname
res
} else {
res <- data.frame(purity=purity, entropy=entropy, missing=sum(!ok))
rownames(res) <- taskname
res
}
}
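## Usage sketch (illustrative, assumes pair.distances() from the package and
## pam() from the 'cluster' dependency): cluster four words into two classes.
task <- data.frame(word=c("cat", "dog", "fish", "bird"),
                   class=c("mammal", "mammal", "other", "other"))
M <- matrix(c(1, 0, 2, 0, 1, 2, 5, 5, 0, 4, 6, 1), nrow=4, byrow=TRUE,
            dimnames=list(c("cat", "dog", "fish", "bird"), c("f1", "f2", "f3")))
eval.clustering(task, M, taskname="toy") # purity, entropy, missing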
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/eval_clustering.R
|
eval.multiple.choice <-
function (task, M, dist.fnc=pair.distances, ..., details=FALSE, format=NA, taskname=NA, target.name="target", correct.name="correct", distractor.name="^distract") {
if (is.na(taskname)) taskname <- deparse(substitute(task))
if (!(target.name %in% colnames(task))) stop(sprintf("gold standard does not have a column labelled '%s'", target.name))
if (!(correct.name %in% colnames(task))) stop(sprintf("gold standard does not have a column labelled '%s'", correct.name))
idx.distract <- grepl(distractor.name, colnames(task), perl=TRUE)
if (!any(idx.distract)) stop(sprintf("no distractors matching /%s/ found in gold standard", distractor.name))
targets <- as.character(task[, target.name])
choices <- cbind(as.character(task[, correct.name]),
as.matrix(task[, idx.distract, drop=FALSE]))
mode(choices) <- "character" # first column contains correct choice, further columns are distractors
n.choices <- ncol(choices)
n.items <- nrow(task)
w1 <- rep(targets, n.choices)
w2 <- as.vector(choices)
if (!is.na(format)) {
w1 <- convert.lemma(w1, format)
w2 <- convert.lemma(w2, format)
}
distance <- dist.fnc(w1, w2, M, ...)
is.similarity <- isTRUE(attr(distance, "similarity"))
distance <- matrix(distance, ncol=n.choices, byrow=FALSE) # distances in matrix format corresponding to choices
if (is.similarity) distance <- -distance # negate similarity scores so that smaller values indicate better matches, as for distances
res.list <- lapply(1:n.items, function (i) {
d <- distance[i, ]
ranks <- rank(d, ties.method="max") # so we don't get a correct answer if it is tied with a distractor
best <- which.min(ranks)
correct <- if (d[best] < Inf) ranks[1] == 1 else NA # whether correct answer is ranked first (NA if all pairings not in DSM)
data.frame(
target=targets[i], correct=correct,
best.choice=choices[i, best], best.dist=d[best],
correct.choice=choices[i, 1], correct.rank=ranks[1], correct.dist=d[1],
row.names=NULL, stringsAsFactors=FALSE
)
})
res <- do.call("rbind", res.list)
if (details) {
if (is.similarity) {
res$best.dist <- -res$best.dist # convert back to similarity scores
res$correct.dist <- -res$correct.dist # (can't use transform() because of warnings from "make check")
}
res
} else {
tp <- sum(res$correct, na.rm=TRUE)
data.frame(
accuracy=100 * tp / n.items,
TP=tp, FP=n.items - tp, missing=sum(distance[, 1] == Inf),
row.names=taskname
)
}
}
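## Usage sketch (illustrative, assumes pair.distances() from the package): a
## single multiple-choice item with one correct answer and two distractors.
task <- data.frame(target="cat", correct="dog",
                   distract1="fish", distract2="bird")
M <- matrix(c(1, 0, 2, 0, 1, 2, 1, 1, 0, 0, 2, 1), nrow=4, byrow=TRUE,
            dimnames=list(c("cat", "dog", "fish", "bird"), c("f1", "f2", "f3")))
eval.multiple.choice(task, M, taskname="toy") # accuracy in percent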
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/eval_multiple_choice.R
|
eval.similarity.correlation <- function (task, M, dist.fnc=pair.distances, details=FALSE, format=NA, taskname=NA, word1.name="word1", word2.name="word2", score.name="score", ...) {
if (is.na(taskname)) taskname <- deparse(substitute(task))
for (varname in c(word1.name, word2.name, score.name)) {
if (!(varname %in% colnames(task))) stop(sprintf("gold standard does not have a column labelled '%s'", varname))
}
n.items <- nrow(task)
w1 <- as.character(task[, word1.name])
w2 <- as.character(task[, word2.name])
if (!is.na(format)) {
w1 <- convert.lemma(w1, format)
w2 <- convert.lemma(w2, format)
}
score <- task[, score.name]
distance <- dist.fnc(w1, w2, M, ...) # vector of DSM distances between word pairs (can also be similarities)
if (any(is.na(distance))) stop("missing values in distance/similarity vector must be replaced by +Inf or -Inf")
is.pinf <- distance == Inf # missing data points in case of distance
is.minf <- distance == -Inf # missing data points in case of similarity
is.missing <- is.pinf | is.minf
n.missing <- sum(is.missing)
if (all(is.missing)) stop("no distance/similarity values available, can't compute correlation")
dist.range <- range(distance[!is.missing]) # fill in missing data with appropriate extreme of value range extended by 10%
surrogates <- dist.range + diff(dist.range) * c(-.1, +.1)
distance[is.pinf] <- surrogates[2]
distance[is.minf] <- surrogates[1]
spearman <- cor.test(score, distance, method="spearman", exact=FALSE)
rho <- abs(spearman$estimate)
rho.pvalue <- spearman$p.value
pearson <- cor.test(score, distance, method="pearson", conf.level=.95)
r <- abs(pearson$estimate)
r.confint <- if (pearson$estimate < 0) -rev(pearson$conf.int) else pearson$conf.int
eval.result <- data.frame(
rho = rho, p.value = rho.pvalue, missing = n.missing,
r = r, r.lower = r.confint[1], r.upper = r.confint[2],
row.names=taskname
)
if (details) {
task$distance <- distance
task$missing <- is.missing
attr(task, "eval.result") <- eval.result
attr(task, "taskname") <- taskname
class(task) <- c("eval.similarity.correlation", "data.frame")
task
} else {
eval.result
}
}
print.eval.similarity.correlation <- function (x, ...) {
NextMethod("print", x, ...) # print as data frame
cat("Evaluation result:\n")
print(attr(x, "eval.result"))
}
plot.eval.similarity.correlation <- function (x, y, line=TRUE, categories=NULL, cat.col=NA, cat.legend="bottomleft", pch=20, cex=1, xlim=NULL, ylim=NULL, xlab="human rating", ylab="distributional model", main=attr(x, "taskname"), ...) {
if (!missing(categories)) {
categories <- eval(substitute(categories), x, parent.frame())
categories <- as.factor(categories)
stopifnot(length(categories) == nrow(x))
cat.types <- levels(categories)
n.cat <- length(cat.types)
if (is.na(cat.col)) {
cat.col <- 1:n.cat
} else {
cat.col <- rep(cat.col, length.out=n.cat)
}
colours <- cat.col[categories]
} else {
n.cat <- 0
colours <- rep("black", nrow(x))
}
if (is.null(xlim)) xlim <- range(x$score)
if (is.null(ylim)) {
ylim <- range(x$distance)
ylim <- ylim + c(0, 0.05) * diff(ylim) # extend top of range by 5%
}
pch.vec <- ifelse(x$missing, 0, pch)
cex.vec <- ifelse(x$missing, cex * 0.8, cex)
plot(x$score, x$distance, col=colours, pch=pch.vec, cex=cex.vec, xlim=xlim, ylim=ylim, xlab=xlab, ylab=ylab, main=main, ...)
if (line) lines(lowess(x$score, x$distance), col="blue", lwd=3)
if (n.cat > 0) {
legend(cat.legend, inset=.02, bg="white", legend=cat.types, pch=pch, col=cat.col, pt.cex=cex*1.5)
}
viewport <- par("usr") # compute anchor coordinates in top left corner with 2% inset
anchor.x <- sum(c(0.98, 0.02, 0, 0) * viewport)
anchor.y <- sum(c(0, 0, 0.02, 0.98) * viewport)
result <- attr(x, "eval.result")
report <- with(result, sprintf("|rho| = %.3f, p = %.4f, |r| = %.3f .. %.3f", rho, p.value, r.lower, r.upper))
if (result$missing > 0) report <- sprintf("%s (%d pairs not found)", report, result$missing)
text(anchor.x, anchor.y, adj=c(0, 1), font=2, col="blue", labels=report)
}
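## Usage sketch (illustrative, assumes pair.distances() from the package):
## correlate human similarity ratings with DSM distances for four word pairs.
task <- data.frame(word1=c("cat", "cat", "dog", "fish"),
                   word2=c("dog", "fish", "bird", "bird"),
                   score=c(8, 3, 4, 2))
M <- matrix(c(1, 0, 2, 0, 1, 2, 1, 1, 0, 0, 2, 1), nrow=4, byrow=TRUE,
            dimnames=list(c("cat", "dog", "fish", "bird"), c("f1", "f2", "f3")))
eval.similarity.correlation(task, M, taskname="toy") # rho, p-value, |r|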
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/eval_similarity_correlation.R
|
head.dist.matrix <- function (x, n=6L, k=n, ...) {
n <- min(n, nrow(x))
k <- min(k, ncol(x))
x[1:n, 1:k]
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/head_dist_matrix.R
|
head.dsm <- function (x, n=6L, k=n, ...) {
info <- check.dsm(x)
k <- min(k, info$ncol) # do this first, so change in n doesn't affect the default
n <- min(n, info$nrow)
M <- if (info$S$ok) x$S else x$M
M[1:n, 1:k]
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/head_dsm.R
|
match.split <- function (x, f, values=NULL, groups=NULL, nomatch=NA_integer_) {
if (length(x) != length(f)) stop("arguments x and f must be vectors of the same length")
if (missing(groups)) {
f <- as.factor(f)
groups <- levels(f)
} else {
f <- factor(f, levels=groups)
}
n <- length(x)
idx.split <- split(1:n, f)
if (is.null(values)) {
x.split <- lapply(idx.split, function (.idx) unique(x[.idx]))
values <- Reduce(intersect, x.split)
if (length(values) < 1) stop("no values are shared between all groups")
}
## find positions of first matches in each group, then obtain original positions with .idx[...]
res <- do.call(cbind, lapply(idx.split, function (.idx) .idx[ match(values, x[.idx]) ]))
if (!missing(nomatch)) res[is.na(res)] <- as.integer(nomatch) # replace NA's for "no match" by user-specified value
rownames(res) <- values
return(res)
}
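## A minimal usage sketch (illustrative values, not run):
## match.split(c("a","b","a","c","b"), c(1,1,2,2,2))
## ## returns the position of the first match of each shared value per group:
## ##   1 2
## ## a 1 3
## ## b 2 5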
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/match_split.R
|
nearest.neighbours <- function (M, term, n=10, M2=NULL, byrow=TRUE, drop=TRUE, skip.missing=FALSE, dist.matrix=FALSE, ..., batchsize=50e6, verbose=FALSE) {
is.dist <- inherits(M, "dist.matrix") || isTRUE(attr(M, "dist.matrix"))
if (is.dist) {
## case 1: M is a pre-computed distance matrix
## - byrow determines if target term is looked up in rows or columns of M
## - if M is not marked symmetric, we need to treat this as a cross-distance computation
cross.distance <- !isTRUE(attr(M, "symmetric"))
if (!is.null(M2)) stop("M2 cannot be specified if M is a pre-computed distance matrix")
if (dist.matrix && cross.distance) stop("pre-computed distance matrix must be symmetric for dist.matrix=TRUE")
} else {
## case 2: M is a matrix of row or column vectors
## - compute cross-distances if M2 is specified, otherwise (usually symmetric) distances in M
## - byrow determines whether row or column vectors of M and M2 are used
cross.distance <- !is.null(M2)
M <- find.canonical.matrix(M) # ensure that M is a suitable matrix, or extract from DSM object
if (cross.distance) {
M2 <- find.canonical.matrix(M2) # ensure that M2 is a suitable matrix, or extract from DSM object
if (byrow && ncol(M) != ncol(M2)) stop("M and M2 are not conformable (must have the same number of columns)")
if (!byrow && nrow(M) != nrow(M2)) stop("M and M2 are not conformable (must have the same number of rows)")
} else {
M2 <- M # so we can always compute distances against M2
}
}
nn.of.vector <- is.numeric(term)
if (nn.of.vector) {
## case 1: targets are given as row vectors
if (is.dist) stop("cannot find nearest neighbours of vector if M is a pre-computed distance matrix")
if (!is.matrix(term)) term <- matrix(term, nrow=1) # convert plain vector into row vector
if (byrow && ncol(term) != ncol(M2)) stop("target vectors in term= do not have the right number of dimensions")
if (!byrow && ncol(term) != nrow(M2)) stop("target vectors in term= do not have the right number of dimensions")
n.terms <- nrow(term)
if (is.null(rownames(term))) rownames(term) <- paste0("VEC", 1:n.terms)
} else {
## case 2: targets are given as strings (to be looked up in rows or columns of M)
term.labels <- if (byrow) rownames(M) else colnames(M)
found <- term %in% term.labels
if (any(!found) && !skip.missing) stop("target term(s) not found in M: ", paste(term[!found], collapse=", "))
term <- term[found]
n.terms <- length(term)
}
if (n.terms == 0) return(NULL)
## unless we're working on a pre-computed dist.matrix, process vector of lookup terms in moderately sized batches
if (!is.dist) {
n.cand <- if (byrow) nrow(M2) else ncol(M2) # neighbour candidates
if (n.cand > 1 && as.double(n.terms) * n.cand > batchsize) {
items.per.batch <- ceiling(batchsize / n.cand)
res.list <- lapply(seq(1, n.terms, items.per.batch), function (i.start) {
i.end <- min(i.start + items.per.batch - 1, n.terms)
if (verbose) cat(sprintf(" - nearest.neighbours(): terms #%d .. #%d of %d (size = %.1fM)\n", i.start, i.end, n.terms, (i.end-i.start+1) * n.cand / 1e6))
term.batch <- if (nn.of.vector) term[i.start:i.end, , drop=FALSE] else term[i.start:i.end]
nearest.neighbours(M, term.batch, n=n, M2=if (cross.distance) M2 else NULL, drop=FALSE, skip.missing=FALSE, byrow=byrow, dist.matrix=dist.matrix, ..., batchsize=Inf, verbose=verbose)
})
return(do.call(c, res.list))
}
}
## items to look up
items <- if (nn.of.vector) rownames(term) else term
## prepare distance matrix DM between all targets and candidates,
## arranging so that it is always accessed by column (which is more efficient)
if (is.dist) {
## pre-computed distance matrix: extract relevant rows or columns
if (byrow) {
items.ok <- items[items %in% rownames(M)]
DM <- t(M[items.ok, , drop=FALSE]) # now accessed by column
} else {
items.ok <- items[items %in% colnames(M)]
DM <- M[, items.ok, drop=FALSE]
}
## there should be methods [.dist.matrix and [<-.dist.matrix so that we don't need to reconstruct a dist.matrix object here,
## but this would probably make row and column access considerably slower; and a sparse distance matrix works differently anyway
DM <- as.distmat(DM, similarity=isTRUE(attr(M, "similarity"))) # won't be symmetric, even if M is
} else {
## compute distance matrix between specified items and all other targets
if (nn.of.vector) {
M.term <- if (byrow) term else t(term) # M.term = matrix of target vectors (rows or columns)
} else {
M.term <- if (byrow) M[term, , drop=FALSE] else M[, term, drop=FALSE]
}
## it's more efficient to have the smaller matrix M.term first because it will be the inner loop
## when computing a general distance matrix (resulting in better cache coherence)
DM <- t(dist.matrix(M=M.term, M2=M2, byrow=byrow, ...)) # items correspond to columns, regardless of <byrow>
}
similarity <- isTRUE(attr(DM, "similarity"))
sparse <- dsm.is.canonical(DM)$sparse # may only happen for pre-computed similarity matrix
if (sparse && !similarity) stop("only non-negative similarity matrix supported in sparse format")
result <- lapply(items, function (.t) {
if (sparse) {
## sparse pre-computed similarity matrix
neighbours <- DM[, .t]
neighbours <- neighbours[neighbours > 0] # only non-zero cells are candidates for neighbours
} else {
## in all other cases
neighbours <- DM[, .t]
}
neighbours <- sort(neighbours, decreasing=similarity)
if (!nn.of.vector && !cross.distance) {
neighbours <- head(neighbours, n + 1) # remove target from list of nearest neighbours
neighbours <- neighbours[names(neighbours) != .t] # this should remove at most 1 element
}
neighbours <- head(neighbours, n)
if (dist.matrix) {
## case 1: compute distance matrix between nearest neighbours (including target term)
nn.terms <- names(neighbours)
if (is.dist) {
## pre-computed distance matrix M must be symmetric (i.e. !cross.distance)
nn.terms <- c(.t, nn.terms) # prepend target term to list of neighbours (has been removed above)
nn.dist <- M[nn.terms, nn.terms]
nn.dist <- as.distmat(nn.dist, symmetric=TRUE, similarity=similarity)
} else {
## dense (cross-)distance matrix computed on the fly
nn.matrix <- if (byrow) M2[nn.terms, , drop=FALSE] else t(M2[, nn.terms, drop=FALSE]) # matrix of row vectors for all neighbours
nn.terms <- c(.t, nn.terms) # add target term
if (nn.of.vector) {
nn.matrix <- rbind(term[.t, , drop=FALSE], nn.matrix) # add specified target vector
} else {
nn.matrix <- rbind(if (byrow) M[.t, , drop=FALSE] else t(M[, .t, drop=FALSE]), nn.matrix) # add target vector from M
}
rownames(nn.matrix) <- nn.terms
nn.dist <- dist.matrix(nn.matrix, byrow=TRUE, ...)
}
attr(nn.dist, "selected") <- nn.terms == .t # mark target as selected
nn.dist
} else {
## case 2: return specified number of nearest neighbours
neighbours
}
})
names(result) <- items
if (drop && length(result) == 1) result[[1]] else result
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/nearest_neighbours.R
|
## returns scaling exponent required for normalization (usually 1)
.check.normalize <- function (method, p) {
if (method == "minkowski") {
if (p == 0) stop("Hamming length (p = 0) cannot be normalized")
if (p < .05) stop("reliable normalization not possible for Minkowski norm with small p (< .05)")
if (p < 1) return(1 / p)
}
return(1)
}
normalize.rows <- function (M, method = "euclidean", p = 2, ..., tol = 1e-6, inplace = FALSE) {
scale <- .check.normalize(method, p)
norms <- rowNorms(M, method=method, p=p, ...)
lambda <- ifelse(norms < tol, 0, 1 / norms) # any rows with norm < tol are set to 0
if (scale == 1) scaleMargins(M, rows = lambda, duplicate=!inplace) else scaleMargins(M, rows = lambda ^ scale, duplicate=!inplace)
}
normalize.cols <- function (M, method = "euclidean", p = 2, ..., tol = 1e-6, inplace = FALSE) {
scale <- .check.normalize(method, p)
norms <- colNorms(M, method=method, p=p, ...)
lambda <- ifelse(norms < tol, 0, 1 / norms) # any cols with norm < tol are set to 0
if (scale == 1) scaleMargins(M, cols = lambda, duplicate=!inplace) else scaleMargins(M, cols = lambda ^ scale, duplicate=!inplace)
}
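## A minimal sketch (illustrative, not run): after normalization, all nonzero
## row/column norms are 1 up to rounding errors, e.g.
## M1 <- normalize.rows(M) # rowNorms(M1) ~ rep(1, nrow(M))
## M2 <- normalize.cols(M, method="manhattan") # colNorms(M2, method="manhattan") ~ 1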
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/normalize_rows.R
|
pair.distances <- function (w1, w2, M, ..., transform=NULL, rank=c("none", "fwd", "bwd", "avg"), avg.method=c("arithmetic", "geometric", "harmonic"), batchsize=10e6, verbose=FALSE) {
rank <- match.arg(rank)
avg.method <- match.arg(avg.method)
if (!is.null(transform) && !is.function(transform)) stop("transform= must be a vectorized function expecting a single argument")
w1 <- as.character(w1)
w2 <- as.character(w2)
stopifnot(length(w1) == length(w2))
is.dist <- inherits(M, "dist.matrix") || isTRUE(attr(M, "dist.matrix"))
if (!is.dist) M <- find.canonical.matrix(M) # ensure DSM matrix is in canonical format, or extract from DSM object
if (rank != "none") {
## case 1: dispatch to specialised function for computing neighbour ranks
if (rank == "fwd") {
r <- pair.ranks(w1, w2, M, ..., rank="fwd", is.dist=is.dist, batchsize=batchsize, verbose=verbose)
} else if (rank == "bwd") {
r <- pair.ranks(w1, w2, M, ..., rank="bwd", is.dist=is.dist, batchsize=batchsize, verbose=verbose)
} else {
r1 <- pair.ranks(w1, w2, M, ..., rank="fwd", is.dist=is.dist, batchsize=batchsize, verbose=verbose)
r2 <- pair.ranks(w1, w2, M, ..., rank="bwd", is.dist=is.dist, batchsize=batchsize, verbose=verbose)
r <- switch(avg.method,
arithmetic = (r1 + r2) / 2,
geometric = sqrt(r1 * r2),
harmonic = ifelse(is.finite(pmax(r1, r2)), 2 * r1 * r2 / (r1 + r2), Inf))
}
if (!is.null(transform)) transform(r) else r
} else {
## case 2: compute regular distances or similarities (rank == "none")
n <- length(w1)
types1 <- unique(w1)
types2 <- unique(w2)
n.types1 <- as.double(length(types1))
n.types2 <- as.double(length(types2))
## if there are too many distinct types (such that intermediate distance matrix would have > chunksize elements),
## partition input recursively and combine result vectors (unless M is a pre-computed distance matrix)
if (!is.dist) {
n.elements <- n.types1 * n.types2 # size of distance matrix to be computed
split.batch <- n.elements > batchsize && n >= 4
if (verbose) cat(sprintf("%s- pair.distances(): %d pairs, %d x %d types = %.1fM elements %s\n", paste(rep(" ", verbose), collapse=""), n, length(types1), length(types2), n.elements/1e6, if (split.batch) "" else "***"))
if (split.batch) {
pivot <- floor(n/2)
verbose.val <- if (verbose) verbose + 2 else FALSE
res1 <- pair.distances(w1[1:pivot], w2[1:pivot], M, ..., batchsize=batchsize, verbose=verbose.val)
res2 <- pair.distances(w1[(pivot+1):n], w2[(pivot+1):n], M, ..., batchsize=batchsize, verbose=verbose.val)
is.similarity <- isTRUE(attr(res1, "similarity")) # pass through similarity marker
res <- structure(c(res1, res2), similarity=is.similarity)
if (!is.null(transform)) return(transform(res)) else return(res)
}
}
if (is.dist) {
## case 2a: look up distances or similarities directly in pre-computed matrix M
distances <- M # NB: if M is sparse, it cannot be indexed with a matrix of labels below
}
else {
## case 2b: compute distance matrix as a superset of the required distances between rows of M
distances <- dist.matrix(M, byrow=TRUE, terms=types1, terms2=types2, skip.missing=TRUE, ...)
}
is.similarity <- isTRUE(attr(distances, "similarity"))
miss.val <- if (is.similarity) -Inf else Inf
res <- rep(miss.val, n)
w1.row <- match(w1, rownames(distances)) # row index of w1
w2.col <- match(w2, colnames(distances)) # column index of w2
is.known <- !is.na(w1.row) & !is.na(w2.col)
res[is.known] <- distances[cbind(w1.row[is.known], w2.col[is.known])]
res <- structure(res, names=paste(w1, w2, sep="/"), similarity=is.similarity)
if (!is.null(transform)) return(transform(res)) else return(res)
}
}
## need specialised implementation for neighbour ranks, which loops over w1 types instead of word pairs,
## in order to avoid repeated expensive computation of all-neighbour rankings
pair.ranks <- function (w1, w2, M, ..., rank=c("fwd", "bwd"), is.dist=FALSE, batchsize=10e6, verbose=FALSE) {
rank <- match.arg(rank)
## already ensured by pair.distances()
# w1 <- as.character(w1)
# w2 <- as.character(w2)
# stopifnot(length(w1) == length(w2))
res <- rep(Inf, length(w1))
if (is.dist) {
## case 1: pre-computed distance or similarity matrix
is.similarity <- isTRUE(attr(M, "similarity"))
is.symmetric <- isTRUE(attr(M, "symmetric")) # whether to adjust for word as its own neighbour
is.sparse <- dsm.is.canonical(M)$sparse
if (is.sparse && !is.similarity) stop("only non-negative similarity matrix supported in sparse format")
is.known <- w1 %in% rownames(M) & w2 %in% colnames(M) # word pairs found in pre-computed distance matrix
u1 <- if (rank == "fwd") w1 else w2 # compute rank of u2 among neighbours of u1
u2 <- if (rank == "fwd") w2 else w1 # but we still have to decide between rows and columns of M below
u1.types <- sort(unique(u1[is.known])) # list of u1 types we need to process
n.types <- length(u1.types)
items.per.batch <- ceiling(batchsize / (if (rank == "fwd") ncol(M) else nrow(M))) # number of u1 types we can process per batch
for (i.start in seq(1, n.types, items.per.batch)) {
i.end <- min(i.start + items.per.batch - 1, n.types)
if (verbose) cat(sprintf(" - pair.ranks(as.dist, '%s'): types #%d .. #%d of %d (%s .. %s)\n",
rank, i.start, i.end, n.types, u1.types[i.start], u1.types[i.end]))
batch.types <- u1.types[i.start:i.end] # extract and rank row ("fwd") or column ("bwd") vectors for these types from M
distances <- if (rank == "fwd") t(as.matrix(M[batch.types, , drop=FALSE])) else as.matrix(M[, batch.types, drop=FALSE])
if (is.similarity) distances <- -distances # to rank by decreasing similarity
ranks <- apply(distances, 2, function (x) {
res <- rank(x, ties.method="min")
if (is.sparse) res[x == 0] <- Inf # empty cells in sparse matrix cannot be neighbours
res
}) # ranks is column-major: look up ranks[u2, u1]
idx <- (u1 %in% batch.types) & is.known
adj.rank <- if (is.symmetric) 1 else 0 # adjust ranks for u1 as its own first neighbour if M is symmetric
res[idx] <- ranks[cbind(u2[idx], u1[idx])] - adj.rank
}
} else {
## case 2: compute distances between rows of DSM matrix
u1 <- if (rank == "fwd") w1 else w2 # compute rank of u2 among neighbours of u1
u2 <- if (rank == "fwd") w2 else w1
known.words <- rownames(M)
is.known <- u1 %in% known.words & u2 %in% known.words # word pairs found in DSM
u1.types <- sort(unique(u1[is.known])) # list of u1 types found in DSM
n.types <- length(u1.types)
items.per.batch <- ceiling(batchsize / nrow(M)) # number of u1 types we can process in a single batch
for (i.start in seq(1, n.types, items.per.batch)) {
i.end <- min(i.start + items.per.batch - 1, n.types)
if (verbose) cat(sprintf(" - pair.ranks(): types #%d .. #%d of %d (%s .. %s)\n",
i.start, i.end, n.types, u1.types[i.start], u1.types[i.end]))
batch.types <- u1.types[i.start:i.end]
distances <- dist.matrix(M, byrow=TRUE, terms=batch.types, terms2=NULL, skip.missing=FALSE, ...)
is.similarity <- isTRUE(attr(distances, "similarity"))
if (is.similarity) distances <- -distances
ranks <- apply(distances, 1, function (x) rank(x, ties.method="min")) # ranks is column-major: ranks[u2, u1]
idx <- (u1 %in% batch.types) & is.known
res[idx] <- ranks[cbind(u2[idx], u1[idx])] - 1 # adjust for u1 as its own neighbour (cannot be used for cross-distances!)
}
}
structure(res, names=paste(w1, w2, sep="/"), similarity=FALSE)
}
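## Usage note (a sketch; assumes both words are targets in M): neighbour ranks are
## obtained through pair.distances() with its rank= option, e.g.
## pair.distances("book", "paper", M, rank="avg") # average of fwd and bwd ranks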
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/pair_distances.R
|
plot.dist.matrix <- function (x, y, labels=rownames(x), show.labels=TRUE, label.pos=3, selected=attr(x, "selected"), show.selected=TRUE, col="black", cex=1, pch=20, pt.cex=1.2, selected.cex=1.2, selected.col="red", show.edges=TRUE, edges.lwd=6, edges.col="#AABBFF", edges.threshold=quantile(x, 2/3), method=c("isomds", "sammon"), aspect=1, expand=.05, ...) {
stopifnot(inherits(x, "dist.matrix"))
if (!missing(y)) stop("plot.dist.matrix() doesn't take a second argument (y)")
if (!isTRUE(attr(x, "symmetric"))) stop("only symmetric distance matrices can be plotted")
if (isTRUE(attr(x, "similarity"))) stop("similarity matrices are not supported. Please provide a distance matrix for this plot.")
method <- match.arg(method)
if (is.null(labels)) {
show.labels <- FALSE
} else {
if (length(labels) != nrow(x)) stop("wrong number of labels specified")
}
coords <- if (method == "isomds") isoMDS(x, k=2, trace=FALSE)$points else sammon(x, k=2, trace=FALSE)$points
x.range <- extendrange(coords[, 1], f=expand)
y.range <- extendrange(coords[, 2], f=expand)
.asp <- diff(x.range) / diff(y.range)
if (.asp < aspect) {
x.range <- extendrange(x.range, f=(aspect/.asp-1)/2)
} else if (.asp > aspect) {
y.range <- extendrange(y.range, f=(.asp/aspect-1)/2)
}
## set up plot region
plot(coords, type="n", xlim=x.range, ylim=y.range, xlab="", ylab="", xaxs="i", yaxs="i", xaxt="n", yaxt="n", ...)
## draw edges if requested
if (show.edges) {
midx <- t(combn(1:nrow(x), 2)) # 2d-index for upper triangle of distance matrix
P1 <- midx[, 1] # idx of start node of edge
P2 <- midx[, 2] # idx of end node of edge
len <- x[midx] # distance between P1 and P2
lwd.vec <- edges.lwd * (1 - len / edges.threshold)
idx <- lwd.vec > 0.1 # draw only edges with minimum lwd of 0.1
segments(coords[P1[idx], 1], coords[P1[idx], 2], coords[P2[idx], 1], coords[P2[idx], 2], lwd=lwd.vec[idx], col=edges.col)
}
## draw points
if (is.null(selected) || !show.selected) selected <- rep(FALSE, nrow(x))
col.vec <- ifelse(selected, selected.col, col)
cex.vec <- cex * pt.cex * ifelse(selected, selected.cex, 1)
points(coords, pch=pch, col=col.vec, cex=cex.vec)
## draw labels if requested
if (show.labels) {
cex.vec <- cex * ifelse(selected, selected.cex, 1)
text(coords[, 1], coords[, 2], labels=labels, pos=label.pos, font=2, cex=cex.vec, col=col.vec)
}
## return matrix of MDS coordinates with row labels
if (!is.null(labels)) rownames(coords) <- labels
invisible(coords)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/plot_dist_matrix.R
|
print.dsm <- function (x, ...) {
info <- check.dsm(x)
cat(sprintf("Distributional Semantic Model with %d rows x %d columns\n", info$nrow, info$ncol))
n.cells <- prod(info$nrow, info$ncol)
qformat <- function (y) {
if (is.na(y)) return("N/A")
if (y > 9.9999e9) return(sprintf("%.1fG", y/1e9))
if (y > 9.9999e6) return(sprintf("%.1fM", y/1e6))
if (y > 50e3) return(sprintf("%.1fk", y/1e3))
return(sprintf("%d", y))
}
if (info$M$ok) {
cat("* raw co-occurrence matrix M available\n")
if (info$M$sparse) {
n.nz <- signcount(x$M, "nnzero")
cat(sprintf(" - sparse matrix with %s / %s nonzero entries (fill rate = %.2f%%)\n", qformat(n.nz), qformat(n.cells), 100 * n.nz / n.cells))
} else {
cat(sprintf(" - dense matrix with %s cells\n", qformat(n.cells)))
}
if (info$M$canonical) cat(" - in canonical format\n")
if (isTRUE(info$M$nonneg)) cat(" - known to be non-negative\n")
if (!is.na(info$N)) cat(sprintf(" - sample size of underlying corpus: %s tokens\n", qformat(info$N)))
}
if (info$S$ok) {
cat("* scored matrix S available\n")
if (info$S$sparse) {
n.nz <- signcount(x$S, "nnzero")
cat(sprintf(" - sparse matrix with %s / %s nonzero entries (fill rate = %.2f%%)\n", qformat(n.nz), qformat(n.cells), 100 * n.nz / n.cells))
} else {
cat(sprintf(" - dense matrix with %s cells\n", qformat(n.cells)))
}
if (info$S$canonical) cat(" - in canonical format\n")
if (isTRUE(info$S$nonneg)) cat(" - known to be non-negative\n")
}
invisible(x)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/print_dsm.R
|
## replacement for iotools::read.delim.raw, which re-encodes input file after reading
## - most options are fixed and column names and classes have to be specified by the user
## - fh must be a connection that can be properly opened in binary mode (with automatic decompression if necessary)
## - column names can be read from file (header=TRUE), specified as character vector or implicitly as names of colClasses
## - colClasses must be given as character strings; if unspecified, they are inferred from the first lines of the file
## - unless encoding is "" or "native.enc", input will be converted to UTF-8 and marked as such
.read.delim.raw <- function (fh, header=TRUE, colClasses=NULL, sep="\t", quote="", nrows=-1L, nrowsClasses=25L, encoding=getOption("encoding")) {
is.native <- encoding == "" || encoding == "native.enc"
is.utf8 <- any(grepl("^utf-?8$", encoding, ignore.case=TRUE))
with.header <- isTRUE(header)
## read entire file as raw vector (may want to make block size n= larger than default of 64k bytes)
bindata <- readAsRaw(fh)
if (!is.native && !is.utf8) {
## use iconv to recode the raw vector into UTF-8
bindata <- iconv(list(bindata), from=encoding, to="UTF-8", toRaw=TRUE)[[1]]
}
## if colClasses aren't specified, try to infer them from the first nrowsClasses lines
if (is.null(colClasses)) {
subset <- mstrsplit(bindata, sep=sep, quote=quote, nsep=NA, strict=TRUE, nrows=nrowsClasses, skip=with.header)
colClasses <- sapply(1:ncol(subset), function (i) {
type <- class(type.convert(subset[, i], as.is=TRUE))
if (type == "integer") "numeric" else type # integer marginal frequencies can be problematic
})
}
## read header line
if (with.header) {
header <- mstrsplit(bindata, sep=sep, quote=quote, nsep=NA, strict=TRUE, nrows=1L)
}
## add header names to colClasses
if (is.character(header)) {
if (length(header) != length(colClasses)) stop("number of items in header doesn't match number of table columns")
names(colClasses) <- header
}
else {
if (is.null(names(colClasses))) names(colClasses) <- sprintf("V%d", seq_along(colClasses))
}
## now parse the raw table data into a data frame
res <- dstrsplit(bindata, colClasses, sep=sep, quote=quote, nsep=NA, strict=TRUE, nrows=nrows, skip=with.header)
if (!is.native) {
## mark all character variables as UTF-8 (unless read with native encoding)
for (i in seq_along(colClasses)) {
if (colClasses[i] == "character") Encoding(res[[i]]) <- "UTF-8"
}
}
res
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/read_delim_raw.R
|
read.dsm.matrix <- function (file, format=c("word2vec"), encoding="UTF-8", batchsize=1e6, verbose=FALSE) {
format <- match.arg(format)
if (is.character(file)) {
file <- file(file, "r", encoding=encoding)
on.exit(close(file))
}
else {
if (!inherits(file, "connection")) stop("'file' must be a character string or connection")
if (!isOpen(file, "r")) {
open(file, "r")
on.exit(close(file))
}
}
if (format == "word2vec") {
info <- read.table(file, header=FALSE, quote="", comment.char="", colClasses="integer", col.names=c("nR", "nC"), nrows=1)
if (!(info$nR > 0 && info$nC > 0)) stop("format error in 'word2vec' file")
if (as.double(info$nR) * info$nC > 2147483647) stop(sprintf("%d x %d matrix is too large for R", info$nR, info$nC))
batch.lines <- max(round(batchsize / info$nC), 10) # how many matrix rows to read per batch
col.classes <- c("character", rep("double", info$nC))
M <- matrix(0, info$nR, info$nC) # pre-allocate the data matrix
row.names <- character(info$nR)
n.total <- 0
if (verbose) {
pb <- txtProgressBar(0, info$nR, style=3)
on.exit(close(pb), add=TRUE)
}
while (n.total < info$nR) {
A <- as.matrix(read.table(file, header=FALSE, quote="", comment.char="", na.strings=c(), colClasses=col.classes, row.names=1, nrows=batch.lines))
if (ncol(A) != info$nC) stop(sprintf("format error in 'word2vec' file -- expected %d-dim vector, but got %d columns", info$nC, ncol(A)))
nR <- nrow(A)
if (nR < 1) stop(sprintf("read error in 'word2vec' file -- expecting to read %d further rows", info$nR - n.total))
if (n.total + nR > info$nR) stop("format error in 'word2vec' file -- too many rows")
idx <- seq(n.total + 1, n.total + nR)
M[idx, ] <- A
row.names[idx] <- rownames(A)
n.total <- n.total + nR
if (verbose) setTxtProgressBar(pb, n.total)
}
structure(M, dimnames=list(row.names, NULL))
}
else stop(sprintf("internal error -- unsupported format '%s'", format))
}
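## Illustration of the 'word2vec' text format parsed above (hypothetical content):
## the first line gives <rows> <columns>, followed by one whitespace-delimited row
## per target term, e.g.
## 2 3
## cat 0.12 -0.05 0.31
## dog 0.08 0.22 -0.17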
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/read_dsm_matrix.R
|
read.dsm.triplet <- function (filename, freq=FALSE, value.first=FALSE, tokens=FALSE,
rowinfo=NULL, rowinfo.header=NULL, colinfo=NULL, colinfo.header=NULL, N=NA, span.size=1,
sep="\t", quote="", nmax=-1, sort=FALSE, encoding=getOption("encoding"), verbose=FALSE) {
if (verbose) cat(sprintf("Loading DSM triplets from '%s' ... ", filename))
is.pipe <- grepl("\\|\\s*$", filename, perl=TRUE)
if (is.pipe) {
filename <- sub("\\s*\\|\\s*$", "", filename, perl=TRUE)
fh <- pipe(filename) # don't set encoding= parameter because .read.delim.raw will read binary data and convert later
} else {
fh <- file(filename) # don't set encoding= parameter
}
if (tokens) {
if (freq || value.first) warning("freq= and value.first= options are ignored with tokens=TRUE")
triplets <- .read.delim.raw(fh, header=FALSE, colClasses=c(l1="character", l2="character"), sep=sep, quote=quote, nrows=nmax, encoding=encoding)
## I prefer iotools::read.delim.raw over readr::read_delim because:
## - readr has many "expensive" dependencies (esp. Boost in package 'BH')
## - readr doesn't support the default "native.enc" encoding and cannot read from all types of connections
## - iotools is slightly faster and leaner (less memory overhead) than readr
## - unfortunately, read.delim.raw doesn't convert character encodings at all, but we work around this with our own .read.delim.raw
## Alternative version using readr::read_delim:
## triplets <- read_delim(fh, sep, col_names=c("l1", "l2"), col_types="cc", locale=locale(encoding=encoding), n_max=nmax, quote=quote, comment="", na=character(), escape_double=FALSE, escape_backslash=FALSE, progress=FALSE)
triplets$val <- 1
freq <- TRUE
} else {
col.types <- if (value.first) c(val="numeric", l1="character", l2="character") else c(l1="character", l2="character", val="numeric")
triplets <- .read.delim.raw(fh, header=FALSE, colClasses=col.types, sep=sep, quote=quote, nrows=nmax, encoding=encoding)
## Alternative version using readr::read_delim:
## col.types <- if (value.first) "dcc" else "ccd"
## col.names <- if (value.first) c("val", "l1", "l2") else c("l1", "l2", "val")
## triplets <- read_delim(fh, sep, col_names=col.names, col_types=col.types, locale=locale(encoding=encoding), n_max=nmax, quote=quote, comment="", na=character(), escape_double=FALSE, escape_backslash=FALSE, progress=FALSE)
}
## close(fh) not needed because .read.delim.raw automatically opens and closes the connection
if (verbose) cat(sprintf("%.2fM %s\n", length(triplets$l1) / 1e6, if (tokens) "tokens" else "items"))
## read external marginal frequencies or other information on targets and features
have.rowinfo <- !is.null(rowinfo)
have.colinfo <- !is.null(colinfo)
have.N <- !is.na(N)
if ((have.rowinfo || have.colinfo) && freq) {
if (!(have.rowinfo && have.colinfo && have.N)) stop("need rowinfo=, colinfo= and N= for external marginal frequencies")
}
if (have.rowinfo) {
if (verbose) cat(sprintf(" - loading target information from '%s'\n", rowinfo))
if (is.null(rowinfo.header)) rowinfo.header <- TRUE
rowinfo.tbl <- .read.delim.raw(file(rowinfo), header=rowinfo.header, sep=sep, quote=quote, encoding=encoding)
if (!("term" %in% colnames(rowinfo.tbl))) stop("rowinfo= must specify feature types in column 'term'")
have.f <- "f" %in% colnames(rowinfo.tbl)
if (freq && !have.f) stop("rowinfo= must include marginal frequencies (column 'f') if freq=TRUE or tokens=TRUE")
if (have.f && !missing(span.size)) rowinfo.tbl$f <- span.size * rowinfo.tbl$f # adjust row marginals for span size
## dsm() constructor below will check that all target terms are included in the table and add nnzero counts if necessary
}
else rowinfo.tbl <- NULL
if (have.colinfo) {
if (verbose) cat(sprintf(" - loading feature information from '%s'\n", colinfo))
if (is.null(colinfo.header)) colinfo.header <- TRUE
colinfo.tbl <- .read.delim.raw(file(colinfo), header=colinfo.header, sep=sep, quote=quote, encoding=encoding)
if (!("term" %in% colnames(colinfo.tbl))) stop("colinfo= must specify target types in column 'term'")
have.f <- "f" %in% colnames(colinfo.tbl)
if (freq && !have.f) stop("colinfo= must include marginal frequencies (column 'f') if freq=TRUE or tokens=TRUE")
}
else colinfo.tbl <- NULL
dsm(target=triplets$l1, feature=triplets$l2, score=triplets$val, raw.freq=freq, rowinfo=rowinfo.tbl, colinfo=colinfo.tbl, N=N, sort=sort, verbose=verbose)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/read_dsm_triplet.R
|
## this function reads the contents of a UCS export directory and returns the necessary information for .access.file()
.open.archive <- function (filename) {
if (file.access(filename, 4) == 0) {
if (file.info(filename)$isdir) {
contents <- list.files(filename)
return(list(contents=contents, dir=filename))
} else {
stop("file '", filename, "' exists, but is not a directory")
}
}
else {
stop("UCS export archive '", filename, "' does not exist")
}
}
## access a file from UCS export archive, returning a connection object opened in "rt" mode;
## this function automatically finds compressed files with extension .gz, .bz2 or .xz (which file() will magically decompress)
.access.file <- function (archive, member, encoding=getOption("encoding"), check.only=FALSE) {
if (grepl("\\.(gz|bz2|xz)$", member, ignore.case=TRUE, perl=TRUE)) stop("internal error - archive members can only be accessed by uncompressed name")
member.names <- paste(member, c("", ".gz", ".bz2", ".xz"), sep="")
idx.found <- member.names %in% archive$contents
n.found <- sum(idx.found)
if (check.only) return(n.found == 1)
if (n.found > 1) stop("UCS export archive '", archive$dir, "' contains multiple versions of the same component: ", paste(member.names[idx.found], collapse=", "))
if (n.found == 0) stop("'", archive$dir, "' is not a valid UCS export archive, component '", member, "' missing")
file(paste(archive$dir, member.names[idx.found], sep="/"), encoding=encoding, open="rt")
}
read.dsm.ucs <- function (filename, encoding=getOption("encoding"), verbose=FALSE) {
archive <- .open.archive(filename)
if (verbose) cat(sprintf("Loading DSM from UCS export archive %s/ ...\n", filename))
fh <- .access.file(archive, "globals.tbl", encoding)
globals <- read.delim(fh, colClasses=c(N="double"), quote="", comment.char="", stringsAsFactors=FALSE)
close(fh)
if (nrow(globals) != 1) stop("format error - globals.tbl must contain exactly one row")
if (!("N" %in% colnames(globals))) stop("format error - sample size N missing from globals.tbl")
N <- globals$N
if (verbose) cat(" - marginal frequencies of targets (row information)\n")
fh <- .access.file(archive, "rows.tbl", encoding)
rows <- read.delim(fh, colClasses=c(term="character", f="double"), quote="", comment.char="", stringsAsFactors=FALSE, na.strings="")
close(fh)
if (verbose) cat(" - marginal frequencies of features (column information)\n")
fh <- .access.file(archive, "cols.tbl", encoding)
cols <- read.delim(fh, colClasses=c(term="character", f="double"), quote="", comment.char="", stringsAsFactors=FALSE, na.strings="")
close(fh)
n.rows <- nrow(rows)
n.cols <- nrow(cols)
## rows$f <- as.double(rows$f) # adjust marginal frequencies to doubles
## cols$f <- as.double(cols$f)
have.dense.M <- .access.file(archive, "M", check.only=TRUE)
have.sparse.M <- .access.file(archive, "M.mtx", check.only=TRUE)
if (have.dense.M && have.sparse.M) stop("UCS export archive '", archive$dir, "' contains both sparse (M.mtx) and dense (M) cooccurrence matrix")
if (have.sparse.M) {
if (verbose) cat(" - sparse co-occurrence matrix\n")
fh <- .access.file(archive, "M.mtx", encoding)
M <- as(readMM(fh), "CsparseMatrix") # make sure that the sparse matrix is in canonical DSM format
if (!is(M, "generalMatrix")) M <- as(M, "generalMatrix")
if (!is(M, "dgCMatrix")) stop(paste0("internal error: conversion to dgCMatrix failed, got '", class(M)[1], "' instead"))
close(fh)
} else {
if (verbose) cat(" - dense co-occurrence matrix\n")
fh <- .access.file(archive, "M", encoding)
n.cells <- as.double(n.rows) * n.cols # avoid integer overflow for oversized matrix
if (n.cells >= 2^31) stop("dense co-occurrence matrix is too large to load into R")
tmp <- scan(fh, what=double(0), nmax=n.cells, quiet=TRUE)
close(fh)
if (length(tmp) != n.cells) stop(sprintf("invalid data - M does not contain exactly %d = %d * %d cells", n.cells, n.rows, n.cols))
M <- matrix(tmp, nrow=n.rows, ncol=n.cols, byrow=TRUE)
rm(tmp) # free memory
}
stopifnot(nrow(M) == n.rows && ncol(M) == n.cols)
dimnames(M) <- list(rows$term, cols$term)
dsm(M=M, rowinfo=rows, colinfo=cols, globals=globals, raw.freq=TRUE, verbose=verbose)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/read_dsm_ucs.R
|
.check.norm <- function (method, p) {
if (method == "minkowski" && p == Inf) {
method <- "maximum"
p <- 2
}
if (method == "minkowski" && (p < 0 || !is.finite(p))) stop("Minkowski p-norm con only be computed for 0 <= p < Inf")
## internal codes for selected norm (must match C code in <row_norms.c>)
code <- switch(method, euclidean=0, maximum=1, manhattan=2, minkowski=3)
if (is.null(code)) stop("unknown norm selected (internal error)")
list(code=as.integer(code), p=as.double(p))
}
rowNorms <- function (M, method = "euclidean", p = 2) {
method <- match.arg(method, c("euclidean", "maximum", "manhattan", "minkowski"))
norm <- .check.norm(method, p)
info <- dsm.is.canonical(M)
if (!info$canonical) M <- dsm.canonical.matrix(M)
if (info$sparse) {
result <- CPP_row_norms_sparse(nrow(M), ncol(M), M@p, M@i, M@x, norm$code, norm$p)
} else {
result <- CPP_row_norms_dense(M, norm$code, norm$p)
}
names(result) <- rownames(M)
result
}
colNorms <- function (M, method = "euclidean", p = 2) {
method <- match.arg(method, c("euclidean", "maximum", "manhattan", "minkowski"))
norm <- .check.norm(method, p)
info <- dsm.is.canonical(M)
if (!info$canonical) M <- dsm.canonical.matrix(M)
if (info$sparse) {
result <- CPP_col_norms_sparse(nrow(M), ncol(M), M@p, M@i, M@x, norm$code, norm$p)
} else {
result <- CPP_col_norms_dense(M, norm$code, norm$p)
}
names(result) <- colnames(M)
result
}
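## A minimal sketch of common uses (illustrative, not run):
## rowNorms(M) # Euclidean lengths of the row vectors
## rowNorms(M, method="minkowski", p=0) # Hamming lengths = nonzero counts per row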
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/rowNorms.R
|
rsvd <- function (M, n, q=2, oversampling=2, transpose=FALSE, verbose=FALSE) {
## --- randomized SVD according to Halko, Martinsson & Tropp (2009, p. 9) ---
## We can apply the rSVD algorithm either to A = M or to A = t(M), depending on the format of M.
## Preliminary testing suggested that the original algorithm (A = M) is suitable for a matrix with many columns,
## while the transpose algorithm (A = t(M)) works better if the matrix has many rows and a limited number of columns.
## With the current implementation, which uses SVD rather than QR decomposition to obtain an orthonormal basis,
## there does not seem to be a substantial difference.
dsm.is.canonical(M) # ensure that M is a suitable matrix (we don't need to enforce canonical format)
nR <- nrow(M)
nC <- ncol(M)
if (n < 1 || n > min(nR, nC)) stop(sprintf("number of singular components out of range n = 1 ... %d", min(nR, nC)))
k2 <- min(oversampling * n, nR, nC) # = 2*k in the paper
if (verbose) cat(sprintf("Randomized SVD reduction%s to %d => %d dimensions:\n", if (transpose) " (transposed)" else "", k2, n))
if (!transpose) {
if (verbose) cat(" - sampling range of A\n") # -- original algorithm applied to A = M
Omega <- matrix(rnorm(nC*k2), nC, k2)
Y <- M %*% Omega
rm(Omega)
if (q >= 1) for (i in 1:q) {
if (verbose) cat(sprintf(" - power iteration #%d\n", i))
Y <- M %*% crossprod(M, Y)
}
if (verbose) cat(sprintf(" - orthonormal basis of %d x %d matrix\n", nrow(Y), ncol(Y)))
Q <- svd(Y, nu=k2, nv=0)$u # orthonormal basis of rg(Y); SVD is faster than and as accurate as QR decomposition
rm(Y)
B <- crossprod(Q, M)
if (verbose) cat(sprintf(" - SVD decomposition of %d x %d matrix\n", nrow(B), ncol(B)))
SVD <- svd(B, nu=n, nv=n) # SVD of B, truncated to n target dimensions
rm(B)
if (verbose) cat(" - composing final result\n")
return(list(
u = Q %*% SVD$u, # U = Q * \hat{U}
v = SVD$v, # V
d = SVD$d[1:n])) # diag(Sigma)
} else {
if (verbose) cat(" - sampling range of A\n") # -- transposed algorithm for A = t(M)
Omega <- matrix(rnorm(k2*nR), k2, nR) # = t(Omega)
Y <- Omega %*% M # = t(A * Omega)
rm(Omega)
if (q >= 1) for (i in 1:q) {
if (verbose) cat(sprintf(" - power iteration #%d\n", i))
Y <- tcrossprod(Y, M) %*% M # = t( (A * t(A))^i * A * Omega) ) = t(Y)
}
if (verbose) cat(sprintf(" - orthonormal basis of %d x %d matrix\n", ncol(Y), nrow(Y)))
Q <- svd(Y, nu=0, nv=k2)$v # orthonormal basis of rg(Y) = column space of t(Y); Q is _not_ transposed
rm(Y)
B <- M %*% Q # = t( t(Q) * A ) = t(B)
if (verbose) cat(sprintf(" - SVD decomposition of %d x %d matrix\n", nrow(B), ncol(B)))
SVD <- svd(B, nu=n, nv=n) # t(B) = V * Sigma * t(\hat{U}), truncated to n target dimensions
rm(B)
if (verbose) cat(" - composing final result\n")
## we now have A = U * Sigma * t(V) with U = Q * \hat{U}, where \hat{U} = SVD$v (_not_ transposed!) and V = SVD$u;
## the approximate SVD of M = t(A) is therefore M = V * Sigma * t(U)
return(list(
u = SVD$u, # V
v = Q %*% SVD$v, # U = Q * \hat{U}
d = SVD$d[1:n])) # diag(Sigma)
}
}
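## A minimal usage sketch (illustrative, not run): rank-5 approximation of a random
## 100 x 20 matrix; res$u %*% diag(res$d) %*% t(res$v) then approximates M
## M <- matrix(rnorm(2000), 100, 20)
## res <- rsvd(M, n=5, q=2)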
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/rsvd.R
|
scaleMargins <- function (M, rows=NULL, cols=NULL, duplicate=TRUE) {
info <- dsm.is.canonical(M)
if (!info$canonical) M <- dsm.canonical.matrix(M)
nr <- nrow(M)
nc <- ncol(M)
if (is.null(rows)) {
rows <- rep(1, nr)
} else {
if (length(rows) == 1) rows <- rep(rows, nr)
if (length(rows) != nr) stop("rows= must either be a scalar or conformable with the rows of M=")
}
if (is.null(cols)) {
cols <- rep(1, nc)
} else {
if (length(cols) == 1) cols <- rep(cols, nc)
if (length(cols) != nc) stop("cols= must either be a scalar or conformable with the columns of M=")
}
if (info$sparse) {
CPP_scale_margins_sparse(M, rows=rows, cols=cols, duplicate=duplicate)
} else {
CPP_scale_margins_dense(M, rows=rows, cols=cols, duplicate=duplicate)
}
}
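## A minimal sketch (illustrative, not run): rescale rows to relative frequencies
## P <- scaleMargins(M, rows=1/rowSums(M)) # each row of P sums to 1 (assuming no zero rows)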
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/scale_margins.R
|
signcount <- function (x, what=c("counts", "nonneg", "nnzero")) {
what <- match.arg(what)
if (is.double(x)) {
res <- CPP_signcount(x)
}
else if (is.integer(x)) {
res <- CPP_signcount_int(x)
}
else if (is(x, "dgeMatrix")) {
res <- CPP_signcount(x@x)
}
else if (is(x, "dgCMatrix") || is(x, "dgRMatrix")) {
n.val <- prod(dim(x))
res <- CPP_signcount(x@x)
res[2] <- res[2] + n.val - sum(res) # add structural zeroes to count
}
else {
stop("'x' must be a numeric vector or matrix, a dense Matrix or a sparseMatrix in compressed representation")
}
## <res> now contains c(pos, zero, neg)
switch(
what,
counts = structure(res, names=c("pos", "zero", "neg")),
nonneg = (res[3] == 0),
nnzero = res[1] + res[3],
stop("internal error -- unsupported value for what= argument"))
}
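## Examples (hand-verified):
## signcount(c(-1, 0, 2, 3)) # => c(pos=2, zero=1, neg=1)
## signcount(c(-1, 0, 2, 3), "nonneg") # => FALSE (one negative entry)
## signcount(c(-1, 0, 2, 3), "nnzero") # => 3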
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/signcount.R
|
subset.dsm <- function (x, subset=NULL, select=NULL,
recursive=FALSE, drop.zeroes=FALSE, matrix.only=FALSE,
envir=parent.frame(), run.gc = FALSE,
...) {
info <- check.dsm(x, validate=TRUE) # make sure that rows/columns are consistent
if (recursive && matrix.only) stop("matrix.only=TRUE cannot be combined with recursive=TRUE")
while (recursive) {
force(envir) # fix parent environment to context of initial function call
y <- do.call(subset.dsm, list(x=x, subset=substitute(subset), select=substitute(select), recursive=FALSE, drop.zeroes=drop.zeroes, matrix.only=FALSE, envir=envir))
y.info <- check.dsm(y, validate=TRUE)
if (y.info$nrow == info$nrow && y.info$ncol == info$ncol) return(y)
x <- y
info <- y.info
if (run.gc) gc(verbose=FALSE) # avoid more than one duplicate during recursion
}
condition <- substitute(subset)
row.idx <- eval(condition, c(x$rows, x$globals), envir)
if (is.null(row.idx)) {
row.idx <- 1:info$nrow
update.nz.cols <- FALSE
} else {
## todo: check validity (either Boolean of correct length or numeric vector with indexes in range)
update.nz.cols <- TRUE # rows may have been deleted, so nnzero counts for columns need to be updated
}
condition <- substitute(select)
col.idx <- eval(condition, c(x$cols, x$globals), envir)
if (is.null(col.idx)) {
col.idx <- 1:info$ncol
update.nz.rows <- FALSE
} else {
## todo: check validity (either Boolean of correct length or numeric vector with indexes in range)
update.nz.rows <- TRUE # columns may have been deleted, so nnzero counts for rows need to be updated
}
if (is.logical(row.idx)) row.idx <- which(row.idx) # make sure we have numeric indices for additional subsetting
if (is.logical(col.idx)) col.idx <- which(col.idx)
if (!matrix.only && identical(row.idx, seq_len(info$nrow)) && identical(col.idx, seq_len(info$ncol))) {
## no change -> return unmodified DSM object (saves unnecessary copy in last iteration of recursive=TRUE)
return(x)
}
if (drop.zeroes) {
M <- if (info$S$ok) x$S else x$M # primary data matrix (use scores if available, which may be sparser than frequencies)
M.sub <- M[row.idx, col.idx, drop=FALSE] # proposed subset matrix
nnzero.rows <- rowNorms(M.sub, method="minkowski", p=0)
nnzero.cols <- colNorms(M.sub, method="minkowski", p=0)
rm(M.sub) # will take the real subset matrix in the code below (it would be better to just keep M.sub if no zeroes need to be deleted)
keep.rows <- nnzero.rows > 0
row.idx <- row.idx[keep.rows] # drop rows without nonzero entries
nnzero.rows <- nnzero.rows[keep.rows] # updated nnzero counts for final subset
keep.cols <- nnzero.cols > 0
col.idx <- col.idx[keep.cols] # drop columns without nonzero entries
nnzero.cols <- nnzero.cols[keep.cols]
rm(keep.rows, keep.cols)
}
if (matrix.only) {
M <- if (info$S$ok) x$S else x$M # matrix.only=TRUE: just return subset of the appropriate matrix
return(M[row.idx, col.idx, drop=FALSE])
}
## for small result sets, it is more memory-efficient to construct a new DSM object from scratch
y <- list(rows=x$rows[row.idx, , drop=FALSE], # mandatory components
cols=x$cols[col.idx, , drop=FALSE],
globals=x$globals)
if (info$M$ok) {
y$M <- x$M[row.idx, col.idx, drop=FALSE]
if (!is.na(info$M$nonneg)) attr(y$M, "nonneg") <- info$M$nonneg
}
if (info$S$ok) {
y$S <- x$S[row.idx, col.idx, drop=FALSE]
if (!is.na(info$S$nonneg)) attr(y$S, "nonneg") <- info$S$nonneg
}
## if rows and/or columns may have been deleted, update the relevant nonzero counts
if (drop.zeroes) {
y$rows$nnzero <- nnzero.rows # we've already computed these above
y$cols$nnzero <- nnzero.cols
} else {
if (update.nz.rows || update.nz.cols) {
M <- if (info$S$ok) y$S else y$M # nonzero counts are based on the primary data matrix
if (update.nz.rows) y$rows$nnzero <- rowNorms(M, method="minkowski", p=0)
if (update.nz.cols) y$cols$nnzero <- colNorms(M, method="minkowski", p=0)
}
}
structure(y, class=c("dsm", "list"))
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/subset_dsm.R
|
t.dsm <- function (x) {
info <- check.dsm(x)
tx <- list(rows=x$cols, cols=x$rows, globals=x$globals)
if (info$M$ok) tx$M <- t(x$M)
if (info$S$ok) tx$S <- t(x$S)
class(tx) <- c("dsm", "list")
return(tx)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/t_dsm.R
|
wordspace.openmp <- function (threads=NULL) {
if (!is.null(threads)) {
if (!(is.numeric(threads) && length(threads) == 1 && threads >= 1)) stop("argument threads= must be a single integer >= 1")
CPP_set_openmp_threads(threads)
}
res <- CPP_get_openmp_threads()
if (is.null(threads)) res else invisible(res)
}
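## A minimal sketch (only effective if the package was compiled with OpenMP support):
## wordspace.openmp() # query the current thread configuration
## wordspace.openmp(threads=4) # request 4 threads for multi-threaded operations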
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/wordspace_openmp.R
|
write.dsm.matrix <- function (x, file, format=c("word2vec"), round=FALSE, encoding="UTF-8", batchsize=1e6, verbose=FALSE) {
format <- match.arg(format)
M <- find.canonical.matrix(x)
info <- dsm.is.canonical(M, nonneg.check=FALSE)
nR <- nrow(M)
nC <- ncol(M)
if (format == "word2vec") {
if (info$sparse) stop("'word2vec' format cannot be used for a sparse matrix")
idx <- grepl("\\s", rownames(M), perl=TRUE)
if (any(idx)) stop(sprintf("row labels must not contain whitespace in word2vec format (%d problematic labels found)", sum(idx)))
idx <- duplicated(rownames(M))
if (any(idx)) stop(sprintf("duplicate row labels are not allowed in word2vec format (%d problematic labels found)", sum(idx)))
}
if (is.character(file)) {
file <- file(file, "w", encoding=encoding)
on.exit(close(file))
}
else {
if (!inherits(file, "connection")) stop("'file' must be a character string or connection")
if (!isOpen(file, "w")) {
open(file, "w")
on.exit(close(file))
}
}
if (format == "word2vec") {
batch.lines <- max(round(batchsize / nC), 10) # how many matrix rows to write per batch
if (verbose) {
pb <- txtProgressBar(0, nR, style=3)
on.exit(close(pb), add=TRUE)
}
cat(sprintf("%d %d\n", nrow(M), ncol(M)), file=file)
for (i in seq(1, nR, batch.lines)) {
k <- min(i + batch.lines - 1, nR)
M.batch <- M[i:k, , drop=FALSE]
if (!isFALSE(round)) M.batch <- round(M.batch, round)
write.table(M.batch, file=file, quote=FALSE, sep=" ", row.names=TRUE, col.names=FALSE)
if (verbose) setTxtProgressBar(pb, k)
}
}
else stop(sprintf("internal error -- unsupported format '%s'", format))
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/write_dsm_matrix.R
|
## define .onLoad and .onAttach here if package initialisation functions are needed;
## .Last.lib (has to be exported) or .onUnload for package finalisation
##
## Internal helper functions
##
## extract (score or frequency) matrix from DSM object or pass through matrix; ensures canonical format
find.canonical.matrix <- function (x, triplet=FALSE) {
if (inherits(x, "dsm")) {
info <- check.dsm(x)
M <- if (info$S$ok) x$S else x$M
} else if (is.matrix(x) || is(x, "dMatrix")) {
M <- x
} else {
stop("first argument must be an object of class 'dsm' or a co-occurrence/score matrix")
}
info <- dsm.is.canonical(M)
if (info$canonical && !triplet) M else dsm.canonical.matrix(M, triplet=triplet)
}
|
/scratch/gouwar.j/cran-all/cranData/wordspace/R/zzz.R
|
## ----message=FALSE------------------------------------------------------------
library(wordspace)
## ----echo=FALSE---------------------------------------------------------------
set.seed(42)
idx <- sort(sample.int(nrow(DSM_VerbNounTriples_BNC), 10))
knitr::kable(DSM_VerbNounTriples_BNC[idx, ])
## -----------------------------------------------------------------------------
Triples <- subset(DSM_VerbNounTriples_BNC, mode == "written")
## -----------------------------------------------------------------------------
subset(Triples, noun == "dog" & verb == "walk")
## -----------------------------------------------------------------------------
VObj <- dsm(target=Triples$noun, feature=Triples$verb, score=Triples$f, raw.freq=TRUE)
dim(VObj)
## -----------------------------------------------------------------------------
subset(VObj$rows, rank(-f) <= 6) # 6 most frequent nouns
## -----------------------------------------------------------------------------
head(VObj)
## -----------------------------------------------------------------------------
VObj <- subset(VObj, nnzero >= 3, nnzero >= 3, recursive=TRUE)
dim(VObj)
## -----------------------------------------------------------------------------
VObj <- dsm.score(VObj, score="simple-ll", transform="log", normalize=TRUE, method="euclidean")
## -----------------------------------------------------------------------------
VObj
## -----------------------------------------------------------------------------
VObj300 <- dsm.projection(VObj, method="svd", n=300)
dim(VObj300)
## ---- fig.width=7, fig.height=3, echo=2---------------------------------------
par(mar=c(4,4,1,1))
plot(attr(VObj300, "R2"), type="h", xlab="latent dimension", ylab="R2")
## -----------------------------------------------------------------------------
pair.distances("book", "paper", VObj300, method="cosine")
## -----------------------------------------------------------------------------
pair.distances("book", "paper", VObj300, method="cosine", convert=FALSE)
## -----------------------------------------------------------------------------
nearest.neighbours(VObj300, "book", n=14) # reduced space
## -----------------------------------------------------------------------------
nn <- nearest.neighbours(VObj, "book", n=15) # unreduced space
names(nn)
## ---- echo=c(1,3), fig.height=4-----------------------------------------------
nn.mat <- nearest.neighbours(VObj300, "book", n=15, dist.matrix=TRUE)
par(mar=c(1,1,1,1))
plot(nn.mat)
## ----echo=FALSE---------------------------------------------------------------
knitr::kable(RG65[seq(5, 65, 10), ])
## -----------------------------------------------------------------------------
eval.similarity.correlation(RG65, VObj300, convert=FALSE, format="HW")
## ---- echo=2------------------------------------------------------------------
par(mar=c(4,4,2,1))
plot(eval.similarity.correlation(RG65, VObj300, convert=FALSE, format="HW", details=TRUE))
## ---- echo=2------------------------------------------------------------------
par(mar=c(4,4,2,1))
plot(eval.similarity.correlation(RG65, DSM_Vectors, convert=FALSE, details=TRUE))
## -----------------------------------------------------------------------------
Vessel <- subset(SemCorWSD, target == "vessel" & pos == "n")
table(Vessel$gloss)
## ---- echo=FALSE--------------------------------------------------------------
knitr::kable(Vessel[, c("sense", "sentence")], row.names=FALSE)
## -----------------------------------------------------------------------------
centroids <- context.vectors(DSM_Vectors, Vessel$lemma, row.names=Vessel$id)
## ---- echo=2:4----------------------------------------------------------------
par(mar=c(2, 2, 2, 1))
library(cluster) # clustering algorithms of Kaufman & Rousseeuw (1990)
res <- pam(dist.matrix(centroids), 2, diss=TRUE, keep.diss=TRUE)
plot(res, col.p=factor(Vessel$sense), shade=TRUE, which=1, main="WSD for 'vessel'")
## ---- echo=1, eval=2----------------------------------------------------------
table(res$clustering, Vessel$sense)
knitr::kable(table(res$clustering, Vessel$sense))
## -----------------------------------------------------------------------------
eval.clustering(Vessel, M=centroids, word.name="id", class.name="sense")
## -----------------------------------------------------------------------------
mouse <- VObj300["mouse", ] # extract row vectors from matrix
computer <- VObj300["computer", ]
## -----------------------------------------------------------------------------
nearest.neighbours(VObj300, "mouse", n=12)
## -----------------------------------------------------------------------------
nearest.neighbours(VObj300, mouse + computer, n=12)
## -----------------------------------------------------------------------------
nearest.neighbours(VObj300, mouse * computer, n=12)
|
/scratch/gouwar.j/cran-all/cranData/wordspace/inst/doc/wordspace-intro.R
|
---
title: "Distributional Semantics in R with the 'wordspace' Package"
author: "Stefan Evert"
date: "1 April 2016"
output:
rmarkdown::html_vignette:
fig_width: 6
fig_height: 4
pdf_document: null
vignette: >
%\VignetteIndexEntry{Introduction to Wordspace}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
Distributional semantic models (DSMs) represent the meaning of a target term (which can be a word form, lemma, morpheme, word pair, etc.) in the form of a feature vector that records either co-occurrence frequencies of the target term with a set of feature terms (_term-term model_) or its distribution across textual units (_term-context model_). Such DSMs have become an indispensable ingredient in many NLP applications that require flexible broad-coverage lexical semantics.
Distributional modelling is an empirical science. DSM representations are determined by a wide range of parameters such as size and type of the co-occurrence context, feature selection, weighting of co-occurrence frequencies (often with statistical association measures), distance metric, dimensionality reduction method and the number of latent dimensions used. Despite recent efforts to carry out systematic evaluation studies, the precise effects of these parameters and their relevance for different application settings are still poorly understood.
The **wordspace** package aims to provide a flexible, powerful and easy-to-use "interactive laboratory" that enables its users to build DSMs and experiment with them, but that also scales up to the large models required by real-life applications.
Further background information and references can be found in:
> Evert, Stefan (2014). Distributional semantics in R with the wordspace package.
> In _Proceedings of COLING 2014, the 25th International Conference on Computational Linguistics: System Demonstrations_, pages 110--114, Dublin, Ireland.
Before continuing with this tutorial, load the package with
```{r message=FALSE}
library(wordspace)
```
## Input formats
The most general representation of a distributional model takes the form of a sparse matrix, with entries specified as a triplet of row label (_target term_), column label (_feature term_) and co-occurrence frequency. A sample of such a table is included in the package under the name `DSM_VerbNounTriples_BNC`, listing syntactic verb-noun co-occurrences in the British National Corpus:
```{r echo=FALSE}
set.seed(42)
idx <- sort(sample.int(nrow(DSM_VerbNounTriples_BNC), 10))
knitr::kable(DSM_VerbNounTriples_BNC[idx, ])
```
The `wordspace` package creates DSM objects from such triplet representations, which can easily be imported into R from a wide range of file and database formats. Ready-made import functions are provided for TAB-delimited text files (as used by [DISSECT](https://github.com/composes-toolkit/dissect)), which may be compressed to save disk space, and for term-document models created by the text-mining package `tm`.
The native input format is a pre-compiled sparse matrix representation generated by the [UCS toolkit](http://www.collocations.de/software.html). In this way, UCS serves as a hub for the preparation of co-occurrence data, which can be collected from dependency pairs, extracted from a corpus indexed with the [IMS Corpus Workbench](https://cwb.sourceforge.io/) or imported from various other formats.
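The corresponding import functions are `read.dsm.triplet` and `read.dsm.ucs`. A minimal sketch, assuming hypothetical input files that are not included in the package:
```{r eval=FALSE}
VN <- read.dsm.triplet("triples.txt.gz", freq=TRUE, sep="\t") # TAB-delimited triplet file
VN <- read.dsm.ucs("vn_export")                               # UCS export directory
```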
## Creating a DSM
The first step in the creation of a distributional semantic model is the compilation of a co-occurrence matrix. Let us illustrate the procedure for verb-noun co-occurrences from the written part of the British National Corpus. First, we extract relevant rows from the table above.
```{r}
Triples <- subset(DSM_VerbNounTriples_BNC, mode == "written")
```
Note that many verb-noun pairs such as _(walk, dog)_ still have multiple entries in `Triples`: _dog_ can appear either as the subject or as the object of _walk_.
```{r}
subset(Triples, noun == "dog" & verb == "walk")
```
There are two ways of dealing with such cases: we can either add up the frequency counts (a _dependency-filtered model_) or treat "dog-as-subject" and "dog-as-object" as two different terms (a _dependency-structured model_). We opt for a dependency-filtered model in this example -- can you work out how to compile the corresponding dependency-structured DSM in R, either for verbs or for nouns as target terms?
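As a hint, here is a sketch of one possible dependency-structured model for noun targets; it assumes that the `rel` column of the triplet table distinguishes subject from object slots and merges the relation into the feature term:
```{r eval=FALSE}
VObjStruct <- dsm(target=Triples$noun, feature=paste(Triples$verb, Triples$rel, sep=":"),
                  score=Triples$f, raw.freq=TRUE)
```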
The `dsm` constructor function expects three vectors of the same length, containing row label (target term), column label (feature term) and co-occurrence count (or pre-weighted score) for each nonzero cell of the co-occurrence matrix. In our example, we use nouns as targets and verbs as features. Note the option `raw.freq=TRUE` to indicate that the matrix contains raw frequency counts.
```{r}
VObj <- dsm(target=Triples$noun, feature=Triples$verb, score=Triples$f, raw.freq=TRUE)
dim(VObj)
```
The constructor automatically computes marginal frequencies for the target and feature terms by summing over rows and columns of the matrix respectively. The information is collected in data frames `VObj$rows` and `VObj$cols`, together with the number of nonzero elements in each row and column:
```{r}
subset(VObj$rows, rank(-f) <= 6) # 6 most frequent nouns
```
This way of computing marginal frequencies is appropriate for syntactic co-occurrence and term-document models. In the case of surface co-occurrence based on token spans, the correct marginal frequencies have to be provided separately in the `rowinfo=` and `colinfo=` arguments (see `?dsm` for details).
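For illustration, a sketch with hypothetical marginal tables; the data frames must contain the columns `term` and `f`:
```{r eval=FALSE}
rows.tbl <- data.frame(term=c("dog", "book"), f=c(11000, 52000))
cols.tbl <- data.frame(term=c("walk", "read"), f=c(8200, 30500))
VSpan <- dsm(target=c("dog", "book"), feature=c("walk", "read"), score=c(63, 6),
             raw.freq=TRUE, rowinfo=rows.tbl, colinfo=cols.tbl, N=1e8)
```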
The actual co-occurrence matrix is stored in `VObj$M`. Since it is too large to display on screen, we extract the top left corner with the `head` method for DSM objects. Note that you can also use `head(VObj, Inf)` to extract the full matrix.
```{r}
head(VObj)
```
## The DSM parameters
Rows and columns with few nonzero cells provide unreliable semantic information and can lead to numerical problems (e.g. because a sparse association score deletes the remaining nonzero entries). It is therefore common to apply frequency thresholds both on rows and columns, here in the form of requiring at least 3 nonzero cells. The option `recursive=TRUE` guarantees that both criteria are satisfied by the final DSM when rows and columns are filtered at the same time (see the examples in `?subset.dsm` for an illustration).
```{r}
VObj <- subset(VObj, nnzero >= 3, nnzero >= 3, recursive=TRUE)
dim(VObj)
```
If you want to filter _only_ columns or rows, you can pass the constraint as a named argument: `subset=(nnzero >= 3)` for rows and `select=(nnzero >= 3)` for columns.
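For instance, the following unevaluated sketch applies the threshold only to the feature terms:
```{r, eval=FALSE}
VObj.cols <- subset(VObj, select=(nnzero >= 3))  # filter columns only
```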
The next step is to weight co-occurrence frequency counts. Here, we use the _simple log-likelihood_ association measure with an additional logarithmic transformation, which has shown good results in evaluation studies. The `wordspace` package computes _sparse_ (or "positive") versions of all association measures by default, setting negative associations to zero. This guarantees that the sparseness of the co-occurrence matrix is preserved. We also normalize the weighted row vectors to unit Euclidean length (`normalize=TRUE`).
```{r}
VObj <- dsm.score(VObj, score="simple-ll", transform="log", normalize=TRUE, method="euclidean")
```
Printing a DSM object shows information about the dimensions of the co-occurrence matrix and whether it has already been scored. Note that the scored matrix does not replace the original co-occurrence counts, so `dsm.score` can be executed again at any time with different parameters.
```{r}
VObj
```
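For example, the same model could be re-scored with (sparse) pointwise mutual information instead -- an unevaluated sketch:
```{r, eval=FALSE}
VObj.mi <- dsm.score(VObj, score="MI", transform="none", normalize=TRUE)
```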
Most distributional models apply a dimensionality reduction technique to make data sets more manageable and to refine the semantic representations. A widely-used technique is singular value decomposition (SVD). Since `VObj` is a sparse matrix, `dsm.projection` automatically applies an efficient algorithm from the `sparsesvd` package.
```{r}
VObj300 <- dsm.projection(VObj, method="svd", n=300)
dim(VObj300)
```
`VObj300` is a dense matrix with 300 columns, giving the coordinates of the target terms in 300 latent dimensions. Its attribute `"R2"` shows what proportion of information from the original matrix is captured by each latent dimension.
```{r, fig.width=7, fig.height=3, echo=2}
par(mar=c(4,4,1,1))
plot(attr(VObj300, "R2"), type="h", xlab="latent dimension", ylab="R2")
```
## Using DSM representations
The primary goal of a DSM is to determine "semantic" distances between pairs of words, which is the job of the `pair.distances` function. Its arguments can also be parallel vectors, so that distances for a large number of word pairs can be computed efficiently.
```{r}
pair.distances("book", "paper", VObj300, method="cosine")
```
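For example, several pairs can be looked up in a single call (an unevaluated sketch):
```{r, eval=FALSE}
pair.distances(c("book", "book"), c("paper", "letter"),
               VObj300, method="cosine")
```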
By default, the function converts similarity measures into an equivalent distance metric -- the angle between vectors in the case of cosine similarity. If you want the actual similarity values, specify `convert=FALSE`:
```{r}
pair.distances("book", "paper", VObj300, method="cosine", convert=FALSE)
```
We are often interested in finding the nearest neighbours of a given term in the DSM space:
```{r}
nearest.neighbours(VObj300, "book", n=14) # reduced space
```
The return value is actually a vector of distances to the nearest neighbours, labelled with the corresponding terms. Here is how you obtain the actual neighbour terms:
```{r}
nn <- nearest.neighbours(VObj, "book", n=15) # unreduced space
names(nn)
```
The neighbourhood plot visualizes nearest neighbours as a semantic network based on their mutual distances. This often helps interpretation by grouping related neighbours. The network below shows that _book_ as a text type is similar to _novel_, _essay_, _poem_ and _article_; as a form of document it is similar to _paper_, _letter_ and _document_; and as a publication it is similar to _leaflet_, _magazine_ and _newspaper_.
```{r, echo=c(1,3), fig.height=4}
nn.mat <- nearest.neighbours(VObj300, "book", n=15, dist.matrix=TRUE)
par(mar=c(1,1,1,1))
plot(nn.mat)
```
A straightforward way to evaluate distributional representations is to compare them with human judgements of the semantic similarity between word pairs. The `wordspace` package includes two well-known data sets of this type: Rubenstein-Goodenough (`RG65`) and `WordSim353` (a superset of `RG65` with judgements from new test subjects).
```{r echo=FALSE}
knitr::kable(RG65[seq(5, 65, 10), ])
```
There is also a ready-made evaluation function, which computes the Pearson and rank correlation between the DSM distances and the human similarity judgements. The option `format="HW"` adjusts the POS-disambiguated notation for terms in the data set (e.g. `book_N`) to the format used by our distributional model (`book`).
```{r}
eval.similarity.correlation(RG65, VObj300, convert=FALSE, format="HW")
```
Evaluation results can also be visualized in the form of a scatterplot with a trend line.
```{r, echo=2}
par(mar=c(4,4,2,1))
plot(eval.similarity.correlation(RG65, VObj300, convert=FALSE, format="HW", details=TRUE))
```
The rank correlation of 0.308 is very poor, mostly due to the small amount of data on which our DSM is based. Much better results are obtained with pre-compiled DSM vectors from a large Web corpus, which are also included in the package. Note that target terms are given in a different format there (which corresponds to the format in `RG65`).
```{r, echo=2}
par(mar=c(4,4,2,1))
plot(eval.similarity.correlation(RG65, DSM_Vectors, convert=FALSE, details=TRUE))
```
## Advanced techniques
Schütze (1998) used DSM representations for word sense disambiguation (or, more precisely, word sense induction) based on a clustering of the sentence contexts of an ambiguous word. The `wordspace` package includes a small data set with such contexts for a selection of English words. Let us look at the noun _vessel_ as an example, which has two main senses ("ship" and "blood vessel"):
```{r}
Vessel <- subset(SemCorWSD, target == "vessel" & pos == "n")
table(Vessel$gloss)
```
Sentence contexts are given as tokenized strings (`$sentence`), in lemmatized form (`$hw`) and as lemmas annotated with part-of-speech codes (`$lemma`). Choose the version that matches the representation of target terms in your DSM.
```{r, echo=FALSE}
knitr::kable(Vessel[, c("sense", "sentence")], row.names=FALSE)
```
Following Schütze, each context is represented by a centroid vector obtained by averaging over the DSM vectors of all context words.
```{r}
centroids <- context.vectors(DSM_Vectors, Vessel$lemma, row.names=Vessel$id)
```
This returns a matrix of centroid vectors for the 12 sentence contexts of _vessel_ in the data set. The vectors can now be clustered and analyzed using standard R functions. Partitioning around medoids (PAM) has shown good and robust performance in evaluation studies.
```{r, echo=2:4}
par(mar=c(2, 2, 2, 1))
library(cluster) # clustering algorithms of Kaufman & Rousseeuw (1990)
res <- pam(dist.matrix(centroids), 2, diss=TRUE, keep.diss=TRUE)
plot(res, col.p=factor(Vessel$sense), shade=TRUE, which=1, main="WSD for 'vessel'")
```
Colours in the plot above indicate the gold standard sense of each instance of _vessel_. A confusion matrix confirms perfect clustering of the two senses:
```{r, echo=1, eval=2}
table(res$clustering, Vessel$sense)
knitr::kable(table(res$clustering, Vessel$sense))
```
We can also use a pre-defined function for the evaluation of clustering tasks, which is convenient but does not produce a visualization of the clusters. Note that the "target terms" of the task must correspond to the row labels of the centroid matrix, which we have set to sentence IDs (`Vessel$id`) above.
```{r}
eval.clustering(Vessel, M=centroids, word.name="id", class.name="sense")
```
As a final example, let us look at a simple approach to compositional distributional semantics, which computes the compositional meaning of two words as the element-wise sum or product of their DSM vectors.
```{r}
mouse <- VObj300["mouse", ] # extract row vectors from matrix
computer <- VObj300["computer", ]
```
The nearest neighbours of _mouse_ are problematic, presumably because the type vector represents a mixture of the two senses that is not close to either meaning in the semantic space.
```{r}
nearest.neighbours(VObj300, "mouse", n=12)
```
By adding the vectors of _mouse_ and _computer_, we obtain neighbours that seem to fit the "computer mouse" sense very well:
```{r}
nearest.neighbours(VObj300, mouse + computer, n=12)
```
Note that the target is specified as a distributional vector rather than a term in this case. Observations from the recent literature suggest that element-wise multiplication is not compatible with non-sparse SVD-reduced DSMs, so it is not surprising to find completely unrelated nearest neighbours in our example:
```{r}
nearest.neighbours(VObj300, mouse * computer, n=12)
```
|
/scratch/gouwar.j/cran-all/cranData/wordspace/inst/doc/wordspace-intro.Rmd
|
# Process the files passed as input to workflowr functions.
#
# allow_null - Allow passing files=NULL
# files_only - Throw an error if a path to a directory is included
# rmd_only - Only allow files extensions Rmd or rmd
# must_exist - Paths must exist on filesystem
# convert_to_relative_paths - Return paths relative to working directory
# expand_glob - Pass through function glob()
process_input_files <- function(files,
allow_null = FALSE,
files_only = TRUE,
rmd_only = FALSE,
must_exist = TRUE,
convert_to_relative_paths = FALSE,
expand_glob = TRUE) {
if (allow_null && is_null(files)) {
return(NULL)
}
assert_not_null(files)
assert_is_character(files)
assert_has_length(files, required_length = 1, comparison = "greater than or equal to")
if (files_only) {
if (any(fs::dir_exists(files))) {
stop("files cannot include a path to a directory")
}
}
if (expand_glob) {
files <- glob(files)
}
if (must_exist) {
if (!all(fs::file_exists(files)))
stop("Not all files exist. Check the paths to the files")
}
if (convert_to_relative_paths) {
files <- relative(files)
}
if (rmd_only) {
assert_is_rmd(files)
}
return(files)
}
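# Illustrative usage (hypothetical paths, for illustration only):
#
#   rmd <- process_input_files("analysis/*.Rmd", rmd_only = TRUE,
#                              convert_to_relative_paths = TRUE)
#
# This expands the glob, checks that all matches exist and are R Markdown
# files, and returns the paths relative to the working directory.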
assert_is_rmd <- function(argument, env = environment()) {
if (!is_rmd(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "Only files with extension Rmd or rmd"
observed <- deparse(argument)
stop_for_assert(argument_name, expected, observed)
}
}
assert_is_flag <- function(argument, env = environment()) {
assert_not_null(argument, env = env)
assert_not_na(argument, env = env)
assert_is_logical(argument, env = env)
assert_has_length(argument, 1, env = env)
}
assert_is_single_directory <- function(argument, env = environment()) {
assert_not_null(argument, env = env)
assert_not_na(argument, env = env)
assert_is_character(argument, env = env)
assert_has_length(argument, 1, env = env)
assert_is_directory(argument, env = env)
}
assert_not_null <- function(argument, env = environment()) {
if (is_null(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "not NULL"
observed <- "NULL"
stop_for_assert(argument_name, expected, observed)
}
}
assert_not_na <- function(argument, env = environment()) {
if (is_na(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "not NA"
observed <- "NA"
stop_for_assert(argument_name, expected, observed)
}
}
assert_is_logical <- function(argument, env = environment()) {
if (!is_logical(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "logical vector"
observed <- deparse(argument)
stop_for_assert(argument_name, expected, observed)
}
}
assert_is_character <- function(argument, env = environment()) {
if (!is_character(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "character vector"
observed <- deparse(argument)
stop_for_assert(argument_name, expected, observed)
}
}
assert_has_length <- function(argument, required_length,
comparison = c("equal to",
"greater than",
"greater than or equal to",
"less than",
"less than or equal to"),
env = environment()) {
comparison <- match.arg(comparison)
if (!has_length(argument, required_length, comparison)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- paste("vector with length", comparison, required_length)
observed <- paste("vector with length", length(argument))
stop_for_assert(argument_name, expected, observed)
}
}
assert_is_directory <- function(argument, env = environment()) {
if (!is_directory(argument)) {
argument_name <- deparse(substitute(argument, env = env))
expected <- "directory"
observed <- deparse(argument)
stop_for_assert(argument_name, expected, observed)
}
}
stop_for_assert <- function(argument_name, expected, observed) {
stop("Invalid input for argument ", argument_name,
"\nExpected input: ", expected,
"\nObserved input: ", observed,
call. = FALSE)
}
is_null <- function(argument) {
is.null(argument)
}
is_na <- function(argument) {
anyNA(argument)
}
is_logical <- function(argument) {
is.logical(argument)
}
is_character <- function(argument) {
is.character(argument)
}
is_directory <- function(argument) {
all(fs::dir_exists(argument))
}
is_rmd <- function(argument) {
extensions <- fs::path_ext(argument)
all(stringr::str_detect(extensions, "^[Rr]md$"))
}
has_length <- function(argument, required_length, comparison) {
switch(comparison,
`equal to` = length(argument) == required_length,
`greater than` = length(argument) > required_length,
`greater than or equal to` = length(argument) >= required_length,
`less than` = length(argument) < required_length,
`less than or equal to` = length(argument) <= required_length)
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/assertions.R
|
#' Extract a commit from a Git repository
#'
#' \code{extract_commit} extracts the 7-digit SHA1 identifier and message for a
#' specified commit.
#'
#' @param path character. Specify the path to a directory that is a Git
#' repository (or any subdirectory of the Git repository).
#' @param num numeric. The number of the commit to extract in reverse
#' chronological order. In other words, 1 is the most recent commit, 2 is the
#' second most recent commit, etc.
#'
#' @return A list with the named elements \code{sha1} and \code{message} (both
#' characters). If a Git repository is not found at \code{path}, both are
#' \code{NA}.
#'
#' @examples
#' \dontrun{
#' # Most recent commit
#' extract_commit(".", 1)
#' # Penultimate commit
#' extract_commit(".", 2)
#' }
#' @export
#' @keywords internal
extract_commit <- function(path, num) {
stopifnot(fs::file_exists(path),
is.numeric(num),
num == trunc(num),
num > 0)
path <- absolute(path)
if (!git2r::in_repository(path)) {
return(list(sha1 = "NA", message = "NA"))
}
repo <- git2r::repository(path, discover = TRUE)
git_log <- utils::capture.output(git2r::reflog(repo))
total_commits <- length(git_log)
if (total_commits == 0) {
return(list(sha1 = "NA", message = "NA"))
}
if (num > total_commits) {
stop(sprintf("Invalid search: %d. This repo only has %d commits.",
num, total_commits))
}
commit <- git_log[num]
sha1 <- substr(commit, 2, 8)
commit_message <- strsplit(commit, split = "commit: ")[[1]][2]
return(list(sha1 = sha1, message = commit_message))
}
# Check for user.name and user.email in .gitconfig
#
# path character. Path to repository
#
# If unable to find user.name and user.email, stops the program.
check_git_config <- function(path, custom_message = "this function") {
stopifnot(is.character(path))
# Only look for local configuration file if the directory exists and it is a
# Git repo
if (fs::dir_exists(path)) {
look_for_local <- git2r::in_repository(path)
} else {
look_for_local <- FALSE
}
# Determine if user.name and user.email are set
if (look_for_local) {
r <- git2r::repository(path, discover = TRUE)
git_config <- git2r::config(r)
config_email_set <- "user.email" %in% names(git_config$global) |
"user.email" %in% names(git_config$local)
config_name_set <- "user.name" %in% names(git_config$global) |
"user.name" %in% names(git_config$local)
} else {
git_config <- git2r::config()
config_email_set <- "user.email" %in% names(git_config$global)
config_name_set <- "user.name" %in% names(git_config$global)
}
if (config_email_set & config_name_set) {
return(invisible())
} else {
stop(wrap(
"You must set your user.name and user.email for Git first to be able to
run ", custom_message, ". To do this, run the following command in R,
replacing the arguments with your name and email address:\n\n
wflow_git_config(user.name = \"Your Name\", user.email = \"email@domain\")"),
call. = FALSE)
}
}
# Check for staged changes
#
# path character. Path to repository
#
# If staged changes are detected, stops the program.
check_staged_changes <- function(path, custom_message = "this function") {
stopifnot(is.character(path))
r <- git2r::repository(path, discover = TRUE)
git_status <- git2r::status(r)
if (length(git_status$staged) == 0) {
return(invisible())
} else {
# Format files
files_staged <- as.character(git_status$staged)
files_staged <- file.path(git2r::workdir(r), files_staged)
files_staged <- relative(files_staged)
files_staged <- utils::capture.output(dput(files_staged))
stop(wrap(
"The Git repository has staged changes. You must decide if you want to
commit these changes first before you run ", custom_message, ". To do
this, run the following command in R:\n\n wflow_git_commit(",
files_staged, ")"),
call. = FALSE)
}
}
# Obtain all the committed files in a Git repository at a given commit.
#
# repo - a git_repository object
#
# commit - NULL (default) or a git_commit object
#
# sysgit - character (default: `Sys.which("git")`) Path to system Git executable
# used to obtain committed files via `git ls-files`. Cannot be used
# with commit argument. To disable, set `git = ""`.
#
# The default is to use the head commit.
#
# Returns absolute paths.
get_committed_files <- function(repo, commit = NULL,
sysgit = getOption("workflowr.sysgit", default = "")) {
stopifnot(inherits(repo, "git_repository"))
stopifnot(is.null(commit) || inherits(commit, "git_commit"))
n_commits <- length(git2r::commits(repo))
if (n_commits == 0) {
return(NA)
}
# If Git is available and don't need a specific commit, use `git ls-files`
if (!is.null(sysgit) && !is.na(sysgit) && nchar(sysgit) > 0 && is.null(commit)) {
cmd <- sprintf("%s -C %s ls-files", shQuote(sysgit),
shQuote(git2r::workdir(repo)))
suppressWarnings(files <- system(cmd, intern = TRUE, ignore.stderr = TRUE))
# Using Git is supposed to be a convenient speed increase. If it fails for
# any reason (a failure adds an attribute "status"), just continue and use
# git2r/libgit2.
if (is.null(attr(files, which = "status", exact = TRUE))) {
files <- absolute(file.path(git2r::workdir(repo), files))
return(files)
}
}
if (is.null(commit)) {
commit <- git2r::lookup(repo, git2r::branch_target(git2r::repository_head(repo)))
}
tree <- git2r::tree(commit)
files <- ls_files(tree)
files <- absolute(file.path(git2r::workdir(repo), files))
return(files)
}
# List all files in a given "git_tree" object.
ls_files <- function (tree) {
tree_list <- as.list(tree)
tree_df <- as.data.frame(tree)
names(tree_list) <- tree_df$name
files <- tree_df$name[tree_df$type == "blob"]
dirs <- tree_df$name[tree_df$type == "tree"]
out <- files
  # Recursively call ls_files on the "git_tree" objects corresponding to each
# subdirectory
for (dir in dirs) {
tree_next <- tree_list[[dir]]
out <- c(out, file.path(dir, ls_files(tree_next)))
}
return(out)
}
# Get the files that have been committed to the repository more recently than
# their corresponding HTML files.
#
# repo: git_repository object
# files: character vector of filenames
# outdir: directory with website files
# sysgit: path to system Git executable to run `git log -n 1` to obtain time of
# last commit
get_outdated_files <- function(repo, files, outdir = NULL,
sysgit = getOption("workflowr.sysgit", default = "")) {
if (length(files) == 0) return(files)
stopifnot(inherits(repo, "git_repository"))
ext <- tools::file_ext(files)
if (!all(grepl("[Rr]md", ext)))
stop("Only R Markdown files are accepted.")
# Corresponding HTML files
html <- to_html(files, outdir = outdir)
# For each source file, determine if it has been committed more recently than
# its corresponding HTML
out_of_date <- logical(length = length(files))
# If Git is available, use it to run `git log -n 1`
if (!is.null(sysgit) && !is.na(sysgit) && nchar(sysgit) > 0) {
last_commit_time <- last_commit_time_sysgit
} else {
last_commit_time <- last_commit_time_git2r
}
for (i in seq_along(files)) {
recent_source_time <- last_commit_time(repo, files[i], sysgit = sysgit)
recent_html_time <- last_commit_time(repo, html[i], sysgit = sysgit)
if (recent_source_time >= recent_html_time) {
out_of_date[i] <- TRUE
}
}
outdated <- files[out_of_date]
return(outdated)
}
last_commit_time_git2r <- function(repo, fname, ...) {
last_commit <- git2r::commits(repo, n = 1, path = fname)[[1]]
last_commit_time <- last_commit$author$when$time
return(last_commit_time)
}
last_commit_time_sysgit <- function(repo, fname, sysgit, ...) {
cmd <- sprintf("%s -C %s log -n 1 --date=raw --format=%%ad -- %s",
shQuote(sysgit), shQuote(git2r::workdir(repo)), shQuote(fname))
raw_git <- suppressWarnings(system(cmd, intern = TRUE, ignore.stderr = TRUE))
# If it fails for any reason, fall back on git2r
if (!is.null(attr(raw_git, which = "status", exact = TRUE))) {
return(last_commit_time_git2r(repo, fname))
}
unix_git <- stringr::str_split(raw_git, "\\s")[[1]][1]
return(as.numeric(unix_git))
}
# Obtain the files updated in a commit
#
# Obtain the files updated in a commit, similar to \code{git status --stat}, by
# running a diff between the trees pointed to by the commit and its parent
# commit.
#
# This only works for commits that have one parent commit. Thus it will fail for
# merge commits (two or more parents) or the initial root commit (zero parents).
# This uses `diff,git_tree`. See the source code at
# \url{https://github.com/ropensci/git2r/blob/89d916f17cb979b3cc21cbb5834755a2cf075f5f/R/diff.r#L314}
# and examples at
# \url{https://github.com/ropensci/git2r/blob/cb30b1dd5f8b57978101ea7b7dc26ae2c9eed38e/tests/diff.R#L88}.
#
# @seealso \code{\link{obtain_files_in_commit_root}}
#
# Returns absolute paths.
obtain_files_in_commit <- function(repo, commit) {
stopifnot(inherits(repo, "git_repository"),
inherits(commit, "git_commit"))
parent_commit <- git2r::parents(commit)
# 3 possibilities:
#
# 1. Root commit with 0 parents
# 2. Standard commit with 1 parent
# 3. Merge commit with 2+ parents (yes, it's possible to merge more than 2 branches!)
if (length(parent_commit) == 0) {
files <- obtain_files_in_commit_root(repo, commit)
} else if (length(parent_commit) == 1) {
git_diff <- base::diff(git2r::tree(commit),
git2r::tree(parent_commit[[1]]))
files <- sapply(git_diff$files,
function(x) x$new_file)
} else {
stop(sprintf("Cannot perform diff on commit %s because it has %d parents",
commit$sha, length(parent_commit)))
}
files <- absolute(file.path(git2r::workdir(repo), files))
return(files)
}
# Obtain the files updated in the root commit
#
# The files included in the root commit cannot be determined comparing two
# trees (which is how \code{\link{obtain_files_in_commit}} works). See
# \href{https://stackoverflow.com/questions/41433034/how-to-obtain-files-included-in-initial-commit-using-git2r-libgit2}{this
# Stack Overflow question} for details.
#
# This only works for the root commit, i.e. it must have no parents.
#
# @seealso \code{\link{obtain_files_in_commit}}
#
# Returns paths relative to Git root directory.
obtain_files_in_commit_root <- function(repo, commit) {
# Obtain the files in the root commit of a Git repository
stopifnot(inherits(repo, "git_repository"),
inherits(commit, "git_commit"),
length(git2r::parents(commit)) == 0)
entries <- as.data.frame(git2r::tree(commit))
files <- character()
while (nrow(entries) > 0) {
if (entries$type[1] == "blob") {
# If the entry is a blob, i.e. file:
# - record the name of the file
# - remove the entry
files <- c(files, entries$name[1])
entries <- entries[-1, ]
} else if (entries$type[1] == "tree") {
# If the entry is a tree, i.e. subdirectory:
# - lookup the entries for this tree
# - add the subdirectory to the name so that path is correct
# - remove the entry from beginning and add new entries to end of
# data.frame
new_tree_df <- as.data.frame(git2r::lookup(repo, entries$sha[1]))
new_tree_df$name <- file.path(entries$name[1], new_tree_df$name)
entries <- rbind(entries[-1, ], new_tree_df)
} else {
stop(sprintf("Unknown type %s found in commit %s",
entries$type[1], commit))
}
}
return(files)
}
# Stop if HEAD does not point to a branch
check_branch <- function(git_head) {
if (!git2r::is_branch(git_head)) {
m <-
"You are not currently on any branch. Instead you are in 'detached HEAD'
state. workflowr doesn't support such advanced Git options. If you
didn't mean to do this, try running `git checkout master` in the
Terminal. If you did mean to do this, please use Git directly from the
Terminal to push your commits."
stop(wrap(m), call. = FALSE)
}
}
# Check remote repository.
#
# If there are no remotes available, throw an error.
#
# If a remote is specified, confirm it exists.
#
# remote - character vector or NULL
# remote_avail - a named character vector of remote URLs
check_remote <- function(remote, remote_avail) {
if (!(is.null(remote) || is.character(remote)))
stop("remote must be NULL or character vector")
if (!is.character(remote_avail))
stop("remote_avail must be a character vector")
# If there are no remotes available, throw an error.
if (length(remote_avail) == 0) {
m <- "No remote repositories are available. Run ?wflow_git_remote to learn
how to configure this."
stop(wrap(m), call. = FALSE)
}
# Fail early if remote is specified but doesn't exist
if (!is.null(remote) && !(remote %in% names(remote_avail))) {
m <-
"The remote you specified is not one of the remotes available. Run
?wflow_git_remote to learn how to add this remote."
stop(wrap(m), call. = FALSE)
}
}
# Determine which remote and branch to push or pull.
#
# This function assumes error handling has already happened upstream.
#
# See the documentation for wflow_git_push or wflow_git_pull for the explanation
# of this function.
#
# Returns a list of length two.
determine_remote_and_branch <- function(repo, remote, branch) {
stopifnot(inherits(repo, "git_repository"))
git_head <- git2r::repository_head(repo)
tracking <- git2r::branch_get_upstream(git_head)
# If both remote and branch are NULL and the current branch is tracking a
# remote branch, use this remote and branch.
if (is.null(remote) && is.null(branch) && !is.null(tracking)) {
remote <- git2r::branch_remote_name(tracking)
branch <- stringr::str_split_fixed(tracking$name,
"/", n = 2)[, 2]
}
# If remote is NULL, take an educated guess at what the user would want.
if (is.null(remote)) {
remote <- guess_remote(repo)
}
# If branch is NULL, use the same name as the current branch.
if (is.null(branch)) {
branch <- git_head$name
}
return(list(remote = remote, branch = branch))
}
# Take an educated guess of which remote to use if the user didn't specify one
# and the current branch is not tracking a remote branch.
#
# 1. If there is only 1 remote available, use it.
# 2. If there are multiple remotes available and one is called "origin", use it.
# 3. If there are multiple remotes available and none is "origin", throw error.
guess_remote <- function(repo) {
stopifnot(inherits(repo, "git_repository"))
remotes <- git2r::remotes(repo)
if (length(remotes) == 1) {
guess <- remotes
} else if ("origin" %in% remotes) {
guess <- "origin"
} else {
m <-
"Unable to guess which remote repository to use. Please specify the
argument `remote`. To see all the remotes available, you can run
`wflow_git_remote()`."
stop(wrap(m), call. = FALSE)
}
return(guess)
}
# Send warning if the remote branch is not the same one as local branch (HEAD)
warn_branch_mismatch <- function(remote_branch, local_branch) {
if (!(is.character(remote_branch) && is.character(local_branch)))
stop("remote_branch and local_branch must be character vectors")
if (remote_branch != local_branch) {
m <- sprintf(
"The remote branch is \"%s\", but the current local branch is \"%s\".
This is a valid option, but it is non-conventional. Is this what you
intended?",
remote_branch, local_branch)
warning(wrap(m), call. = FALSE)
}
}
# Determine if using HTTPS or SSH protocol
#
# remote - the name or URL of a remote repository. Note: The upstream function
# wflow_git_push()/pull() no longer accept direct URLs to a remote repository.
# However, I'm leaving this functionality in this function since it doesn't hurt
# anything and could be potentially useful in the future.
#
# remote_avail - a named character vector of remote URLs
#
# Return either "https" or "ssh"
get_remote_protocol <- function(remote, remote_avail) {
if (!(is.character(remote) && is.character(remote_avail)))
stop("remote and remote_avail must be character vectors")
if (remote %in% names(remote_avail)) {
url <- remote_avail[remote]
} else {
url <- remote
}
if (stringr::str_sub(url, 1, 5) == "https") {
protocol <- "https"
} else if (stringr::str_sub(url, 1, 4) == "git@") {
protocol <- "ssh"
} else {
m <- "The URL to the remote repository is using an unknown protocol. It
should start with https if you are using your username and password
for authentication, or with git@ if you are using your SSH keys. If
          you are trying to achieve something non-standard, please use Git
via the command line interface."
stop(wrap(m), call. = FALSE)
}
return(protocol)
}
# Authenticate with Git using either HTTPS or SSH
#
# protocol - either "https" or "ssh"
# username - username or NULL
# password - password or NULL
# dry_run - logical
authenticate_git <- function(protocol, username = NULL,
password = NULL, dry_run = FALSE) {
if (!protocol %in% c("https", "ssh"))
stop("protocol must be either \"https\" or \"ssh\"")
if (!(is.null(username) || (is.character(username) && length(username) == 1)))
stop("username must be NULL or a one-element character vector")
if (!(is.null(password) || (is.character(password) && length(password) == 1)))
stop("password must be NULL or a one-element character vector")
if (protocol == "https" && !dry_run) {
if (is.null(username)) {
if (interactive()) {
response <- ""
while (response == "") {
response <- readline("Please enter your username (Esc to cancel): ")
}
username <- response
} else {
m <-
"No username was specified. Either include the username in the
function call or run the command in an interactive R session to be
prompted to enter it."
stop(wrap(m), call. = FALSE)
}
}
if (is.null(password)) {
if (interactive()) {
password <- getPass::getPass("Please enter your password: ")
} else {
m <-
"No password was specified. Either include the password in the
function call (not recommended) or run the command in an interactive
R session to be prompted to enter it in a secure manner."
stop(wrap(m), call. = FALSE)
}
}
credentials <- git2r::cred_user_pass(username = username,
password = password)
} else {
# If dry run, credentials aren't needed.
#
# If using SSH, can't run cred_ssh_key() here if using a passphrase.
# credentials has to be entered as NULL when calling push or pull in order
# for it to work.
#
# https://github.com/hadley/devtools/issues/642#issuecomment-139357055
# https://github.com/ropensci/git2r/issues/284#issuecomment-306103004
credentials <- NULL
}
return(credentials)
}
# Throw error if Git repository is locked
check_git_lock <- function(r) {
stopifnot(inherits(r, "git_repository"))
index_lock <- file.path(git2r::workdir(r), ".git/index.lock")
if (fs::file_exists(index_lock)) {
stop(call. = FALSE, wrap(
"The Git repository is locked. This can happen if a Git command
previously crashed or if multiple Git commands were executed at the same
time. To fix this, you need to delete the file .git/index.lock. You can
do this by running the following in the R console:"),
"\n\n",
glue::glue("file.remove(\"{index_lock}\")")
)
}
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/git.R
|
# Utility functions to wrap git2r
# Wrapper for git2r::merge.git_repository().
#
# Needed because the returned values for class git_merge_result are inconsistent.
#
# https://github.com/ropensci/git2r/pull/321
# https://github.com/ropensci/git2r/issues/389
# https://github.com/ropensci/git2r/pull/391
#
# x - git_repository object
# b - character string of existing branch name
# fail - Passed to merge. From docs:
# > If a conflict occurs, exit immediately instead of attempting to
# continue resolving conflicts. Default is FALSE.
#
# See ?git2r::merge.git_repository for more details.
git2r_merge <- function(x, b, fail = FALSE) {
stopifnot(inherits(x, "git_repository"))
stopifnot(is.character(b))
stopifnot(is.logical(fail), length(fail) == 1)
m <- base::merge(x, b, fail = fail)
if (length(m$fast_forward) == 0) m$fast_forward <- FALSE
if (length(m$conflicts) == 0) m$conflicts <- FALSE
if (length(m$sha) == 0) m$sha <- NA_character_
return(m)
}
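# Illustrative usage (assuming `repo` is an existing git_repository object):
#
#   m <- git2r_merge(repo, "feature-branch")
#   if (m$conflicts) stop("resolve the merge conflicts before continuing")
#
# Unlike a bare merge(), the returned fields fast_forward, conflicts and sha
# are always well-defined scalars.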
# Cover all edge cases by passing original paths, absolute paths, and paths that
# are relative to root of Git repo.
git2r_add <- function(r, files, force = FALSE) {
# Confirm that Git repository isn't locked
check_git_lock(r)
git2r::add(r, files, force = force)
git2r::add(r, absolute(files), force = force)
git2r::add(r, relative(files, start = git2r::workdir(r)), force = force)
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/git2r.R
|
# Infrastructure for workflowr projects.
# This file defines templates for use with various workflowr functions. They are
# inserted with `cat(glue::glue(x), file = fname)`, which removes the starting
# blank line and leaves a final blank line in the output file.
# wflow_start() ----------------------------------------------------------------
templates <- list(
.gitattributes = '
# Classify R Markdown files as R code for GitHub language statistics
# https://github.com/github-linguist/linguist/blob/master/docs/overrides.md
*.[Rr]md linguist-language=R
',
.gitignore = '
.Rproj.user
.Rhistory
.RData
.Ruserdata
.Rapp.history
.DS_Store
analysis/figure
analysis/*png
analysis/*html
analysis/*_cache
analysis/site_libs
',
.Rprofile = '
## This makes sure that R loads the workflowr package
## automatically, every time the project is loaded
if (requireNamespace("workflowr", quietly = TRUE)) {{
message("Loading .Rprofile for the current workflowr project")
library("workflowr")
}} else {{
message("workflowr package not installed, please run install.packages(\\"workflowr\\") to use the workflowr functions")
}}
',
`_workflowr.yml` = '
# workflowr options
# Version {wflow_version}
# The seed to use for random number generation. See ?set.seed for details.
seed: {the_seed_to_set}
# The working directory to build the R Markdown files. The path is relative to
# _workflowr.yml. See ?rmarkdown::render for details.
knit_root_dir: "."
',
`analysis/_site.yml` = '
name: "{name}"
output_dir: ../docs
navbar:
title: "{name}"
left:
- text: Home
href: index.html
- text: About
href: about.html
- text: License
href: license.html
output:
workflowr::wflow_html:
toc: yes
toc_float: yes
theme: cosmo
highlight: textmate
',
`analysis/index.Rmd` = '
---
title: "Home"
site: workflowr::wflow_site
output:
workflowr::wflow_html:
toc: false
editor_options:
chunk_output_type: console
---
Welcome to my research website.
',
`analysis/about.Rmd` = '
---
title: "About"
output:
workflowr::wflow_html:
toc: false
editor_options:
chunk_output_type: console
---
Describe your project.
',
`analysis/license.Rmd` = '
---
title: "License"
output:
workflowr::wflow_html:
toc: false
editor_options:
chunk_output_type: console
---
What license are you using for your code? See [choosealicense.com][choose] for
help deciding. It\'s a convention to save a file `LICENSE` in the root of your
Git repo that contains the license text.
What license are you using for the written content on your site? It is
traditional to choose a [Creative Commons][cc] license for this type of content.
[choose]: https://choosealicense.com/
[cc]: https://creativecommons.org/choose/
How should others cite your work? It\'s a convention to save a file `CITATION`
in the root of your Git repo that contains the citation information.
',
`code/README.md` = '
# Code
Save command-line scripts and shared R code here.
',
`data/README.md` = '
# Data
Save raw data files here.
',
`output/README.md` = '
# Output
Save processed data files here.
',
README.md = '
# {name}
A [workflowr][] project.
[workflowr]: https://github.com/workflowr/workflowr
',
"Rproj" = '
Version: 1.0
RestoreWorkspace: No
SaveWorkspace: No
AlwaysSaveHistory: Yes
EnableCodeIndexing: Yes
UseSpacesForTab: Yes
NumSpacesForTab: 2
Encoding: UTF-8
RnwWeave: Sweave
LaTeX: pdfLaTeX
AutoAppendNewline: Yes
StripTrailingWhitespace: Yes
'
)
# wflow_html() -----------------------------------------------------------------
# These templates are used by wflow_html() to insert HTML before and after the
# document body.
includes <- list(
header = '
<link rel="icon" href="https://github.com/workflowr/workflowr-assets/raw/main/img/reproducible.png">
<!-- Add a small amount of space between sections. -->
<style type="text/css">
div.section {
padding-top: 12px;
}
</style>
',
footer = '
<!-- Adjust MathJax settings so that all math formulae are shown using
TeX fonts only; see
https://docs.mathjax.org/en/latest/web/configuration.html. This will make
the presentation more consistent at the cost of the webpage sometimes
taking slightly longer to load. Note that this only works because the
footer is added to webpages before the MathJax javascript. -->
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
"HTML-CSS": { availableFonts: ["TeX"] }
});
</script>
'
)
extras <- list(
disable_remote = '
#!/bin/bash
# This hook prevents pushing to a remote repository, which is ideal for a
# project containing confidential data. It was created by wflow_start() with the
# argument disable_remote = TRUE. If you decide you want to be able to push this
# repository, delete this file.
echo "This is a confidential project. Do not push the files to a remote server"
exit 1
'
)
# wflow_use_gitlab() -----------------------------------------------------------
gitlab <- list(`.gitlab-ci.yml` = '
pages:
stage: deploy
script:
- echo \'Nothing to do...\'
artifacts:
paths:
- public
only:
- master
')
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/infrastructure.R
|
create_report <- function(input, output_dir, has_code, opts) {
if (opts$suppress_report) {
return("")
}
input <- absolute(input)
input_dir <- dirname(input)
uses_git <- git2r::in_repository(input_dir)
if (uses_git) {
r <- git2r::repository(input_dir, discover = TRUE)
s <- git2r::status(r, ignored = TRUE)
} else {
r <- NULL
s <- NULL
}
# workflowr checks ------------------------------------------------------
checks <- list()
# Check R Markdown status
if (uses_git) {
checks$result_rmd <- check_rmd(input, r, s)
}
if (has_code) {
# Check environment
checks$result_environment <- check_environment()
# Check seed
checks$result_seed <- check_seed(opts$seed)
# Check sessioninfo
checks$result_sessioninfo <- check_sessioninfo(input, opts$sessioninfo)
# Check caching
checks$cache <- check_cache(input)
# Check for absolute paths
checks$paths <- check_paths(input, opts$knit_root_dir)
}
# Check version control
checks$result_vc <- check_vc(input, r, s, opts$github, output_dir = output_dir)
# Formatting checks -----------------------------------------------------
checks_formatted <- Map(format_check, checks)
checks_formatted_string <- paste(unlist(checks_formatted), collapse = "\n")
report_checks <- glue::glue('
<div class="panel-group" id="workflowr-checks">
{checks_formatted_string}
</div>
')
# Format `knit_root_dir` for display in report.
knit_root_print <- opts$knit_root_dir
# If it is part of a workflowr project, construct a path relative to the
# directory that contains the workflowr project directory.
p <- try(wflow_paths(error_git = FALSE, project = input_dir), silent = TRUE)
if (!inherits(p, "try-error")) {
if (fs::path_has_parent(knit_root_print, absolute(p$root))) {
knit_root_print <- fs::path_rel(knit_root_print,
start = dirname(absolute(p$root)))
}
} else {
# Otherwise, just replace the home directory with ~
knit_root_print <- stringr::str_replace(knit_root_print,
fs::path_home(),
"~")
}
# Add trailing slash
if (!stringr::str_detect(knit_root_print, "/$")) {
knit_root_print <- paste0(knit_root_print, "/")
}
# Version history --------------------------------------------------------
if (uses_git) {
versions <- get_versions(input, output_dir, r, opts$github)
report_versions <- versions
} else {
report_versions <-
"<p>This project is not being versioned with Git. To obtain the full
reproducibility benefits of using workflowr, please see
<code>?wflow_start</code>.</p>"
}
# Return -----------------------------------------------------------------
checks_passed <- vapply(checks, function(x) x$pass, FUN.VALUE = logical(1))
if (all(checks_passed)) {
symbol <- "glyphicon-ok text-success"
} else {
symbol <- "glyphicon-exclamation-sign text-danger"
}
report <- glue::glue('
<p>
<button type="button" class="btn btn-default btn-workflowr btn-workflowr-report"
data-toggle="collapse" data-target="#workflowr-report">
<span class="glyphicon glyphicon-list" aria-hidden="true"></span>
workflowr
<span class="glyphicon {symbol}" aria-hidden="true"></span>
</button>
</p>
<div id="workflowr-report" class="collapse">
<ul class="nav nav-tabs">
<li class="active"><a data-toggle="tab" href="#summary">Summary</a></li>
<li><a data-toggle="tab" href="#checks">
Checks <span class="glyphicon {symbol}" aria-hidden="true"></span>
</a></li>
<li><a data-toggle="tab" href="#versions">Past versions</a></li>
</ul>
<div class="tab-content">
<div id="summary" class="tab-pane fade in active">
<p><strong>Last updated:</strong> {Sys.Date()}</p>
<p><strong>Checks:</strong>
<span class="glyphicon glyphicon-ok text-success" aria-hidden="true"></span>
{sum(checks_passed)}
<span class="glyphicon glyphicon-exclamation-sign text-danger" aria-hidden="true"></span>
{sum(!checks_passed)}
</p>
<p><strong>Knit directory:</strong>
<code>{knit_root_print}</code>
<span class="glyphicon glyphicon-question-sign" aria-hidden="true"
title="This is the local directory in which the code in this file was executed.">
</span>
</p>
<p>
This reproducible <a href="https://rmarkdown.rstudio.com">R Markdown</a>
analysis was created with <a
href="https://github.com/workflowr/workflowr">workflowr</a> (version
{packageVersion("workflowr")}). The <em>Checks</em> tab describes the
reproducibility checks that were applied when the results were created.
The <em>Past versions</em> tab lists the development history.
</p>
<hr>
</div>
<div id="checks" class="tab-pane fade">
{report_checks}
<hr>
</div>
<div id="versions" class="tab-pane fade">
{report_versions}
<hr>
</div>
</div>
</div>
')
return(report)
}
get_versions <- function(input, output_dir, r, github) {
rmd <- input
html <- to_html(rmd, outdir = output_dir)
df_versions <- get_versions_df(c(rmd, html), r)
# Convert paths to be relative to Git root
rmd <- relative(rmd, start = git2r::workdir(r))
html <- relative(html, start = git2r::workdir(r))
df_versions$File <- relative(df_versions$File, start = git2r::workdir(r))
# Exit early if there are no past versions
  if (nrow(df_versions) == 0) {
text <-
"<p>There are no past versions. Publish this analysis with
<code>wflow_publish()</code> to start tracking its development.</p>"
return(text)
}
df_versions$File <- ifelse(df_versions$File == rmd, "Rmd", "html")
if (is.na(github)) {
df_versions$Version <- shorten_sha(df_versions$Version)
} else {
df_versions$Version <- ifelse(df_versions$File == "html",
# HTML preview URL
create_url_html(github, html, df_versions$Version),
# R Markdown URL
sprintf("<a href=\"%s/blob/%s/%s\" target=\"_blank\">%s</a>",
github, df_versions$Version, rmd,
shorten_sha(df_versions$Version)))
}
df_versions <- df_versions[, c("File", "Version", "Author", "Date", "Message")]
template <-
"
<p>
These are the previous versions of the repository in which changes were made
to the R Markdown (<code>{{rmd}}</code>) and HTML (<code>{{html}}</code>)
files. If you've configured a remote Git repository (see
<code>?wflow_git_remote</code>), click on the hyperlinks in the table below to
view the files as they were in that past version.
</p>
<div class=\"table-responsive\">
<table class=\"table table-condensed table-hover\">
<thead>
<tr>
<th>File</th>
<th>Version</th>
<th>Author</th>
<th>Date</th>
<th>Message</th>
</tr>
</thead>
<tbody>
{{#df_versions}}
<tr>
<td>{{{File}}}</td>
<td>{{{Version}}}</td>
<td>{{Author}}</td>
<td>{{Date}}</td>
<td>{{Message}}</td>
</tr>
{{/df_versions}}
</tbody>
</table>
</div>
"
data <- list(rmd = rmd, html = html,
df_versions = unname(whisker::rowSplit(df_versions)))
text <- whisker::whisker.render(template, data)
return(text)
}
# Get versions table for figures. Needs to be refactored to share code with
# get_versions.
get_versions_fig <- function(fig, r, github) {
df_versions <- get_versions_df(fig, r)
fig <- relative(fig, start = git2r::workdir(r))
# Exit early if there are no past versions
  if (nrow(df_versions) == 0) {
return("")
}
if (is.na(github)) {
df_versions$Version <- shorten_sha(df_versions$Version)
} else {
df_versions$Version <- sprintf("<a href=\"%s/blob/%s/%s\" target=\"_blank\">%s</a>",
github, df_versions$Version, fig,
shorten_sha(df_versions$Version))
}
df_versions <- df_versions[, c("Version", "Author", "Date")]
fig <- basename(fig)
id <- paste0("fig-", tools::file_path_sans_ext(basename(fig)))
# An HTML ID cannot contain spaces. If filename has spaces, quote the figure
# name and convert spaces in ID to dashes. Also insert text in case there is a
# similar chunk name that already uses dashes instead of spaces.
if (stringr::str_detect(fig, "\\s")) {
fig <- paste0('"', fig, '"')
id <- stringr::str_replace_all(id, "\\s", "-")
id <- stringr::str_replace(id, "fig-", "fig-no-spaces-")
}
template <-
"
<p>
<button type=\"button\" class=\"btn btn-default btn-xs btn-workflowr btn-workflowr-fig\"
data-toggle=\"collapse\" data-target=\"#{{id}}\">
Past versions of {{fig}}
</button>
</p>
<div id=\"{{id}}\" class=\"collapse\">
<div class=\"table-responsive\">
<table class=\"table table-condensed table-hover\">
<thead>
<tr>
<th>Version</th>
<th>Author</th>
<th>Date</th>
</tr>
</thead>
<tbody>
{{#df_versions}}
<tr>
<td>{{{Version}}}</td>
<td>{{Author}}</td>
<td>{{Date}}</td>
</tr>
{{/df_versions}}
</tbody>
</table>
</div>
</div>
"
data <- list(fig = fig, id = id,
df_versions = unname(whisker::rowSplit(df_versions)))
text <- whisker::whisker.render(template, data)
return(text)
}
# Return a data frame of past versions
#
# files - paths to files
# r - git_repository
# timezone - timezone to use, e.g. "America/New_York". Defaults to local
# timezone. If unset (i.e. is NULL, NA, or ""), defaults to "Etc/UTC".
#
# If no past versions, returns empty data frame
get_versions_df <- function(files, r, timezone = Sys.timezone()) {
commits_path <- list()
for (f in files) {
commits_f <- git2r::commits(r, path = f)
names(commits_f) <- rep(f, length(commits_f))
commits_path <- c(commits_path, commits_f)
}
# Exit early if there are no past versions
if (length(commits_path) == 0) {
return(data.frame())
}
version <- vapply(commits_path, function(x) x$sha, character(1))
author <- vapply(commits_path, function(x) x$author$name, character(1))
date <- lapply(commits_path, function(x) as.POSIXct(x$author$when))
date <- do.call(c, date)
message <- vapply(commits_path, function(x) x$message, character(1))
# Only keep the first line of the commit message
message <- vapply(message, get_first_line, character(1))
df_versions <- data.frame(File = names(commits_path), Version = version,
Author = author, Date = date,
Message = message, stringsAsFactors = FALSE)
df_versions <- df_versions[order(df_versions$Date, decreasing = TRUE), ]
if (is.null(timezone) || is.na(timezone) || identical(timezone, "")) {
timezone <- "Etc/UTC"
}
df_versions$Date <- as.character(as.Date(df_versions$Date, tz = timezone))
rownames(df_versions) <- seq_len(nrow(df_versions))
return(df_versions)
}
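# Usage sketch (hedged): assumes an existing git2r repository object `r` and a
# tracked file path; the sha, author, date, and message shown are hypothetical.
# > r <- git2r::repository(".")
# > get_versions_df("analysis/index.Rmd", r)
#                 File                                  Version Author       Date        Message
# 1 analysis/index.Rmd 2b5c0f7e8d9a1b2c3d4e5f60718293a4b5c6d7e8   Jane 2020-01-01 Initial commit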
check_vc <- function(input, r, s, github, output_dir) {
if (!is.null(r)) {
pass <- TRUE
log <- git2r::commits(r)
if (length(log) > 0) {
sha <- log[[1]]$sha
sha7 <- shorten_sha(sha)
if (!is.na(github)) {
sha_display <- sprintf("<a href=\"%s/tree/%s\" target=\"_blank\">%s</a>",
github, sha, sha7)
} else {
sha_display <- sha7
}
} else {
sha_display <- "No commits yet"
}
summary <- sprintf("<strong>Repository version:</strong> %s", sha_display)
# Scrub HTML and other generated content (e.g. site_libs). It's ok that these
# have uncommitted changes.
s <- scrub_status(s, r, output_dir = output_dir)
status <- utils::capture.output(print(s))
status <- c("<pre><code>", status, "</code></pre>")
status <- paste(status, collapse = "\n")
details <-
"
<p>
Great! You are using Git for version control. Tracking code development and
connecting the code version to the results is critical for reproducibility.
</p>
"
if (sha_display != "No commits yet") {
details <- c(details,
glue::glue(
"<p>
The results in this page were generated with repository version {sha_display}.
See the <em>Past versions</em> tab to see a history of the changes made to the
R Markdown and HTML files.
</p>"
))
}
details <- c(details,
"
<p>
Note that you need to be careful to ensure that all relevant files for the
analysis have been committed to Git prior to generating the results (you can
use <code>wflow_publish</code> or <code>wflow_git_commit</code>). workflowr only
checks the R Markdown file, but you know if there are other scripts or data
files that it depends on. Below is the status of the Git repository when the
results were generated:
</p>
",
status,
"
<p>
Note that any generated files, e.g. HTML, png, CSS, etc., are not included in
this status report because it is ok for generated content to have uncommitted
changes.
</p>
")
details <- paste(details, collapse = "\n")
} else {
pass <- FALSE
summary <- "<strong>Repository version:</strong> no version control"
details <-
"
Tracking code development and connecting the code version to the results is
critical for reproducibility. To start using Git, open the Terminal and type
<code>git init</code> in your project directory.
"
}
return(list(pass = pass, summary = summary, details = details))
}
check_sessioninfo <- function(input, sessioninfo) {
# Check if the user manually inserted sessionInfo or session_info (from
# devtools or sessioninfo packages)
lines <- readLines(input)
any_sessioninfo <- stringr::str_detect(lines, "session(_i|I)nfo")
if (any(any_sessioninfo) || sessioninfo != "") {
pass <- TRUE
summary <- "<strong>Session information:</strong> recorded"
details <-
"
Great job! Recording the operating system, R version, and package versions is
critical for reproducibility.
"
} else {
pass <- FALSE
summary <- "<strong>Session information:</strong> unavailable"
details <-
"
Recording the operating system, R version, and package versions is critical
for reproducibility. To record the session information, add <code>sessioninfo:
\"sessionInfo()\"</code> to _workflowr.yml. Alternatively, you could use
<code>devtools::session_info()</code> or
<code>sessioninfo::session_info()</code>. Lastly, you can manually add a code
chunk to this file to run any one of these commands and then disable the
automatic insertion by changing the workflowr setting to <code>sessioninfo:
\"\"</code>.
"
}
return(list(pass = pass, summary = summary, details = details))
}
check_seed <- function(seed) {
if (is.numeric(seed) && length(seed) == 1) {
pass <- TRUE
seed_code <- sprintf("<code>set.seed(%d)</code>", seed)
summary <- sprintf("<strong>Seed:</strong> %s", seed_code)
details <- sprintf(
"
The command %s was run prior to running the code in the R Markdown file.
Setting a seed ensures that any results that rely on randomness, e.g.
subsampling or permutations, are reproducible.
"
, seed_code)
} else {
pass <- FALSE
summary <- "<strong>Seed:</strong> none"
details <-
"
No seed was set with <code>set.seed</code> prior to running the code in the R
Markdown file. Setting a seed ensures that any results that rely on
randomness, e.g. subsampling or permutations, are reproducible. To set a seed,
specify an integer value for the option seed in _workflowr.yml or the YAML header
of the R Markdown file.
"
}
return(list(pass = pass, summary = summary, details = details))
}
# This function is designed to check the global environment for any defined
# objects that could interfere with an analysis. However, it accepts arbitrary
# environments to facilitate unit testing.
check_environment <- function(envir = .GlobalEnv) {
ls_envir <- ls(name = envir)
if (length(ls_envir) == 0) {
pass <- TRUE
summary <- "<strong>Environment:</strong> empty"
details <-
"
Great job! The global environment was empty. Objects defined in the global
environment can affect the analysis in your R Markdown file in unknown ways.
For reproducibility it's best to always run the code in an empty environment.
"
} else {
pass <- FALSE
summary <- "<strong>Environment:</strong> objects present"
details <-
"
<p>The global environment had objects present when the code in the R Markdown
file was run. These objects can affect the analysis in your R Markdown file in
unknown ways. For reproducibility it's best to always run the code in an empty
environment. Use <code>wflow_publish</code> or <code>wflow_build</code> to
ensure that the code is always run in an empty environment.</p>
"
objects_table <- create_objects_table(envir)
details <- paste(collapse = "\n",
details,
"<p>The following objects were defined in the global
environment when these results were created:</p>",
objects_table)
}
return(list(pass = pass, summary = summary, details = details))
}
create_objects_table <- function(env) {
objects <- ls(name = env)
classes <- vapply(objects, function(x) paste(class(env[[x]]), collapse = ";"),
character(1))
sizes <- vapply(objects,
function(x) format(utils::object.size(env[[x]]), units = "auto"),
character(1))
df <- data.frame(Name = objects, Class = classes, Size = sizes)
table <- convert_df_to_html_table(df)
return(table)
}
convert_df_to_html_table <- function(df) {
table <- knitr::kable(df, format = "html", row.names = FALSE,
table.attr = "class=\"table table-condensed table-hover\"")
return(as.character(table))
}
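# Example (output abbreviated): any data frame renders as a Bootstrap-styled
# HTML table string.
# > cat(convert_df_to_html_table(data.frame(Name = "x", Size = "56 bytes")))
# <table class="table table-condensed table-hover">
#  <thead> ... </thead>
#  <tbody> ... </tbody>
# </table>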
format_check <- function(check) {
if (check$pass) {
symbol <- "glyphicon-ok text-success"
} else {
symbol <- "glyphicon-exclamation-sign text-danger"
}
# Create a unique ID for the collapsible panel based on the summary by
# concatenating all alphanumeric characters.
panel_id <- stringr::str_extract_all(check$summary, "[:alnum:]")[[1]]
panel_id <- paste(panel_id, collapse = "")
text <- glue::glue('
<div class="panel panel-default">
<div class="panel-heading">
<p class="panel-title">
<a data-toggle="collapse" data-parent="#workflowr-checks" href="#{panel_id}">
<span class="glyphicon {symbol}" aria-hidden="true"></span>
{check$summary}
</a>
</p>
</div>
<div id="{panel_id}" class="panel-collapse collapse">
<div class="panel-body">
{check$details}
</div>
</div>
</div>
'
)
return(text)
}
check_rmd <- function(input, r, s) {
stopifnot("ignored" %in% names(s))
s_simpler <- lapply(s, unlist)
s_simpler <- lapply(s_simpler, add_git_path, r = r)
# Determine current status of R Markdown file
if (input %in% s_simpler$staged) {
rmd_status <- "staged"
} else if (input %in% s_simpler$unstaged) {
rmd_status <- "unstaged"
} else if (input %in% s_simpler$untracked) {
rmd_status <- "untracked"
} else if (input %in% s_simpler$ignored) {
rmd_status <- "ignored"
} else {
rmd_status <- "up-to-date"
}
if (rmd_status == "up-to-date") {
pass <- TRUE
summary <- "<strong>R Markdown file:</strong> up-to-date"
details <-
"
Great! Since the R Markdown file has been committed to the Git repository, you
know the exact version of the code that produced these results.
"
} else {
pass <- FALSE
summary <- "<strong>R Markdown file:</strong> uncommitted changes"
if (rmd_status %in% c("staged", "unstaged")) {
details <- sprintf("The R Markdown file has %s changes.", rmd_status)
} else {
details <- sprintf("The R Markdown is %s by Git.", rmd_status)
}
details <- paste(collapse = " ", details,
"
To know which version of the R Markdown file created these
results, you'll want to first commit it to the Git repo. If
you're still working on the analysis, you can ignore this
warning. When you're finished, you can run
<code>wflow_publish</code> to commit the R Markdown file and
build the HTML.
"
)
}
return(list(pass = pass, summary = summary, details = details))
}
check_cache <- function(input) {
# Check for cached chunks
input_cache <- fs::path_ext_remove(input)
input_cache <- glue::glue("{input_cache}_cache")
cached_chunks_files <- list.files(path = file.path(input_cache, "html"),
pattern = "RData$")
if (length(cached_chunks_files) == 0) {
pass <- TRUE
summary <- "<strong>Cache:</strong> none"
details <-
"
Nice! There were no cached chunks for this analysis, so you can be confident
that you successfully produced the results during this run.
"
} else {
pass <- FALSE
summary <- "<strong>Cache:</strong> detected"
cached_chunks <- fs::path_file(cached_chunks_files)
cached_chunks <- stringr::str_replace(cached_chunks, "_[a-z0-9]+.RData$", "")
cached_chunks <- unique(cached_chunks)
cached_chunks <- paste0("<li>", cached_chunks, "</li>", collapse = "")
details <- glue::glue("
The following chunks had caches available: <ul>{cached_chunks}</ul>
To ensure reproducibility of the results, delete the cache directory
<code>{fs::path_rel(input_cache, start = fs::path_dir(input))}</code>
and re-run the analysis. To have workflowr automatically delete the cache
directory prior to building the file, set <code>delete_cache = TRUE</code>
when running <code>wflow_build()</code> or <code>wflow_publish()</code>.
")
}
return(list(pass = pass, summary = summary, details = details))
}
add_git_path <- function(x, r) {
if (!is.null(x)) {
file.path(git2r::workdir(r), x)
} else {
NA_character_
}
}
detect_code <- function(input) {
stopifnot(fs::file_exists(input))
lines <- readLines(input)
code_chunks <- stringr::str_detect(lines, "^```\\{[a-z].*\\}$")
# Inline code can span multiple lines, so concatenate first. A new line counts
# as a character, which is the same as the space inserted by the collapse.
lines_collapsed <- paste(lines, collapse = " ")
# Extract all strings that start with "`r " and end with "`" (with no
# intervening "`").
code_inline_potential <- stringr::str_extract_all(lines_collapsed, "`r[^`]+`")[[1]]
# Only keep valid inline code:
# 1. Must start with at least one whitespace character after the "`r"
# 2. Must contain at least one non-whitespace character
#
# The regex in words is:
# `r{at least one whitespace character}{at least one non whitespace character}{zero or more characters}`
code_inline <- stringr::str_detect(code_inline_potential, "`r\\s+\\S+.*`")
return(any(code_chunks) || any(code_inline))
}
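# Example: a file containing inline code is detected; a prose-only file is not.
# > f <- tempfile(fileext = ".Rmd")
# > writeLines(c("Some text", "`r 1 + 1`"), f)
# > detect_code(f)
# [1] TRUE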
# Create URL to past versions of HTML files.
#
# For workflowr projects hosted at GitHub.com or GitLab.com, the returned URL
# will be to a CDN provided by raw.githack.com. The file is served as HTML for
# convenient viewing of the results. If the project is hosted on a different
# platform (e.g. Bitbucket or a custom GitLab instance), the returned URL will
# be to the specific version of the HTML file in the repository (inconveniently
# rendered as text).
#
# https://raw.githack.com/
#
# Examples:
#
# GitHub: https://github.com/user/repo/blob/commit/path/file.html
# -> https://rawcdn.githack.com/user/repo/commit/path/file.html
#
# GitLab: https://gitlab.com/user/repo/blob/commit/path/file.html
# -> https://glcdn.githack.com/user/repo/raw/commit/path/file.html
#
# GitLab custom: https://git.rcc.uchicago.edu/user/repo/blob/commit/path/file.html
# -> https://git.rcc.uchicago.edu/user/repo/blob/commit/path/file.html
#
# Note: The full result includes the anchor tag:
# <a href=\"https://rawcdn.githack.com/user/repo/commit/path/file.html\" target=\"_blank\">1st 7 characters of commit</a>
create_url_html <- function(url_repo, html, sha) {
url_github <- "https://github.com/"
url_gitlab <- "https://gitlab.com/"
cdn_github <- "https://rawcdn.githack.com"
cdn_gitlab <- "https://glcdn.githack.com"
if (stringr::str_detect(url_repo, url_github)) {
url_html <- sprintf("<a href=\"%s/%s/%s/%s\" target=\"_blank\">%s</a>",
cdn_github,
stringr::str_replace(url_repo, url_github, ""),
sha, html, shorten_sha(sha))
} else if (stringr::str_detect(url_repo, url_gitlab)) {
url_html <- sprintf("<a href=\"%s/%s/raw/%s/%s\" target=\"_blank\">%s</a>",
cdn_gitlab,
stringr::str_replace(url_repo, url_gitlab, ""),
sha, html, shorten_sha(sha))
} else {
url_html <- sprintf("<a href=\"%s/blob/%s/%s\" target=\"_blank\">%s</a>",
url_repo, sha, html, shorten_sha(sha))
}
return(url_html)
}
shorten_sha <- function(sha) {
stringr::str_sub(sha, 1, 7)
}
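# Example:
# > shorten_sha("c87a2e8f0aabcd1234ef56789012345678901234")
# [1] "c87a2e8"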
# Detect absolute file paths in a character vector
#
# Detects Unix and Windows file paths. Paths must be surrounded by quotations
# as they would appear in R code.
#
# Returns a character vector of all potential absolute paths
#
# Returns: "/a/b/c", '/a/b/c', "~/a/b/c", "~\\a\\b\\c", "C:/a/b/c", "C:\\a\\b\\c"
# Ignores: /a/b/c, "~a", "C:a/b/c", "~"
#
# **Warning:** The identified paths may not be returned in the input order
# because the order depends on the order of the regexes that are used to search
# for paths.
#
# Note: Since this checks the entire document, including non-code, I made it
# stringent. For example, it ignores "~" and "C:". These are technically valid
# paths, but it's unlikely that workflowr will be able to provide useful advice
# if these are actually being used as paths. Also, they could be in non-code
# sections. A potential way to improve this check is to first extract the code
# from the document and/or remove comments.
detect_abs_path <- function(string) {
  path_regex <- c("[\"\'](/.+?)[\"\']", # Unix path surrounded by ' or "
                  "[\"\']([a-zA-Z]:[/\\\\].+?)[\"\']", # Windows path surrounded by ' or "
                  "[\"\'](~[/\\\\].+?)[\"\']" # Path with tilde surrounded by ' or "
  )
paths <- list()
for (re in path_regex) {
paths <- c(paths, stringr::str_match_all(string, re))
}
paths <- Reduce(rbind, paths)[, 2]
return(paths)
}
# Check for absolute paths that should be relative paths
#
# Looks for absolute paths between quotation marks (to detect strings in code).
# The paths have to be within the same project.
check_paths <- function(input, knit_root_dir) {
  # Can't assume a workflowr project just because it uses wflow_html().
proj_dir <- get_proj_dir(knit_root_dir)
lines <- readLines(input)
paths <- detect_abs_path(lines)
# Because fs doesn't remove the ~
paths_original <- paths
paths <- absolute(paths)
names(paths) <- paths_original
# Remove any duplicates
paths <- paths[!duplicated(paths)]
if (length(paths) > 0) {
internal <- vapply(paths, fs::path_has_parent, logical(1),
parent = proj_dir)
paths <- paths[internal]
}
if (length(paths) == 0) {
pass <- TRUE
summary <- "<strong>File paths:</strong> relative"
details <-
"
Great job! Using relative paths to the files within your workflowr project
makes it easier to run your code on other machines.
"
} else {
pass <- FALSE
summary <- "<strong>File paths:</strong> absolute"
# List the absolute paths and the suggested relative paths (need to be
# relative to knit_root_dir)
paths_df <- data.frame(absolute = names(paths),
relative = relative(paths, start = knit_root_dir),
stringsAsFactors = FALSE)
# If the original absolute path uses backslashes on Windows, use backslashes
# for the suggested relative path. Also display double backslashes as it
# would appear in R code.
paths_w_backslash <- stringr::str_detect(paths_df$absolute, "\\\\")
paths_df$relative[paths_w_backslash] <- stringr::str_replace_all(paths_df$relative[paths_w_backslash],
"/", "\\\\\\\\\\\\\\\\")
paths_df$absolute[paths_w_backslash] <- stringr::str_replace_all(paths_df$absolute[paths_w_backslash],
"\\\\\\\\", "\\\\\\\\\\\\\\\\")
paths_df_html <- convert_df_to_html_table(paths_df)
details <- glue::glue("
<p>Using absolute paths to the files within your workflowr project makes it
difficult for you and others to run your code on a different machine. Change
the absolute path(s) below to the suggested relative path(s) to make your code
more reproducible.</p>
{paths_df_html}
")
}
return(list(pass = pass, summary = summary, details = details))
}
# If uncertain if this is a workflowr project, search for these files in the
# following order to attempt to find root of current project.
#
# *.Rproj
# .git/
# _workflowr.yml
#
# If none of these are present, return the input directory.
get_proj_dir <- function(directory) {
# RStudio project file, .Rproj
proj_dir <- try(rprojroot::find_rstudio_root_file(path = directory),
silent = TRUE)
if (!inherits(proj_dir, "try-error")) return(proj_dir)
# .git/
proj_dir <- try(rprojroot::find_root_file(criterion = rprojroot::is_git_root,
path = directory),
silent = TRUE)
if (!inherits(proj_dir, "try-error")) return(proj_dir)
# _workflowr.yml file
proj_dir <- try(rprojroot::find_root(rprojroot::has_file("_workflowr.yml"),
path = directory),
silent = TRUE)
if (!inherits(proj_dir, "try-error")) return(proj_dir)
return(directory)
}
# Scrub HTML and other generated content (e.g. site_libs). It's ok that these
# have uncommitted changes.
scrub_status <- function(status, repo, output_dir, remove_ignored = FALSE) {
s <- status_to_df(status)
full_path <- file.path(git2r::workdir(repo), s$file)
generated <- vapply(full_path, fs::path_has_parent, logical(1),
parent = absolute(output_dir))
s <- s[!generated, ]
s <- df_to_status(s)
if (remove_ignored) s$ignored <- NULL
return(s)
}
# ==== End of R/report.R ====

# ==== R/utility.R ====
# Non-exported utility functions
#
# See tests/testthat/test-utility.R for usage examples.
# Obtain the most upstream existing path.
#
# normalizePath only returns the absolute path if the directory exists. It is
# often useful to expand a potential path to an absolute path for debugging and
# error handling. This function returns the most upstream existing path as an
# absolute path.
#
# Currently it is used by `wflow_start` to check for the presence of an upstream
# Git repository before creating a new project directory.
#
# path - a path to a file or directory. Can be relative or absolute, existing or
# non-existing.
#
obtain_existing_path <- function(path) {
  if (length(path) > 1) stop("Invalid input: the vector should only have one element")
if (is.null(path)) stop("Invalid input: NULL")
if (is.na(path)) stop("Invalid input: NA")
if (!is.character(path)) stop("Invalid input: ", path)
if (fs::dir_exists(path)) {
return(absolute(path))
} else {
return(obtain_existing_path(dirname(path)))
}
}
# Wrap long messages
# https://github.com/workflowr/workflowr/issues/29
wrap <- function(...) {
input <- list(...)
if (!all(sapply(input, is.character)))
stop("All input must be a character vector")
m <- paste(unlist(input), collapse = "")
paste(strwrap(m), collapse = "\n")
}
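# Example (line breaks depend on the current value of getOption("width")):
# > cat(wrap("This informational message is longer than the console width, ",
# +          "so it is wrapped across multiple lines."))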
# Convert R Markdown file to corresponding HTML
to_html <- function(files, outdir = NULL) {
ext <- tools::file_ext(files)
if (!all(stringr::str_detect(ext, "[Rr]md$")))
stop("Invalid file extension")
html <- stringr::str_replace(files, "[Rr]md$", "html")
if (!is.null(outdir)) {
# Remove trailing slash if present
outdir <- stringr::str_replace(outdir, "/$", "")
# Only prepend outdir if it's not "." for current working directory
if (outdir == ".") {
html <- basename(html)
} else {
html <- file.path(outdir, basename(html))
}
}
return(html)
}
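# Examples:
# > to_html("analysis/index.Rmd")
# [1] "analysis/index.html"
# > to_html("analysis/index.Rmd", outdir = "docs")
# [1] "docs/index.html"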
# Get an absolute path while handling cross-platform filepath issues
#
# path - a vector of paths
absolute <- function(path) {
if (is.null(path)) return(path)
if (all(is.na(path))) return(path)
if (!is.character(path))
stop("path must be NULL or a character vector")
newpath <- path
# Convert to absolute path
newpath <- fs::path_abs(newpath)
# Expand ~ using R's definition of user directory
newpath <- fs::path_expand_r(newpath)
# Ensure Windows Drive is uppercase
newpath <- toupper_win_drive(newpath)
# Resolve symlinks
newpath <- resolve_symlink(newpath)
newpath <- as.character(newpath)
return(newpath)
}
# Get a relative path while handling cross-platform filepath issues
#
# path - a vector of paths
#
# start - a single starting path to be relative to
relative <- function(path, start = getwd()) {
if (is.null(path)) return(path)
if (all(is.na(path))) return(path)
if (!is.character(path))
stop("path must be NULL or a character vector")
if (!(is.character(start) && length(start) == 1))
stop("start must be a character vector of length 1")
newpath <- path
# First resolve any symlinks
newpath <- absolute(newpath)
start <- absolute(start)
# Handle any issues with Windows drives
if (.Platform$OS.type == "windows") {
# Require that all files are on the same Windows drive
drive <- unique(get_win_drive(newpath))
drive <- drive[!is.na(drive)]
if (length(drive) != 1) {
stop("All paths must be on the same Windows drive", call. = FALSE)
}
# If path and start are on different drives, return the absolute path
drive_start <- get_win_drive(start)
if (drive != drive_start) return(newpath)
}
# Convert to relative path
newpath <- fs::path_rel(newpath, start = start)
newpath <- as.character(newpath)
return(newpath)
}
# Resolve symlinks in a filepath even if file does not exist.
#
# Input: Vector of absolute filepaths
# Output: Vector of absolute filepaths with any symlinks resolved
resolve_symlink <- function(path) {
return(vapply(path, resolve_symlink_, character(1), USE.NAMES = FALSE))
}
# Recursive function to resolve symlinks one path at a time.
resolve_symlink_ <- function(path) {
# Base case #1: If path exists, resolve symlink
if (fs::file_exists(path)) {
return(fs::path_real(path))
}
parts <- fs::path_split(path)[[1]]
len <- length(parts)
# Base case #2: Only 1 part of file path remaining. Return it.
#
# Possible cases:
# * Invalid input such as NA
# * A Fake file path that doesn't exist on the machine
if (len == 1) {
return(path)
}
# Recursive case
return(fs::path_join(c(
resolve_symlink_(fs::path_join(parts[-len])),
parts[len])))
}
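# Example sketch (hypothetical paths): if /data is a symlink to /mnt/storage,
# the symlink is resolved even though the final file does not exist yet.
# > resolve_symlink("/data/project/results.txt")
# [1] "/mnt/storage/project/results.txt"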
# Attempts to delete file(s) and/or directory(ies) pointed to by path.
#
# path - character vector
#
# Fails gracefully. unlink() fails silently. fs::file_delete() properly throws
# an error, but it is not the most informative. This will report the files that
# weren't deleted as well as their file permissions.
wflow_delete <- function(path) {
# Needed for, e.g. AppVeyor, where relative paths can cause permission issues
path <- absolute(path)
attempt_to_delete <- try(fs::file_delete(path), silent = TRUE)
persistent <- fs::file_exists(path)
if (any(persistent)) {
stop("Unable to delete the following file(s) or directory(ies):\n",
paste(path[persistent], collapse = "\n"),
"\nDo you have permission to delete them? The permissions are, in order:\n",
paste(fs::file_info(path[persistent])$permissions, collapse = "\n"),
call. = FALSE)
}
return(invisible(path))
}
# Because ~ maps to ~/Documents on Windows, need a reliable way to determine the
# user's home directory on Windows.
# https://cran.r-project.org/bin/windows/base/rw-FAQ.html#What-are-HOME-and-working-directories_003f
# https://stat.ethz.ch/pipermail/r-help/2007-March/128221.html
# https://github.com/ropensci/git2r/pull/320#issuecomment-367038961
get_home <- function() {
# If non-Windows, it is straightforward
if (.Platform$OS.type != "windows") {
home <- "~"
return(absolute(home))
} else {
home <- Sys.getenv("USERPROFILE")
home <- absolute(home)
if (!fs::dir_exists(home)) {
stop(wrap("Unable to determine user's home directory on Windows: ", home))
}
return(home)
}
}
# Detect if a filepath contains any globbing characters: *, ?, [...]
detect_glob <- function(paths) {
stringr::str_detect(paths, pattern = "\\*") |
stringr::str_detect(paths, pattern = "\\?") |
stringr::str_detect(paths, pattern = "\\[.+\\]")
}
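# Example:
# > detect_glob(c("analysis/*.Rmd", "analysis/index.Rmd", "fig[0-9].png"))
# [1]  TRUE FALSE  TRUE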
# Perform file globbing
#
# Sys.glob works great on filepaths with globbing characters, but its behavior
# for non-globs depends on 1) if the filepath exists, 2) if the path is to a
# file or a directory (with or without a trailing slash), and 3) which OS the
# command is run on. To avoid these edge cases, this function only runs Sys.glob
# on filepaths that contain globbing characters.
glob <- function(paths) {
is_glob <- detect_glob(paths)
expanded <- Map(Sys.glob, paths)
invalid_glob <- is_glob & vapply(expanded, length, numeric(1)) == 0
if (any(invalid_glob))
stop("Invalid file glob: ", paths[invalid_glob][1], call. = FALSE)
result <- ifelse(is_glob, expanded, paths)
result <- unique(unlist(result))
return(result)
}
# If the user doesn't define a URL for a host repo in the YAML header or
# _workflowr.yml, determine the URL from the remote "origin". If this remote
# doesn't exist, return NA.
#
# GitHub:
# HTTPS: https://github.com/workflowr/workflowr.git
# SSH: [email protected]:workflowr/workflowr.git
# Return value: https://github.com/workflowr/workflowr
#
# GitLab:
# HTTPS: https://gitlab.com/jdblischak/wflow-gitlab.git
# SSH: [email protected]:jdblischak/wflow-gitlab.git
# Return value: https://gitlab.com/jdblischak/wflow-gitlab
get_host_from_remote <- function(path) {
if (!git2r::in_repository(path = path)) {
return(NA_character_)
}
r <- git2r::repository(path = path, discover = TRUE)
remotes <- git2r::remotes(r)
if (!("origin" %in% remotes)) {
return(NA_character_)
}
origin <- git2r::remote_url(r, remote = "origin")
host <- origin
# Remove trailing .git
host <- stringr::str_replace(host, "\\.git$", "")
# If SSH, replace with HTTPS URL
host <- stringr::str_replace(host, "^git@(.+):", "https://\\1/")
return(host)
}
# Get output directory if it exists
get_output_dir <- function(directory, yml = "_site.yml") {
stopifnot(fs::dir_exists(directory))
  site_fname <- file.path(directory, yml)
if (!fs::file_exists(site_fname)) {
return(NULL)
}
site_yml <- yaml::yaml.load_file(site_fname)
if (is.null(site_yml$output_dir)) {
output_dir <- directory
} else {
output_dir <- file.path(directory, site_yml$output_dir)
fs::dir_create(output_dir)
output_dir <- absolute(output_dir)
}
return(output_dir)
}
# Convert the output of git2r::status() to a data frame for easier manipulation
status_to_df <- function(x) {
stopifnot(inherits(x, "git_status"))
col_status <- character()
col_substatus <- character()
col_file <- character()
for (stat in names(x)) {
files <- unlist(x[[stat]])
col_status <- c(col_status, rep(stat, length(files)))
col_substatus <- c(col_substatus, names(files))
col_file <- c(col_file, files)
}
out <- data.frame(status = col_status,
substatus = col_substatus,
file = col_file,
row.names = seq_along(col_status),
stringsAsFactors = FALSE)
return(out)
}
# Convert data frame to git_status
df_to_status <- function(d) {
stopifnot(is.data.frame(d),
colnames(d) == c("status", "substatus", "file"))
status <- list(staged = structure(list(), .Names = character(0)),
unstaged = structure(list(), .Names = character(0)),
untracked = structure(list(), .Names = character(0)))
for (i in seq_along(d$file)) {
status[[d$status[i]]] <- c(status[[d$status[i]]], list(d$file[i]))
names(status[[d$status[i]]])[length(status[[d$status[i]]])] <- d$substatus[i]
}
class(status) <- "git_status"
return(status)
}
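# Round-trip sketch (assumes a git2r repository object `r`): the data frame
# form is convenient for filtering rows, and can be converted back for printing.
# > d <- status_to_df(git2r::status(r))
# > d <- d[d$status != "untracked", ]
# > print(df_to_status(d))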
# Determine if a file is executable
#
# https://github.com/r-lib/fs/issues/172
file_is_executable <- function(f) {
stopifnot(fs::file_exists(f))
if (fs::file_access(f, mode = "execute")) return(TRUE)
return(FALSE)
}
# Automatically try to determine the best setting for knitr's dependson chunk
# option based on the caching status of other chunks in the document.
#
# Usage: Set dependson=workflowr:::wflow_dependson() for a given chunk
#
# Desired behavior:
# * If any other cached chunks are invalidated and re-executed, this chunk
# should also be re-executed.
# * If all the cached chunks in a document are read from the cache, also read
# this chunk from the cache.
# * Avoid as many knitr warnings as possible about depending on non-cached
# chunks. These are harmless, but avoiding these demonstrates that the
# function is using dependson as it is intended to be used.
#
# Long term goal: Use this with the sessionInfo() chunk inserted by workflowr
#
# Warning: knitr caching is complicated. Make sure to test this function's
# behavior for your setup before relying on it for anything important.
#
# https://yihui.name/knitr/options/#cache
# https://yihui.name/knitr/demo/cache/
# https://stackoverflow.com/a/47055058/2483477
# https://stackoverflow.com/questions/25436389/dependson-option-does-not-work-in-knitr
#
wflow_dependson <- function() {
cache_global <- knitr::opts_chunk$get("cache")
# If cache=TRUE is set globally, depend on all chunks except those that are
# explicitly labeled cache=FALSE.
if (cache_global) {
labels_all <- knitr::all_labels()
labels_cache_false <- knitr::all_labels(expression(cache == FALSE))
labels <- setdiff(labels_all, labels_cache_false)
# Remove label of current chunk
label_self <- knitr::opts_current$get(name = "label")
labels <- setdiff(labels, label_self)
# Remove the set.seed chunk inserted by workflowr b/c it can't be cached
labels <- setdiff(labels, "seed-set-by-workflowr")
if (length(labels) > 0) return(labels)
}
# If specific chunks are cached, depend on these
labels_cache_true <- knitr::all_labels(expression(cache == TRUE))
if (length(labels_cache_true) > 0) return(labels_cache_true)
# If the document doesn't use caching, don't use dependson.
return(NULL)
}
# Ensure that Windows drive is capitalized
#
# Motivation: getwd() on winbuilder returns d:/ but tempdir() returns D:/. This
# causes problems when creating relative paths.
toupper_win_drive <- function(path) {
stringr::str_replace(path, "^([a-z]):/", toupper)
}
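# Example (an already-uppercase drive is left unchanged):
# > toupper_win_drive(c("d:/home/user", "D:/home/user"))
# [1] "D:/home/user" "D:/home/user"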
# Return the Windows drive. Called by relative().
#
# > get_win_drive(c("C:/a/b/c", "D:/a/b/c"))
# [1] "C:" "D:"
get_win_drive <- function(path) {
drive <- fs::path_split(path)
drive <- vapply(drive, function(x) x[1], FUN.VALUE = character(1))
return(drive)
}
# Return TRUE if getOption("browser") is properly set. Required for opening URLs
# via browseURL().
#
# This can either be an R function that accepts a URL or a string with the
# name of the system program to invoke (e.g. "firefox"). If it is NULL or "",
# it won't work.
check_browser <- function() {
browser_opt <- getOption("browser")
if (is.null(browser_opt)) return(FALSE)
if (is.function(browser_opt)) return(TRUE)
if (nchar(browser_opt) > 0) return(TRUE)
return(FALSE)
}
# Only return the first line of a multi-line string(s)
get_first_line <- function(x) {
split <- stringr::str_split(x, "\n")
first_lines <- vapply(split, function(x) x[1], character(1))
return(first_lines)
}
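# Example:
# > get_first_line("Add new analysis\n\nExtended description of the commit")
# [1] "Add new analysis"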
# Check for `site: workflowr::wflow_site` in index.Rmd
check_site_generator <- function(index) {
if (!fs::file_exists(index))
stop(glue::glue("Unable to find index.Rmd. Expected to find {index}"),
call. = FALSE)
header <- rmarkdown::yaml_front_matter(index)
if (is.null(header$site)) return(FALSE)
if (header$site == "workflowr::wflow_site") return(TRUE)
return(FALSE)
}
# Save the files open in RStudio editor
autosave <- function() {
if (!rstudioapi::isAvailable(version_needed = "1.1.287")) return(FALSE)
rstudioapi::documentSaveAll()
}
check_wd_exists <- function() {
wd <- fs::path_wd()
  if (length(wd) == 0)
stop("The current working directory doesn't exist.",
" Use setwd() to change to an existing directory.",
call. = FALSE)
}
# Wrap each message and join them, separated by two newlines
create_newlines <- function(m) {
m <- lapply(m, wrap)
m <- unlist(m)
m <- paste(m, collapse = "\n\n")
return(m)
}
# Get Font Awesome icon for a brand
#
# Decide between Font Awesome 4 and 5
# https://github.com/rstudio/rmarkdown/issues/1991
get_fa_brand_icon <- function(brand) {
if (utils::packageVersion("rmarkdown") < "2.6") {
# Font Awesome 4: rmarkdown adds preceding "fa"
return(sprintf("fa-%s", brand))
}
# Font Awesome 5
return(sprintf("fab fa-%s", brand))
}
# ==== End of R/utility.R ====

# ==== R/wflow_build.R ====
#' Build the site
#'
#' \code{wflow_build} builds the website from the files in the analysis
#' directory. This is intended to be used when developing your code to preview
#' the changes. When you are ready to commit the files, use
#' \code{\link{wflow_publish}}.
#'
#' \code{wflow_build} has multiple, non-mutually exclusive options for deciding
#' which files to build. If multiple options are used, then the argument
#' \code{combine} determines which files will be built. If \code{combine ==
#' "or"} (the default), then any file specified by at least one of the arguments
#' will be built. In contrast, if \code{combine == "and"}, then only files
#' specified by all of the arguments will be built. The argument \code{make} is
#' the most useful for interactively performing your analysis. The other options
#' are more useful when you are ready to publish specific files with
#' \code{\link{wflow_publish}} (which passes these arguments to
#' \code{wflow_build}). Here are the options for specifying files to be built:
#'
#' \itemize{
#'
#' \item Files specified via the argument \code{files}
#'
#' \item \code{make = TRUE} - Files which have been modified more recently
#' than their corresponding HTML files
#'
#' \item \code{update = TRUE} - Previously published files which have been
#' committed more recently than their corresponding HTML files.
#' However, files which currently have staged or unstaged changes are not
#' included.
#'
#' \item \code{republish = TRUE} - All published files.
#' However, files which currently have staged or unstaged changes are not
#' included.
#'
#' }
#'
#' Under the hood, \code{wflow_build} is a wrapper for
#' \code{\link[rmarkdown]{render_site}} from the package \link{rmarkdown}. By
#' default (\code{local = FALSE}), the code is executed in an isolated R
#' session. This is done using \code{\link[callr:r]{callr::r_safe}}.
#'
#' @section Comparison to RStudio Knit button:
#'
#' \code{wflow_build} is intentionally designed to be similar to clicking on the
#' Knit button in RStudio. Both isolate the code execution in a separate R
#' process, thus ensuring the results are not dependent on the state of the
#' current R session. However, they do differ in a few ways:
#'
#' \describe{
#'
#' \item{Number of files}{The RStudio Knit button only builds the current Rmd
#' file open in the editor. In contrast, \code{wflow_build} can build any
#' number of Rmd files (each in their own separate R process) with a single
#' invocation, including accepting file globs.}
#'
#' \item{System and user profiles}{The two methods diverge the most in
#' their use of \code{.Rprofile} files. \code{wflow_build} ignores any system
#' or user profiles (i.e. \code{~/.Rprofile} on Linux/macOS or
#' \code{~/Documents/.Rprofile} on Windows). This is the default behavior of
#' \code{\link[callr:r]{callr::r_safe}}, which it calls to run the separate R
#' process. This is ideal for reproducibility. Otherwise the results could be
#' affected by custom settings made only on the user's machine. In contrast,
#' the RStudio Knit button loads any system or user level profiles, consistent
#' with its role as a development tool.}
#'
#' \item{Project-specific profiles}{A project-specific \code{.Rprofile} is
#' treated differently than system or user profiles. \code{wflow_build} only
#' loads a project-specific \code{.Rprofile} if it is located in the current
#' working directory in which \code{wflow_build} is invoked. This may be
#' confusing if this differs from the directory in which the code in the Rmd
#' is actually executed (the option \code{knit_root_dir} defined in
#' \code{_workflowr.yml}). The RStudio Knit button only loads a
#' project-specific \code{.Rprofile} if it is located in the same directory as
#' its setting "Knit Directory" is configured. For example, if "Knit Directory"
#' is set to "Document Directory", it will ignore any \code{.Rprofile} in the
#' root of the project. But it would load the \code{.Rprofile} if "Knit
#' Directory" was changed to "Project Directory".}
#' }
#'
#' The main takeaway from the above is that you should try to limit settings and
#' options defined in \code{.Rprofile} to affect the interactive R experience
#' and superficial behavior, e.g. the option \code{max.print} to limit the
#' number of lines that can be printed to the console. Any critical settings
#' that affect the results of the analysis should be explicitly set in the Rmd
#' file.
#'
#' @param files character (default: NULL). Files to build. Only allows files in
#' the analysis directory with the extension Rmd or rmd. If \code{files} is
#' \code{NULL}, the default behavior is to build all outdated files (see
#' argument \code{make} below). Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' The files are always built in the order they are listed.
#' @param make logical (default: \code{is.null(files)}). When \code{make =
#' TRUE}, build any files that have been modified more recently than their
#' corresponding HTML files (inspired by
#' \href{https://en.wikipedia.org/wiki/Make_(software)}{Make}). This is the
#' default action if no files are specified.
#' @param update logical (default: FALSE). Build any files that have been
#' committed more recently than their corresponding HTML files (and do not
#' have any unstaged or staged changes). This ensures that the commit version
#' ID inserted into the HTML corresponds to the exact version of the source
#' file that was used to produce it.
#' @param republish logical (default: FALSE). Build all published R Markdown
#' files (that do not have any unstaged or staged changes). Useful for
#' site-wide changes like updating the theme, navigation bar, or any other
#' setting in \code{_site.yml}.
#' @param combine character (default: \code{"or"}). Determine how to combine the
#' files from the arguments \code{files}, \code{make} (\code{wflow_build()}
#' only), \code{update}, and \code{republish}. When \code{combine} is
#' \code{"or"}, any file specified by at least one of these arguments will be
#' built. When \code{combine} is \code{"and"}, only files specified by all of
#' these arguments will be built.
#' @param view logical (default: \code{getOption("workflowr.view")}). View the
#' website with \code{\link{wflow_view}} after building files. If only one
#' file is built, it is opened. If more than one file is built, the main index
#' page is opened. Not applicable if no files are built or if \code{dry_run =
#' TRUE}.
#' @param clean_fig_files logical (default: FALSE). Delete existing figure files
#' for each R Markdown file prior to building it. This ensures that only
#' relevant figure files are saved. As you develop an analysis, it is easy to
#' generate lots of unused plots due to changes in the number of code chunks
#' and their names. However, if you are caching chunks during code
#' development, this could cause figures to disappear. Note that
#' \code{\link{wflow_publish}} uses \code{clean_fig_files = TRUE} to ensure
#' the results can be reproduced.
#' @param delete_cache logical (default: FALSE). Delete the cache directory (if
#' it exists) for each R Markdown file prior to building it.
#' @param seed numeric (default: 12345). The seed to set before building each
#' file. Passed to \code{\link{set.seed}}. \bold{DEPRECATED:} The seed set
#' here has no effect if you are using \code{\link{wflow_html}} as the output
#' format defined in \code{_site.yml}. This argument is for backwards
#' compatibility with previous versions of workflowr.
#' @param log_dir character (default: NULL). The directory to save log files
#' from building files. It will be created if necessary and ignored if
#' \code{local = TRUE}. The default is to use a directory named
#' \code{workflowr} in \code{\link{tempdir}}.
#' @param verbose logical (default: FALSE). Display the build log directly in
#' the R console as each file is built. This is useful for monitoring
#' long-running code chunks.
#' @param local logical (default: FALSE). Build files locally in the R console.
#' This should only be used for debugging purposes. The default is to build
#' each file in its own separate fresh R process to ensure each file is
#' reproducible in isolation. This is done using
#' \code{\link[callr:r]{callr::r_safe}}.
#' @param dry_run logical (default: FALSE). List the files to be built, without
#' building them.
#'
#' @inheritParams wflow_git_commit
#'
#' @return An object of class \code{wflow_build}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{files}: The input argument \code{files}
#'
#' \item \bold{make}: The input argument \code{make}
#'
#' \item \bold{update}: The input argument \code{update}
#'
#' \item \bold{republish}: The input argument \code{republish}
#'
#' \item \bold{view}: The input argument \code{view}
#'
#' \item \bold{clean_fig_files}: The input argument \code{clean_fig_files}
#'
#' \item \bold{delete_cache}: The input argument \code{delete_cache}
#'
#' \item \bold{seed}: The input argument \code{seed}
#'
#' \item \bold{log_dir}: The directory where the log files were saved
#'
#' \item \bold{verbose}: The input argument \code{verbose}
#'
#' \item \bold{local}: The input argument \code{local}
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}
#'
#' \item \bold{built}: The relative paths to the built R Markdown files
#'
#' \item \bold{html}: The relative paths to the corresponding HTML files
#'
#' }
#'
#' @seealso \code{\link{wflow_publish}}
#'
#' @examples
#' \dontrun{
#'
#' # Build any files which have been modified
#' wflow_build() # equivalent to wflow_build(make = TRUE)
#' # Build a single file
#' wflow_build("file.Rmd")
#' # Build multiple files
#' wflow_build(c("file1.Rmd", "file2.Rmd"))
#' # Build multiple files using a file glob
#' wflow_build("file*.Rmd")
#' # Build every published file
#' wflow_build(republish = TRUE)
#' # Build file.Rmd and any files which have been modified
#' wflow_build("file.Rmd", make = TRUE)
#' }
#'
#' @import rmarkdown
#' @export
wflow_build <- function(files = NULL, make = is.null(files),
update = FALSE, republish = FALSE,
combine = "or",
view = getOption("workflowr.view"),
clean_fig_files = FALSE, delete_cache = FALSE,
seed = 12345, log_dir = NULL, verbose = FALSE,
local = FALSE, dry_run = FALSE, project = ".") {
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, allow_null = TRUE, rmd_only = TRUE,
convert_to_relative_paths = TRUE)
assert_is_flag(make)
assert_is_flag(update)
assert_is_flag(republish)
combine <- match.arg(combine, choices = c("or", "and"))
assert_is_flag(view)
assert_is_flag(clean_fig_files)
assert_is_flag(delete_cache)
if (!(is.numeric(seed) && length(seed) == 1))
stop("seed must be a one element numeric vector")
log_dir <- process_log_dir(log_dir)
assert_is_flag(verbose)
assert_is_flag(local)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
do.call(wflow_build_, args = as.list(environment()))
}
wflow_build_ <- function() {}
formals(wflow_build_) <- formals(wflow_build)
body(wflow_build_) <- quote({
# Check to see if pandoc is installed
  if (!rmarkdown::pandoc_available())
stop("Pandoc is not installed. Please use RStudio or install pandoc manually")
if (isTRUE(getOption("workflowr.autosave"))) autosave()
# Obtain files to consider ---------------------------------------------------
p <- wflow_paths(project = project)
# All files to consider (ignore files that start with an underscore)
files_all <- list.files(path = p$analysis, pattern = "^[^_].*\\.[Rr]md$",
full.names = TRUE)
files_all <- relative(files_all)
# All files must be in the analysis subdirectory. Since it's possible to
# directly pass a file that starts with an underscore, the file may not be in
# files_all.
if (!is.null(files)) {
if (!all(dirname(files) == p$analysis)) {
stop(wrap(
"Only files in the analysis directory can be built with wflow_build. Use
`rmarkdown::render` to build non-workflowr files."))
}
}
# Check for site generator ---------------------------------------------------
index <- file.path(p$analysis, "index.Rmd")
if (!check_site_generator(index)) {
m <-
"
Missing workflowr-specific site generator. To ensure your workflowr
project functions properly, please add the following to the YAML header of
index.Rmd:
site: workflowr::wflow_site
"
warning(wrap(m), call. = FALSE, immediate. = TRUE)
}
# Determine which files to build ---------------------------------------------
# Based on input argument `combine`, decide how to combine the files
# specified by different arguments:
#
# "or" (default): Combine files with `union()`, i.e. build all files specified
# by any of the arguments.
#
# "and": Combine files with `intersect()`, i.e. only build files that are specified
# by all of the arguments.
files_to_build <- files
if (combine == "and" && length(files_to_build) == 0) {
stop("combine = \"and\" can only be used when explicitly specifying Rmd files to build with the argument `files`")
}
if (combine == "and") {
combine_files_function <- intersect
} else if (combine == "or") {
combine_files_function <- union
}
if (make) {
files_make <- return_modified_rmd(files_all, p$docs)
files_to_build <- combine_files_function(files_to_build, files_make)
}
if (update || republish) {
s <- wflow_status(project = project)
if (update) {
files_update <- rownames(s$status)[s$status$mod_committed &
!s$status$mod_unstaged &
!s$status$mod_staged]
files_to_build <- combine_files_function(files_to_build, files_update)
}
if (republish) {
files_republish <- rownames(s$status)[s$status$published &
!s$status$mod_unstaged &
!s$status$mod_staged]
files_to_build <- combine_files_function(files_to_build, files_republish)
}
}
# Build files ----------------------------------------------------------------
if (!dry_run) {
n_files <- length(files_to_build)
if (n_files > 0) {
wd <- getwd()
message(sprintf("Current working directory: %s", wd))
message(sprintf("Building %i file(s):", n_files))
}
for (f in files_to_build) {
# Determine knit directory
wflow_opts <- wflow_options(f)
if (wflow_opts$knit_root_dir != wd) {
message(sprintf("Building %s in %s", f, wflow_opts$knit_root_dir))
} else {
message("Building ", f)
}
# Remove figure files to prevent accumulating outdated files
if (clean_fig_files) {
path <- create_figure_path(f)
figs_analysis <- file.path(p$analysis, path)
unlink(figs_analysis, recursive = TRUE)
figs_docs <- file.path(p$docs, path)
unlink(figs_docs, recursive = TRUE)
}
# Delete the cache directory
dir_cache <- fs::path_ext_remove(f)
dir_cache <- glue::glue("{dir_cache}_cache")
if (fs::dir_exists(dir_cache)) {
if (delete_cache) {
wflow_delete(dir_cache)
message(" - Note: Deleted the cache directory before building")
} else {
message(" - Note: This file has a cache directory")
}
}
if (local) {
build_rmd(f, seed = seed, envir = .GlobalEnv)
} else {
build_rmd_external(f, seed = seed, log_dir = log_dir, verbose = verbose,
envir = .GlobalEnv)
}
}
# Create .nojekyll if it doesn't exist
nojekyll <- file.path(p$docs, ".nojekyll")
if (!fs::file_exists(nojekyll)) {
fs::file_create(nojekyll)
}
}
# View files -----------------------------------------------------------------
# When 0 files are built, wflow_build() will do nothing.
#
# When 1 file is built, wflow_build() will open that file.
#
  # When more than one file is built, wflow_build() will open index.html.
if (!dry_run && view) {
n_files_to_build <- length(files_to_build)
if (n_files_to_build == 1) {
wflow_view(files_to_build, project = project)
} else if (n_files_to_build > 1) {
index <- file.path(p$analysis, "index.Rmd")
wflow_view(index, project = project)
}
}
# Prepare output -------------------------------------------------------------
o <- list(files = files, make = make,
update = update, republish = republish, combine = combine, view = view,
clean_fig_files = clean_fig_files, delete_cache = delete_cache,
seed = seed, log_dir = log_dir, verbose = verbose,
local = local, dry_run = dry_run,
built = files_to_build,
html = to_html(files_to_build, outdir = p$docs))
class(o) <- "wflow_build"
return(o)
})
#' @export
print.wflow_build <- function(x, ...) {
cat("Summary from wflow_build\n\n")
cat("Settings:\n")
if (x$make) cat(" make: TRUE")
if (x$update) cat(" update: TRUE")
if (x$republish) cat(" republish: TRUE")
cat(sprintf(" combine: \"%s\"", x$combine))
if (x$clean_fig_files) cat(" clean_fig_files: TRUE")
if (x$delete_cache) cat(" delete_cache: TRUE")
if (x$verbose) cat(" verbose: TRUE")
if (x$local) cat(" local: TRUE")
if (x$dry_run) cat(" dry_run: TRUE")
cat("\n\n")
if (length(x$built) == 0) {
cat(wrap("No files to build"))
cat("\n")
return(invisible(x))
}
  if (x$dry_run && x$local) {
cat(wrap("The following would be built locally in the current R session:"),
"\n\n")
  } else if (!x$dry_run && x$local) {
cat(wrap("The following were built locally in the current R session:"),
"\n\n")
  } else if (x$dry_run && !x$local) {
cat(wrap("The following would be built externally each in their own fresh R session:"),
"\n\n")
  } else if (!x$dry_run && !x$local) {
cat(wrap("The following were built externally each in their own fresh R session:"),
"\n\n")
}
cat(x$html, sep = "\n")
  if (!x$dry_run && !x$local) {
cat("\n")
cat(wrap(sprintf("Log files saved in %s", x$log_dir)))
cat("\n")
}
return(invisible(x))
}
# Return the R Markdown files which have been modified more recently than their
# corresponding HTML files.
#
# Input: character. path to R Markdown files in analysis directory
return_modified_rmd <- function(rmd_files, docs) {
# Expected html files
html_files <- to_html(rmd_files, outdir = docs)
# Determine which R Markdown files have been updated and need to be rendered
files_to_update <- character()
for (i in seq_along(rmd_files)) {
rmd_timestamp <- file.mtime(rmd_files[i])
html_timestamp <- file.mtime(html_files[i])
if (is.na(html_timestamp)) {
files_to_update <- c(files_to_update, rmd_files[i])
} else if (rmd_timestamp > html_timestamp) {
files_to_update <- c(files_to_update, rmd_files[i])
}
}
return(files_to_update)
}
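# Example sketch (hypothetical files): if analysis/a.Rmd was edited after
# docs/a.html was last rendered, and docs/b.html does not exist yet, then
# > return_modified_rmd(c("analysis/a.Rmd", "analysis/b.Rmd"), "docs")
# [1] "analysis/a.Rmd" "analysis/b.Rmd"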
build_rmd_external <- function(rmd, seed, log_dir, verbose = FALSE, ...) {
if (!(is.character(rmd) && length(rmd) == 1))
stop("rmd must be a one element character vector")
if (!fs::file_exists(rmd))
stop("rmd does not exist: ", rmd)
if (!(is.numeric(seed) && length(seed) == 1))
stop("seed must be a one element numeric vector")
if (!(is.character(log_dir) && length(log_dir) == 1))
stop("log_dir must be a one element character vector")
if (!fs::dir_exists(log_dir)) {
fs::dir_create(log_dir)
message("log directory created: ", log_dir)
}
rmd_base <- basename(rmd)
date_current <- format(Sys.time(), "%Y-%m-%d-%Hh-%Mm-%Ss")
stdout_file <- file.path(log_dir,
paste(rmd_base, date_current, "out.txt", sep = "-"))
stderr_file <- file.path(log_dir,
paste(rmd_base, date_current, "err.txt", sep = "-"))
result <- tryCatch(
callr::r_safe(build_rmd,
args = list(rmd, seed, ...),
stdout = stdout_file,
stderr = stderr_file,
show = verbose),
error = function(e) {
message(wrap("Build failed. See log files for full details."))
message("stdout: ", stdout_file)
message("stderr: ", stderr_file)
stdout_lines <- readLines(stdout_file)
n <- length(stdout_lines)
# Print the final 10 lines of standard out to give context to error
cat(stdout_lines[pmax(1, n - 10):n], sep = "\n")
stop(conditionMessage(e), call. = FALSE)
}
)
return(invisible(rmd))
}
build_rmd <- function(rmd, seed, ...) {
if (!(is.character(rmd) && length(rmd) == 1))
stop("rmd must be a one element character vector")
if (!(is.numeric(seed) && length(seed) == 1))
stop("seed must be a one element numeric vector")
if (!fs::file_exists(rmd))
stop("rmd must exist")
set.seed(seed)
rmarkdown::render_site(rmd, ...)
}
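# A usage sketch (hypothetical paths and seed). build_rmd() renders in the
# current session, while build_rmd_external() renders in a fresh R session via
# callr and writes stdout/stderr logs to log_dir:
#
# > build_rmd("analysis/index.Rmd", seed = 12345)
# > build_rmd_external("analysis/index.Rmd", seed = 12345, log_dir = tempdir())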
process_log_dir <- function(log_dir) {
if (is.null(log_dir)) {
log_dir <- use_default_log_dir()
}
assert_is_character(log_dir)
assert_has_length(log_dir, 1)
log_dir <- absolute(log_dir)
fs::dir_create(log_dir)
return(log_dir)
}
use_default_log_dir <- function() file.path(tempdir(), "workflowr")
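# A sketch of how the log directory is resolved (hypothetical paths; the
# actual values vary by session and working directory):
#
# > process_log_dir(NULL)
# [1] "/tmp/RtmpXYZ/workflowr"   # falls back to the default and creates it
# > process_log_dir("logs")
# [1] "/path/to/cwd/logs"        # made absolute and created if missing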
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_build.R
#' Commit files
#'
#' \code{wflow_git_commit} adds and commits files with Git. This is a convenience
#' function to run Git commands from the R console instead of the shell. For
#' most use cases, you should use \code{\link{wflow_publish}} instead, which
#' calls \code{wflow_git_commit} and then subsequently also builds and commits the
#' website files.
#'
#' Some potential use cases for \code{wflow_git_commit}:
#'
#' \itemize{
#'
#' \item Commit drafts which you do not yet want to be included in the website
#'
#' \item Commit files which do not directly affect the website (e.g. when you
#' are writing scripts for a data processing pipeline)
#'
#' \item Manually commit files in \code{docs/} (proceed with caution!). This
#' should only be done for content that is not automatically generated from the
#' source files in the analysis directory, e.g. an image file you want to
#' include in one of your pages.
#'
#' }
#'
#' Under the hood, \code{wflow_git_commit} is a wrapper for \code{\link[git2r]{add}}
#' and \code{\link[git2r]{commit}} from the package \link{git2r}.
#'
#' @param files character (default: NULL). Files to be added and committed with
#' Git. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param message character (default: NULL). A commit message.
#' @param all logical (default: FALSE). Automatically stage files that have been
#' modified and deleted. Equivalent to: \code{git commit -a}
#' @param force logical (default: FALSE). Allow adding otherwise ignored files.
#' Equivalent to: \code{git add -f}
#' @param dry_run logical (default: FALSE). Preview the proposed action but do
#' not actually add or commit any files.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return An object of class \code{wflow_git_commit}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{files}: The input argument \code{files}.
#'
#' \item \bold{message}: The message describing the commit.
#'
#' \item \bold{all}: The input argument \code{all}.
#'
#' \item \bold{force}: The input argument \code{force}.
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}.
#'
#' \item \bold{commit}: The object returned by
#' \link{git2r}::\code{\link[git2r]{commit}} (only included if \code{dry_run ==
#' FALSE}).
#'
#' \item \bold{commit_files}: The relative path(s) to the file(s) included in
#' the commit (only included if \code{dry_run == FALSE}).
#'
#' }
#'
#' @seealso \code{\link{wflow_publish}}
#'
#' @examples
#' \dontrun{
#'
#' # Commit a single file
#' wflow_git_commit("analysis/file.Rmd", "Add new analysis")
#' # Commit multiple files
#' wflow_git_commit(c("code/process-data.sh", "output/small-data.txt"),
#' "Process data set")
#' # Add and commit all tracked files, similar to `git commit -a`
#' wflow_git_commit(message = "Lots of changes", all = TRUE)
#' }
#'
#' @export
wflow_git_commit <- function(files = NULL, message = NULL, all = FALSE,
force = FALSE, dry_run = FALSE, project = ".") {
files <- process_input_files(files, allow_null = TRUE, files_only = FALSE,
convert_to_relative_paths = TRUE)
if (is.null(message)) {
message <- deparse(sys.call())
message <- paste(message, collapse = "\n")
} else if (is.character(message)) {
message <- create_newlines(message)
} else {
stop("message must be NULL or a character vector")
}
assert_is_flag(all)
assert_is_flag(force)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Fail early if no Git repository
if (!git2r::in_repository(project)) {
stop("No Git repository detected.")
}
if (!dry_run) check_git_config(project, "`wflow_git_commit`")
if (is.null(files) && !all)
stop("Must specify files to commit, set `all = TRUE`, or both",
call. = FALSE)
# Additional checks of files to be committed
if (!is.null(files)) {
check_files_in_git_repo(files)
check_file_sizes(files)
}
do.call(wflow_git_commit_, args = as.list(environment()))
}
# Internal function that performs add/commit. Called by wflow_git_commit.
#
# The primary motivation for having a separate internal function that is called
# by the user facing `wflow_git_commit` is so that `wflow_publish` can bypass the
# input checks in order to run Step 3 to publish the website files. In a dry
# run, some of the files may not yet be built (which would cause an error).
# Also, not every Rmd file will create output figures, but it's easier to just
# attempt to add figures for every file.
wflow_git_commit_ <- function() {}
formals(wflow_git_commit_) <- formals(wflow_git_commit)
body(wflow_git_commit_) <- quote({
# Establish connection to Git repository
r <- git2r::repository(path = project)
# Files cannot have merge conflicts
s <- git2r::status(r, ignored = TRUE)
s_df <- status_to_df(s)
# Fix file paths
s_df$file <- file.path(git2r::workdir(r), s_df$file)
s_df$file <- relative(s_df$file)
f_conflicted <- s_df$file[s_df$substatus == "conflicted"]
if (length(f_conflicted) > 0) {
stop(call. = FALSE, wrap(
"Cannot proceed due to merge conflicts in the following file(s):"
), "\n\n", paste(f_conflicted, collapse = "\n"))
}
if (!dry_run) {
# Add the specified files
if (!is.null(files)) {
git2r_add(r, files, force = force)
}
if (all) {
# Temporary fix until git2r::commit can do `git commit -a`
# https://github.com/ropensci/git2r/pull/283
#
# The above was merged, but `all = TRUE` is still unreliable. Just found a
# bug that affects Ubuntu and Windows, but not macOS. Manually adding all
# unstaged changes.
unstaged <- unlist(git2r::status(r)$unstaged)
unstaged <- file.path(git2r::workdir(r), unstaged)
git2r_add(r, unstaged)
}
# Commit
tryCatch(
commit <- git2r::commit(r, message = message, all = all),
error = function(e) {
if (stringr::str_detect(conditionMessage(e), "Nothing added to commit")) {
reason <- "Commit failed because no files were added."
if (!is.null(files)) {
reason <- c(reason, " Attempted to commit the following files:\n",
paste(absolute(files), collapse = "\n"))
}
} else {
reason <- "Commit failed for unknown reason."
}
stop(wrap(reason, "\n\nAny untracked files must be manually specified even
if `all = TRUE`."), call. = FALSE)
}
)
}
o <- list(files = files, message = message, all = all, force = force,
dry_run = dry_run)
class(o) <- "wflow_git_commit"
if (!dry_run) {
commit_files <- obtain_files_in_commit(r, commit)
o$commit <- commit
o$commit_files <- relative(commit_files)
}
return(o)
})
#' @export
print.wflow_git_commit <- function(x, ...) {
cat("Summary from wflow_git_commit\n\n")
if (x$dry_run) {
cat(wrap("The following would be attempted:"), "\n\n")
} else {
cat(wrap("The following was run:"), "\n\n")
}
if (!is.null(x$files)) {
if (x$force) {
cat(" $ git add -f", x$files, "\n")
} else {
cat(" $ git add", x$files, "\n")
}
}
if (x$all) {
cat(" $ git commit -a -m", deparse(x$message), "\n")
} else {
cat(" $ git commit -m", deparse(x$message), "\n")
}
if (!x$dry_run) {
cat(sep = "", "\n",
wrap("The following file(s) were included in commit ",
stringr::str_sub(x$commit$sha, start = 1, end = 7)),
":\n")
cat(shorten_site_libs(x$commit_files), sep = "\n")
}
return(invisible(x))
}
# When printing the files included in a commit, only list the first subdirectory
# within `site_libs/`.
shorten_site_libs <- function(files) {
is_site_libs <- stringr::str_detect(files, "site_libs")
f_split <- stringr::str_split(files, .Platform$file.sep,
simplify = TRUE)
out <- files
for (i in seq_along(files)) {
if (is_site_libs[i]) {
col_site_libs <- which(f_split[i, ] == "site_libs")
out[i] <- paste(f_split[i, seq(col_site_libs + 1)],
collapse = .Platform$file.sep)
# Add final / so that it is clear it's a directory
if (fs::dir_exists(out[i])) {
out[i] <- paste0(out[i], .Platform$file.sep)
}
}
}
return(unique(out))
}
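# A sketch with hypothetical paths (the trailing slash is only appended when
# the shortened path is an existing directory):
#
# > shorten_site_libs(c("docs/index.html",
# +                     "docs/site_libs/bootstrap-3.3.5/css/bootstrap.css",
# +                     "docs/site_libs/bootstrap-3.3.5/js/bootstrap.js"))
# [1] "docs/index.html" "docs/site_libs/bootstrap-3.3.5/"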
check_files_in_git_repo <- function(files) {
if (!all(sapply(files, git2r::in_repository)))
stop("Not all files are inside the Git repository")
}
# Files cannot be larger than 100MB
check_file_sizes <- function(files) {
sizes <- file.size(files) / 10^6
if (any(sizes >= 100))
stop(wrap(
"All files to be committed must be less than 100 MB. This is the max
file size able to be pushed to GitHub, and is in general a good practice
to follow no matter what Git hosting service you are using. Large files
will make each push and pull take much longer and increase the risk of
the download timing out. Run Git directly in the Terminal if you really
want to commit these files."
))
}
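# A sketch (hypothetical file): check_file_sizes("output/huge.rds") throws an
# error if the file is 100 MB or larger, and returns silently otherwise.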
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_git_commit.R
#' Configure Git settings
#'
#' \code{wflow_git_config} configures the global Git settings on the current
#' machine. This is a convenience function to run Git commands from the R
#' console instead of the Terminal. The same functionality can be achieved by
#' running \code{git config} in the Terminal.
#'
#' The main purpose of \code{wflow_git_config} is to set the user.name and
#' user.email to use with Git commits. Note that these do not need to match the
#' name and email you used to register your online account with a Git hosting
#' service (e.g. GitHub or GitLab). However, it can also handle arbitrary Git
#' settings (see examples below).
#'
#' There are two main limitations of \code{wflow_git_config} for the sake of
#' simplicity. First, \code{wflow_git_config} only affects the global Git
#' settings that apply to all Git repositories on the local machine and is
#' unable to configure settings for one specific Git repository. Second,
#' \code{wflow_git_config} can only add or change the user.name and user.email
#' settings, but not delete them. To perform either of these actions, please use
#' \code{git config} in the Terminal.
#'
#' Under the hood, \code{wflow_git_config} is a wrapper for
#' \code{\link[git2r]{config}} from the package \link{git2r}.
#'
#' To learn more about how to configure Git, see the Software Carpentry lesson
#' \href{https://swcarpentry.github.io/git-novice/02-setup.html}{Setting Up Git}.
#'
#' @param user.name character (default: NULL). Git user name. Git assigns an
#' author when committing (i.e. saving) changes. If you have never used Git
#' before on your computer, make sure to set this.
#'
#' @param user.email character (default: NULL). Git user email. Git assigns an
#' email when committing (i.e. saving) changes. If you have never used Git
#' before on your computer, make sure to set this.
#'
#' @param overwrite logical (default: FALSE). Overwrite existing Git global
#' settings.
#'
#' @param ... Arbitrary Git settings, e.g. \code{core.editor = "nano"}.
#'
#' @return An object of class \code{wflow_git_config}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{user.name}: The current global Git user.name
#'
#' \item \bold{user.email}: The current global Git user.email
#'
#' \item \bold{all_settings}: A list of all current global Git settings
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' # View current Git settings
#' wflow_git_config()
#' # Set user.name and user.email
#' wflow_git_config(user.name = "A Name", user.email = "email@domain")
#' # Set core.editor (the text editor that Git opens to write commit messages)
#' wflow_git_config(core.editor = "nano")
#'
#' }
#'
#' @export
wflow_git_config <- function(user.name = NULL, user.email = NULL, ...,
overwrite = FALSE) {
# Check input arguments ------------------------------------------------------
if (!is.null(user.name)) {
assert_is_character(user.name)
assert_has_length(user.name, 1)
}
if (!is.null(user.email)) {
assert_is_character(user.email)
assert_has_length(user.email, 1)
}
assert_is_flag(overwrite)
# Create .gitconfig on Windows -----------------------------------------------
# If ~/.gitconfig does not exist, it is created in ~/Documents on Windows. If
# the user has supplied values to be set and the file doesn't exist, create an
# empty .gitconfig in ~ first.
if (.Platform$OS.type == "windows") {
values_to_be_set <- !is.null(user.name) || !is.null(user.email) ||
length(list(...)) > 0
if (values_to_be_set) {
# Can't use ~ because the default on Windows is the user's Documents
# directory.
# https://cran.r-project.org/bin/windows/base/rw-FAQ.html#What-are-HOME-and-working-directories_003f
user_home <- get_home()
config_file <- file.path(user_home, ".gitconfig")
if (!fs::file_exists(config_file)) {
fs::file_create(config_file)
}
}
}
# Check for existing settings ------------------------------------------------
existing <- git2r::config(global = TRUE)$global
variables_to_set <- c(user.name = user.name, user.email = user.email, list(...))
to_be_overwritten <- intersect(names(existing), names(variables_to_set))
if (length(to_be_overwritten) > 0) {
message("Current settings:")
for (var in to_be_overwritten) {
message(glue::glue(" {var}: {existing[[var]]}"))
}
if (overwrite) {
message("The settings above will be overwritten.")
} else {
stop("Some settings already exist. Set overwrite=TRUE to overwrite them.",
call. = FALSE)
}
}
# Configure ------------------------------------------------------------------
# user.name
if (!is.null(user.name)) {
git2r::config(global = TRUE, user.name = user.name)
}
# user.email
if (!is.null(user.email)) {
git2r::config(global = TRUE, user.email = user.email)
}
# Other settings
other <- list(...)
if (length(other) > 0) {
git2r::config(global = TRUE, ...)
}
# Prepare output -------------------------------------------------------------
git_config <- git2r::config(global = TRUE)
o <- list(user.name = git_config$global$user.name,
user.email = git_config$global$user.email,
all_settings = git_config$global)
class(o) <- "wflow_git_config"
return(o)
}
#' @export
print.wflow_git_config <- function(x, ...) {
if (is.null(x$user.name)) {
cat("Current Git user.name needs to be configured\n")
} else {
cat(sprintf("Current Git user.name:\t%s\n", x$user.name))
}
if (is.null(x$user.email)) {
cat("Current Git user.email needs to be configured\n")
} else {
cat(sprintf("Current Git user.email:\t%s\n", x$user.email))
}
other_settings <- x$all_settings
other_settings[["user.name"]] <- NULL
other_settings[["user.email"]] <- NULL
settings_names <- names(other_settings)
if (length(other_settings) > 0) {
cat("Other Git settings:\n")
for (i in seq_along(other_settings)) {
cat(sprintf("\t%s:\t%s\n", settings_names[i], other_settings[[i]]))
}
}
cat("\n")
return(invisible(x))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_git_config.R
#' Pull files from remote repository
#'
#' \code{wflow_git_pull} pulls the remote files from your remote repository
#' online (e.g. GitHub or GitLab) into your repository on your local machine.
#' This is a convenience function to run Git commands from the R console instead
#' of the Terminal. The same functionality can be achieved by running \code{git
#' pull} in the Terminal.
#'
#' \code{wflow_git_pull} tries to choose sensible defaults if the user does not
#' explicitly specify the remote repository and/or the remote branch:
#'
#' \itemize{
#'
#' \item If both \code{remote} and \code{branch} are \code{NULL},
#' \code{wflow_git_pull} checks to see if the current local branch is tracking a
#' remote branch. If yes, it pulls to this tracked remote branch.
#'
#' \item If the argument \code{remote} is left as \code{NULL} and there is only
#' one remote, it is used. If there is more than one remote, the one named
#' "origin" is used.
#'
#' \item If the argument \code{branch} is left as \code{NULL}, the
#' name of the current local branch is used (referred to as \code{HEAD} by Git).
#'
#' }
#'
#' Under the hood, \code{wflow_git_pull} is a wrapper for
#' \code{\link[git2r]{pull}} from the package \link{git2r}.
#'
#' @param remote character (default: NULL). The name of the remote repository.
#' See Details for the default behavior.
#' @param branch character (default: NULL). The name of the branch in the remote
#' repository to pull from. If \code{NULL}, the name of the current local
#' branch is used.
#' @param username character (default: NULL). Username for online Git hosting
#' service (e.g. GitHub or GitLab). The user is prompted if necessary.
#' @param password character (default: NULL). Password for online Git hosting
#' service (e.g. GitHub or GitLab). The user is prompted if necessary.
#' @param fail logical (default: TRUE) Abort the pull if any merge conflicts
#' are detected. If you are sure you want to manually cleanup the merge
#' conflicts, set \code{fail = FALSE}. The argument \code{fail} is passed to
#' the git2r function \code{\link[git2r:reexports]{merge.git_repository}}.
#' @param dry_run logical (default: FALSE). Preview the proposed action but do
#' not actually pull from the remote repository.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return An object of class \code{wflow_git_pull}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{remote}: The remote repository.
#'
#' \item \bold{branch}: The branch of the remote repository.
#'
#' \item \bold{username}: Username for online Git hosting service (e.g. GitHub
#' or GitLab).
#'
#' \item \bold{merge_result}: The \code{git_merge_result} object returned by
#' \link{git2r} (only included if \code{dry_run == FALSE}).
#'
#' \item \bold{fail}: The input argument \code{fail}.
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}.
#'
#' \item \bold{protocol}: The authentication protocol for the remote repository
#' (either \code{"https"} or \code{"ssh"}).
#'
#' \item \bold{project}: The input argument \code{project}.
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' # Pull from remote repository
#' wflow_git_pull()
#' # Preview by running in dry run mode
#' wflow_git_pull(dry_run = TRUE)
#' }
#'
#' @export
wflow_git_pull <- function(remote = NULL, branch = NULL, username = NULL,
password = NULL, fail = TRUE, dry_run = FALSE,
project = ".") {
# Check input arguments ------------------------------------------------------
if (!(is.null(remote) || (is.character(remote) && length(remote) == 1)))
stop("remote must be NULL or a one-element character vector")
if (!(is.null(branch) || (is.character(branch) && length(branch) == 1)))
stop("branch must be NULL or a one-element character vector")
if (!(is.null(username) || (is.character(username) && length(username) == 1)))
stop("username must be NULL or a one-element character vector")
if (!(is.null(password) || (is.character(password) && length(password) == 1)))
stop("password must be NULL or a one-element character vector")
assert_is_flag(fail)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Assess status of repository ------------------------------------------------
# Must be using Git
p <- wflow_paths(error_git = TRUE, project = project)
r <- git2r::repository(path = p$git)
git_head <- git2r::repository_head(r)
remote_avail <- wflow_git_remote(verbose = FALSE, project = project)
# Fail early if HEAD does not point to a branch
check_branch(git_head)
# Fail early if remote not specified properly
check_remote(remote = remote, remote_avail = remote_avail)
# Determine remote and branch ------------------------------------------------
remote_and_branch <- determine_remote_and_branch(r, remote, branch)
remote <- remote_and_branch$remote
branch <- remote_and_branch$branch
# Send warning if the remote branch is not the same one as local branch (HEAD)
warn_branch_mismatch(remote_branch = branch,
local_branch = git_head$name)
# Determine protocol ---------------------------------------------------------
protocol <- get_remote_protocol(remote = remote, remote_avail = remote_avail)
if (protocol == "ssh" && !git2r::libgit2_features()$ssh) {
stop(wrap(
"You cannot use the SSH protocol for authentication on this machine because
git2r/libgit2 was not built with SSH support. You can either switch to
using the HTTPS protocol for authentication (see ?wflow_git_remote) or
re-install git2r after installing libSSH2."),
"\n\nFrom the git2r documentation:\n\n",
"To build with SSH support, please install:\n",
" libssh2-1-dev (package on e.g. Debian and Ubuntu)\n",
" libssh2-devel (package on e.g. Fedora, CentOS and RHEL)\n",
" libssh2 (Homebrew package on OS X)"
, call. = FALSE)
}
# Obtain authentication ------------------------------------------------------
credentials <- authenticate_git(protocol = protocol,
username = username, password = password,
dry_run = dry_run)
# Pull! ----------------------------------------------------------------------
# Do the pull in 2 steps: fetch+merge, b/c git2r::pull only allows pulling
# from the tracked branch.
git_alternative <- glue::glue("
Alternatively, if you have Git installed on your machine, the easiest
solution is to instead run `git pull` in the terminal. This is equivalent
to wflow_git_pull(). Specifically, copy-paste the following in the
terminal:
git pull {remote} {branch}
")
if (!dry_run) {
tryCatch(git2r::fetch(r, name = remote,
refspec = paste0("refs/heads/", branch),
credentials = credentials),
error = function(e) {
if (protocol == "ssh" &&
stringr::str_detect(conditionMessage(e), "unsupported URL protocol")) {
reason <-
"workflowr was unable to use your SSH keys because your
computer does not have the required software installed. If
you want to be able to pull directly from R, re-install the
package git2r and follow its advice for how to enable SSH
for your operating system."
reason <- c(reason, "\n\n", git_alternative)
} else if (protocol == "ssh" &&
stringr::str_detect(conditionMessage(e), "Failed to authenticate SSH session")) {
reason <-
"workflowr was unable to use your SSH keys because it has a
passphrase. You'll need to activate ssh-agent and add your
keys."
reason <- c(reason, "\n\n", git_alternative)
} else {
reason <- c("Pull failed for unknown reason.",
"\n\nThe error message from git2r::pull() was:\n\n",
conditionMessage(e),
"\n\nThese sorts of errors are difficult to
troubleshoot. You can search for similar errors
on the git2r GitHub repository for advice on how
to fix it.")
reason <- c(reason, "\n\n", git_alternative)
}
stop(wrap(reason), call. = FALSE)
}
)
merge_result <- git2r_merge(r, paste(remote, branch, sep = "/"), fail = fail)
} else {
merge_result <- NULL
}
# Prepare output -------------------------------------------------------------
o <- list(remote = remote, branch = branch, username = username,
merge_result = merge_result, fail = fail, dry_run = dry_run,
protocol = protocol, project = project)
class(o) <- "wflow_git_pull"
return(o)
}
#' @export
print.wflow_git_pull <- function(x, ...) {
cat("Summary from wflow_git_pull\n\n")
cat(wrap(sprintf(
"Pulling from the branch \"%s\" of the remote repository \"%s\"",
x$branch, x$remote)), "\n\n")
cat(glue::glue("Using the {toupper(x$protocol)} protocol\n\n"))
if (x$dry_run) {
cat("The following Git command would be run:\n\n")
} else {
cat("The following Git command was run:\n\n")
}
git_cmd <- " $ git pull"
git_cmd <- paste(git_cmd, x$remote, x$branch)
cat(git_cmd)
cat("\n")
# Note: Use "exit early" strategy instead of nested if-else clauses
if (is.null(x$merge_result)) return(invisible(x))
if (x$merge_result$up_to_date) {
m <- "No changes were made because your local and remote repositories are in sync."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
if (x$merge_result$fast_forward) {
m <- "The latest changes in the remote repository were successfully pulled
into your local repository (fast-forward merge)."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
if (!is.na(x$merge_result$sha)) {
m <- sprintf(
"The latest changes in the remote repository were successfully pulled
into your local repository. To combine the changes that differed
between the two repositories, the merge commit %s was created.",
x$merge_result$sha)
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
# At this point, there must have been a merge conflict of some sort
m <- "There were conflicts that Git could not resolve automatically when
trying to pull changes from the remote repository."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
if (x$fail) {
m <- "No changes were made to your files because workflowr aborted the pull.
Try cleaning up your files by committing the changes you want and
discarding those you don't. To allow workflowr to proceed with the pull
and potentially generate merge conflicts, re-run wflow_git_pull() with
the argument fail=FALSE."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
# Merge conflicts from committed changes. Merge conflicts are now unstaged changes
if (x$merge_result$conflicts) {
conflicted_files <- get_conflicted_files(x$project)
cat("\nThe following file(s) contain conflicts:\n")
cat(conflicted_files, sep = "\n")
if (interactive() && rstudioapi::isAvailable(version_needed = "0.99.719")) {
ans <- ""
while(!tolower(ans) %in% c("y", "n")) {
ans <- readline("Do you want workflowr to open the conflicting files in RStudio? (y/n) ")
}
if (tolower(ans) == "y") {
conflicted_lines <- get_conflicted_lines(conflicted_files)
open_files_rstudio(conflicted_files, conflicted_lines)
}
}
m <- "You will need to use Git from the Terminal to resolve these conflicts
manually. Run `git status` in the Terminal to get started."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
# Merge conflicts from unstaged or staged changes. Need to clean up repo first.
m <- "The pull **failed** because you have made local changes to your files
that would be overwritten by pulling the latest versions of the files.
You need to first commit or discard these changes and then pull
again."
cat("\n", wrap(m), "\n", sep = "")
cat("\n")
return(invisible(x))
}
# Return conflicted files in a Git repository
get_conflicted_files <- function(path) {
r <- git2r::repository(path)
s <- git2r::status(r)
s_df <- status_to_df(s)
conflicted <- s_df[s_df$substatus == "conflicted", "file"]
if (length(conflicted) == 0) return(NA)
conflicted <- file.path(git2r::workdir(r), conflicted)
return(conflicted)
}
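# A sketch (hypothetical repository): returns absolute paths of any conflicted
# files, or NA when the working tree has no merge conflicts:
#
# > get_conflicted_files(".")
# [1] NA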
get_conflicted_lines <- function(files) {
list_of_lines <- Map(readLines, files)
conflicted_lines <- Map(find_conflicted_line, list_of_lines)
conflicted_lines <- unlist(conflicted_lines)
return(conflicted_lines)
}
find_conflicted_line <- function(lines) {
stringr::str_which(lines, "^<<<")[1]
}
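# Example:
#
# > find_conflicted_line(c("x <- 1", "<<<<<<< HEAD", "y <- 2", "=======",
# +                        "y <- 3", ">>>>>>> feature"))
# [1] 2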
open_files_rstudio <- function(files, lines = -1L) {
mapply(rstudioapi::navigateToFile, file = files, line = lines)
return(invisible(files))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_git_pull.R
#' Push files to remote repository
#'
#' \code{wflow_git_push} pushes the local files on your machine to your remote
#' repository on a remote Git hosting service (e.g. GitHub or GitLab). This is a
#' convenience function to run Git commands from the R console instead of the
#' Terminal. The same functionality can be achieved by running \code{git push}
#' in the Terminal.
#'
#' \code{wflow_git_push} tries to choose sensible defaults if the user does not
#' explicitly specify the remote repository and/or the remote branch:
#'
#' \itemize{
#'
#' \item If both \code{remote} and \code{branch} are \code{NULL},
#' \code{wflow_git_push} checks to see if the current local branch is tracking a
#' remote branch. If yes, it pushes to this tracked remote branch.
#'
#' \item If the argument \code{remote} is left as \code{NULL} and there is only
#' one remote, it is used. If there is more than one remote, the one named
#' "origin" is used.
#'
#' \item If the argument \code{branch} is left as \code{NULL}, the
#' name of the current local branch is used (referred to as \code{HEAD} by Git).
#'
#' }
#'
#' Under the hood, \code{wflow_git_push} is a wrapper for \code{\link[git2r]{push}}
#' from the package \link{git2r}.
#'
#' @param remote character (default: NULL). The name of the remote repository.
#' See Details for the default behavior.
#' @param branch character (default: NULL). The name of the branch to push to in
#' the remote repository. If \code{NULL}, the name of the current local branch
#' is used.
#' @param username character (default: NULL). Username for online Git hosting
#' service (e.g. GitHub or GitLab). The user is prompted if necessary.
#' @param password character (default: NULL). Password for online Git hosting
#' service (e.g. GitHub or GitLab). The user is prompted if necessary.
#' @param force logical (default: FALSE). Force the push to the remote
#' repository. Do not use this if you are not 100\% sure of what it is doing.
#' Equivalent to: \code{git push -f}
#' @param set_upstream logical (default: TRUE). Set the current local branch to
#' track the remote branch if it isn't already tracking one. This is likely
#' what you want. Equivalent to: \code{git push -u remote branch}
#' @param view logical (default: \code{getOption("workflowr.view")}). Open the
#' URL to the repository in the browser. Ignored if \code{dry_run = TRUE}.
#' Also note that this only works if the option \code{browser} is set, which
#' you can check with \code{getOption("browser")}.
#' @param dry_run logical (default: FALSE). Preview the proposed action but do
#' not actually push to the remote repository.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return An object of class \code{wflow_git_push}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{remote}: The remote repository.
#'
#' \item \bold{branch}: The branch of the remote repository.
#'
#' \item \bold{username}: Username for online Git hosting service (e.g. GitHub
#' or GitLab).
#'
#' \item \bold{force}: The input argument \code{force}.
#'
#' \item \bold{set_upstream}: The input argument \code{set_upstream}.
#'
#' \item \bold{view}: The input argument \code{view}.
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}.
#'
#' \item \bold{protocol}: The authentication protocol for the remote repository
#' (either \code{"https"} or \code{"ssh"}).
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' # Push to remote repository
#' wflow_git_push()
#' # Preview by running in dry run mode
#' wflow_git_push(dry_run = TRUE)
#' }
#'
#' @export
wflow_git_push <- function(remote = NULL, branch = NULL, username = NULL,
password = NULL, force = FALSE, set_upstream = TRUE,
view = getOption("workflowr.view"), dry_run = FALSE,
project = ".") {
# Check input arguments ------------------------------------------------------
if (!(is.null(remote) || (is.character(remote) && length(remote) == 1)))
stop("remote must be NULL or a one-element character vector")
if (!(is.null(branch) || (is.character(branch) && length(branch) == 1)))
stop("branch must be NULL or a one-element character vector")
if (!(is.null(username) || (is.character(username) && length(username) == 1)))
stop("username must be NULL or a one-element character vector")
if (!(is.null(password) || (is.character(password) && length(password) == 1)))
stop("password must be NULL or a one-element character vector")
assert_is_flag(force)
assert_is_flag(set_upstream)
assert_is_flag(view)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Assess status of repository ------------------------------------------------
# Must be using Git
p <- wflow_paths(error_git = TRUE, project = project)
r <- git2r::repository(path = p$git)
git_head <- git2r::repository_head(r)
remote_avail <- wflow_git_remote(verbose = FALSE, project = project)
# Fail early if HEAD does not point to a branch
check_branch(git_head)
# Fail early if remote not specified properly
check_remote(remote = remote, remote_avail = remote_avail)
# Determine remote and branch ------------------------------------------------
remote_and_branch <- determine_remote_and_branch(r, remote, branch)
remote <- remote_and_branch$remote
branch <- remote_and_branch$branch
# Send warning if the remote branch is not the same one as local branch (HEAD)
warn_branch_mismatch(remote_branch = branch,
local_branch = git_head$name)
# Determine protocol ---------------------------------------------------------
protocol <- get_remote_protocol(remote = remote, remote_avail = remote_avail)
if (protocol == "ssh" && !git2r::libgit2_features()$ssh) {
stop(wrap(
"You cannot use the SSH protocol for authentication on this machine because
git2r/libgit2 was not built with SSH support. You can either switch to
using the HTTPS protocol for authentication (see ?wflow_git_remote) or
re-install git2r after installing libSSH2."),
"\n\nFrom the git2r documentation:\n\n",
"To build with SSH support, please install:\n",
" libssh2-1-dev (package on e.g. Debian and Ubuntu)\n",
" libssh2-devel (package on e.g. Fedora, CentOS and RHEL)\n",
" libssh2 (Homebrew package on OS X)"
, call. = FALSE)
}
# Obtain authentication ------------------------------------------------------
credentials <- authenticate_git(protocol = protocol,
username = username, password = password,
dry_run = dry_run)
# Push! ----------------------------------------------------------------------
if (!dry_run) {
# First check for and execute any pre-push hooks. libgit2 does not support
# this. Only supported on unix-alike systems.
pre_push_file <- file.path(git2r::workdir(r), ".git/hooks/pre-push")
pre_push_file_rel <- fs::path_rel(pre_push_file, start = getwd())
if (fs::file_exists(pre_push_file) && .Platform$OS.type != "windows") {
message(glue::glue("Executing pre-push hook in {pre_push_file_rel}"))
hook <- suppressWarnings(system(pre_push_file, intern = TRUE))
message(hook)
# system(intern = TRUE) only sets the "status" attribute on non-zero exit
if (!is.null(attr(hook, "status")) && attr(hook, "status") != 0) {
stop(glue::glue("Execution stopped by {pre_push_file_rel}"),
call. = FALSE)
}
}
git_alternative <- glue::glue("
Alternatively, if you have Git installed on your machine, the easiest
solution is to instead run `git push` in the terminal. This is
equivalent to wflow_git_push(). Specifically, copy-paste the following
in the terminal:
git push -u {remote} {branch}
")
tryCatch(git2r::push(r, name = remote,
refspec = paste0("refs/heads/", branch),
force = force, credentials = credentials),
error = function(e) {
if (protocol == "ssh" &&
stringr::str_detect(conditionMessage(e), "unsupported URL protocol")) {
reason <-
"workflowr was unable to use your SSH keys because your
computer does not have the required software installed. If
you want to be able to push directly from R, re-install the
package git2r and follow its advice for how to enable SSH
for your operating system."
reason <- c(reason, "\n\n", git_alternative)
} else if (protocol == "ssh" &&
stringr::str_detect(conditionMessage(e), "Failed to authenticate SSH session")) {
reason <-
"workflowr was unable to use your SSH keys because it has a
passphrase. You'll need to activate ssh-agent and add your
keys."
reason <- c(reason, "\n\n", git_alternative)
} else if (stringr::str_detect(conditionMessage(e), "remote contains commits that are not present locally")) {
reason <-
"workflowr was unable to push because the remote repository
contains changes that are not present in your local
repository. Run wflow_git_pull() first to pull down these
changes to your local computer."
} else {
reason <- c("Push failed for unknown reason.",
"\n\nThe error message from git2r::push() was:\n\n",
conditionMessage(e),
"\n\nThese sorts of errors are difficult to
troubleshoot. You can search for similar errors
on the git2r GitHub repository for advice on how
to fix it.")
reason <- c(reason, "\n\n", git_alternative)
}
stop(wrap(reason), call. = FALSE)
}
)
# Set upstream tracking branch if it doesn't exist and `set_upstream=TRUE`
local_branch_object <- git2r::repository_head(r)
if (is.null(git2r::branch_get_upstream(local_branch_object)) && set_upstream) {
git2r::branch_set_upstream(branch = local_branch_object,
name = paste(remote, branch, sep = "/"))
}
}
# Prepare output -------------------------------------------------------------
o <- list(remote = remote, branch = branch, username = username,
force = force, set_upstream = set_upstream, view = view,
dry_run = dry_run, protocol = protocol)
class(o) <- "wflow_git_push"
browser <- check_browser()
if (view && browser && !dry_run) {
remote_url <- remote_avail[remote]
# Remove trailing .git
remote_url <- stringr::str_replace(remote_url, "\\.git$", "")
# If SSH, replace with HTTPS URL
remote_url <- stringr::str_replace(remote_url, "^git@(.+):", "https://\\1/")
utils::browseURL(remote_url)
}
return(o)
}
#' @export
print.wflow_git_push <- function(x, ...) {
cat("Summary from wflow_git_push\n\n")
cat(wrap(sprintf(
"Pushing to the branch \"%s\" of the remote repository \"%s\"",
x$branch, x$remote)), "\n\n")
cat(glue::glue("Using the {toupper(x$protocol)} protocol\n\n"))
if (x$dry_run) {
cat("The following Git command would be run:\n\n")
} else {
cat("The following Git command was run:\n\n")
}
if (x$force) {
git_cmd <- " $ git push -f"
} else {
git_cmd <- " $ git push"
}
git_cmd <- paste(git_cmd, x$remote, x$branch)
cat(git_cmd)
cat("\n")
return(invisible(x))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_git_push.R
#' Manage remote Git repositories
#'
#' \code{wflow_git_remote} is a convenience function for managing remote
#' repositories from R. By default it displays the current remote repositories
#' (analogous to \code{git remote -v}). It can add a remote, remove a remote, or
#' update the URL for an existing remote.
#'
#' \code{wflow_git_remote} constructs a URL to a remote repository based on the
#' input username, repository name, protocol (https or ssh), and domain (e.g.
#' "github.com" or "gitlab.com"). It can add a remote (\code{action = "add"}),
#' remove a remote (\code{action = "remove"}), or update the URL for an existing
#' remote (\code{action = "set_url"}).
#'
#' This function cannot change the name of an existing remote. To accomplish
#' this, you could run Git from the Terminal (\code{git remote rename <old>
#' <new>}) or use \code{git2r::remote_rename} from R.
#'
#' @param remote character (default: NULL). The name of the remote.
#' @param user character (default: NULL). The username for the remote
#' repository.
#' @param repo character (default: NULL). The name of the remote repository on
#' the Git hosting service (e.g. GitHub or GitLab).
#' @param protocol character (default: "https"). The protocol for communicating
#' with the Git hosting service (e.g. GitHub or GitLab). Must be either
#' "https" or "ssh".
#' @param action character (default: "add"). The action to perform on the
#' remotes. Must be one of "add", "remove", or "set_url". This argument is
#' ignored if \code{remote = NULL}.
#' @param domain character (default: "github.com"). The domain of the remote
#' host. For example, if you want to host your Git repository at GitLab, you
#' would specify "gitlab.com".
#' @param verbose logical (default: TRUE). Display the current remotes.
#' Analogous to \code{git remote -v}.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Invisibly returns a named character vector of the remote URLs.
#'
#' @examples
#' \dontrun{
#'
#' # Display the current remotes
#' wflow_git_remote()
#'
#' # Add a remote called origin that points to the
#' # GitHub repository example_repo owned by
#' # the GitHub user example_user
#' wflow_git_remote("origin", "example_user", "example_repo")
#'
#' # Remove the remote named upstream
#' wflow_git_remote("upstream", action = "remove")
#'
#' # Change the protocol of the remote origin from https to ssh
#' wflow_git_remote("origin", "example_user", "example_repo", protocol = "ssh",
#' action = "set_url")
#'
#' # Add a remote called origin that points to the
#' # GitLab repository example_repo owned by
#' # the GitLab user example_user
#' wflow_git_remote("origin", "example_user", "example_repo", domain = "gitlab.com")
#' }
#' @export
wflow_git_remote <- function(remote = NULL, user = NULL, repo = NULL,
protocol = "https", action = "add",
domain = "github.com",
verbose = TRUE, project = ".") {
if (!(is.null(remote) || (is.character(remote) && length(remote) == 1)))
stop("remote must be a one element character vector. You entered: ", remote)
if (any(stringr::str_detect(remote, c("[:blank:]", "[:punct:]"))))
stop("Limit the remote name to alphanumeric characters to avoid errors.\n",
"You entered: ", remote)
if (!(is.null(user) || (is.character(user) && length(user) == 1)))
stop("user must be a one element character vector. You entered: ", user)
if (!(is.null(repo) || (is.character(repo) && length(repo) == 1)))
stop("repo must be a one element character vector. You entered: ", repo)
if (!(protocol %in% c("https", "ssh")))
stop("protocol must be either https or ssh. You entered: ", protocol)
if (!(action %in% c("add", "remove", "set_url")))
stop("action must be add, remove, or set_url. You entered: ", action)
if (!is.character(domain) || length(domain) != 1)
stop("domain must be a one element character vector. You entered: ", domain)
assert_is_flag(verbose)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
if (!git2r::in_repository(project))
stop("The specified path to the project is not in a Git repository: ",
project)
r <- git2r::repository(project, discover = TRUE)
remotes_current <- git2r::remotes(r)
# Add, remove, or change URL based on value of `action`
if (!is.null(remote)) {
switch(action,
add = git_remote_add(r, remotes_current, remote,
user, repo, protocol, domain),
remove = git_remote_remove(r, remotes_current, remote),
set_url = git_remote_set_url(r, remotes_current, remote,
user, repo, protocol, domain))
}
remotes <- git2r::remotes(r)
urls <- git2r::remote_url(r, remotes)
remote_df <- data.frame(name = remotes, url = urls)
# Output a table of the current remote repositories
if (verbose) {
if (nrow(remote_df) > 0) {
remote_df_string <- utils::capture.output(print(remote_df, quote = FALSE,
row.names = FALSE))
remote_df_string <- paste(remote_df_string, "\n", sep = "")
message("The repository has the following remotes set:\n\n",
remote_df_string)
} else {
message("The repository has no remotes set.")
}
}
# Return invisibly a named character vector of the remote URLs.
names(urls) <- remotes
return(invisible(urls))
}
# Add a remote repository
git_remote_add <- function(r, remotes_current, remote, user, repo, protocol,
domain) {
if (remote %in% remotes_current)
stop(remote, " is already defined as a remote.\n",
"Use `action = \"set_url\"` to update the URL.")
if (is.null(user) || is.null(repo))
stop("Must specify both `user` and `repo` to add remote.")
remote_url <- create_remote_url(user, repo, protocol, domain)
git2r::remote_add(r, remote, remote_url)
return(invisible(remote_url))
}
# Remove a remote repository
git_remote_remove <- function(r, remotes_current, remote) {
if (!(remote %in% remotes_current))
stop(remote, " is not defined as a remote. Unable to remove.")
git2r::remote_remove(r, remote)
}
# Set URL for a remote repository
git_remote_set_url <- function(r, remotes_current, remote, user, repo, protocol,
domain) {
if (!(remote %in% remotes_current))
stop(remote, " is not defined as a remote.\n",
"Use `action = \"add\"` to add it.")
if (is.null(user) || is.null(repo))
stop("Must specify both `user` and `repo` to change URL with set_url.")
remote_url <- create_remote_url(user, repo, protocol, domain)
git2r::remote_set_url(r, remote, remote_url)
return(invisible(remote_url))
}
# Create remote URLs.
#
# Examples:
# > workflowr:::create_remote_url("fakename", "fakerepo", "https")
# [1] "https://github.com/fakename/fakerepo.git"
#
# > workflowr:::create_remote_url("fakename", "fakerepo", "ssh")
# [1] "[email protected]:fakename/fakerepo.git"
#
# > workflowr:::create_remote_url("fakename", "fakerepo", "https",
# domain = "gitlab.com")
# [1] "https://gitlab.com/fakename/fakerepo.git"
#
# > workflowr:::create_remote_url("fakename", "fakerepo", "ssh"
# domain = "gitlab.com")
# [1] "[email protected]:fakename/fakerepo.git"
#
create_remote_url <- function(user, repo, protocol, domain = "github.com") {
switch(protocol,
https = sprintf("https://%s/%s/%s.git",
domain, user, repo),
ssh = sprintf("git@%s:%s/%s.git",
domain, user, repo))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_git_remote.R
#' Convert to a workflowr HTML document
#'
#' Workflowr custom format for converting from R Markdown to an HTML document.
#' \code{wflow_html} has two distinct functionalities: 1) configure the
#' formatting of the HTML by extending \code{\link[rmarkdown]{html_document}}
#' (see the
#' \href{https://bookdown.org/yihui/rmarkdown/html-document.html}{RStudio
#' documentation} for the available options), and 2) configure the workflowr
#' reproducibility features (typically specified in a file named
#' \code{_workflowr.yml}). \code{wflow_html} is intended to be used to generate
#' webpages for a workflowr website, but it can also be used outside a workflowr
#' project to implement reproducibility features for single R Markdown documents.
#'
#' @section HTML formatting:
#'
#' \code{wflow_html} extends
#' \code{\link[rmarkdown]{html_document}}. To set default formatting options to
#' be shared across all of your HTML files, set them in the file
#' \code{analysis/_site.yml}. This special file can also be used to configure
#' other aspects of the website like the navigation bar (for more details see
#' the documentation on
#' \href{https://bookdown.org/yihui/rmarkdown/rmarkdown-site.html}{R Markdown
#' websites}). For example, to use the theme "cosmo" and add a table of contents
#' to every webpage, you would add the following to \code{analysis/_site.yml}:
#'
#' \preformatted{
#' output:
#' workflowr::wflow_html:
#' toc: true
#' theme: cosmo
#' }
#'
#' Formatting options can also be set for a specific file, which will override
#' the default options set in \code{analysis/_site.yml}. For example, to remove
#' the table of contents from one specific file, you would add the following to
#' the YAML header of that file:
#'
#' \preformatted{
#' output:
#' workflowr::wflow_html:
#' toc: false
#' }
#'
#' However, this will preserve any of the other shared options (e.g. the theme
#' in the above example). If you are not overriding any of the shared options,
#' it is not necessary to specify \code{wflow_html} in the YAML header of your
#' workflowr R Markdown files.
#'
#' @section Reproducibility features:
#'
#' \code{wflow_html} also implements the workflowr reproducibility features. For
#' example, it automatically sets a seed with \code{\link{set.seed}}; inserts
#' the current code version (i.e. Git commit ID); runs \code{\link{sessionInfo}}
#' at the end of the document; and inserts links to past versions of the file
#' and figures.
#'
#' These reproducibility options are not passed directly as arguments to
#' \code{wflow_html}. Instead these options are specified in
#' \code{_workflowr.yml} or in the YAML header of an R Markdown file (using the
#' field \code{workflowr:}). These options (along with their default values) are
#' as follows:
#'
#' \describe{
#' \item{knit_root_dir}{The directory where code inside an R Markdown file is
#' executed; this ultimately sets argument \code{knit_root_dir} in
#' \code{\link[rmarkdown]{render}}. By default, \code{\link{wflow_start}} sets
#' \code{knit_root_dir} in the file \code{_workflowr.yml} to be the path
#' \code{"."}. This path is a
#' \href{https://swcarpentry.github.io/shell-novice/reference.html#relative-path}{relative
#' path} from the location of \code{_workflowr.yml} to the directory for the
#' code to be executed. The path \code{"."} is shorthand for "current working
#' directory", and thus code is executed in the root of the workflowr project.
#' You can change this to be a relative path to any subdirectory of your
#' project. Also, if you were to delete this line from \code{_workflowr.yml},
#' then this would cause the code to be executed from the same directory in
#' which the R Markdown files are located (i.e. \code{analysis/} in the
#' default workflowr setup).
#'
#' It is also possible (though in general not recommended) to configure the
#' \code{knit_root_dir} to apply to only one of the R Markdown files by
#' specifying it in the YAML header of that particular file. In this case, the
#' supplied path is interpreted as relative to the R Markdown file itself.
#' Thus \code{knit_root_dir: "../data"} would execute the code in the
#' subdirectory \code{data/}.}
#'
#' \item{seed}{The \code{seed} argument in the call to \code{\link{set.seed}},
#' which is added to the beginning of an R Markdown file. In
#' \code{\link{wflow_start}}, this is set to the date using the format
#' \code{YYYYMMDD}. If no seed is specified, the default is \code{12345}.}
#'
#' \item{sessioninfo}{The function that is run to record the session
#' information. The default is \code{"sessionInfo()"}.}
#'
#' \item{github}{The URL of the remote repository for creating links to past
#' results. If unspecified, the URL is guessed from the "git remote" settings
#' (see \code{\link{wflow_git_remote}}). Specifying this setting inside
#' \code{_workflowr.yml} is especially helpful if multiple users are
#' collaborating on a project since it ensures that everyone generates the
#' same URLs.}
#'
#' \item{suppress_report}{By default a workflowr report is inserted at the top
#' of every HTML file containing useful summaries of the reproducibility
#' features and links to past versions of the analysis. To suppress this
#' report, set \code{suppress_report} to \code{TRUE}}.
#' }
#'
#' In the default workflowr setup, the file \code{_workflowr.yml} is located in
#' the root of the project. For most users it is best to leave it there, but if
#' you are interested in experimenting with the directory layout, the
#' \code{_workflowr.yml} file can be located in the same directory as the R
#' Markdown files or in any directory upstream of that directory.
#'
#' Here is an example of a customized \code{_workflowr.yml} file:
#'
#' \preformatted{
#' # Execute code in project directory
#' knit_root_dir: "."
#' # Set a custom seed
#' seed: 4815162342
#' # Use sessioninfo package to generate the session information.
#' sessioninfo: "sessioninfo::session_info()"
#' # Use this URL when inserting links to past results.
#' github: https://github.com/repoowner/mainrepo
#' }
#'
#' And here is an example of a YAML header inside an R Markdown file with the
#' same exact custom settings as above:
#'
#' \preformatted{
#' ---
#' title: "About"
#' output:
#' workflowr::wflow_html:
#' toc: false
#' workflowr:
#' knit_root_dir: ".."
#' seed: 4815162342
#' sessioninfo: "sessioninfo::session_info()"
#' github: https://github.com/repoowner/mainrepo
#' ---
#' }
#'
#' Note that the path passed to \code{knit_root_dir} changed to \code{".."}
#' because it is relative to the R Markdown file instead of
#' \code{_workflowr.yml}. Both have the effect of having the code executed in
#' the root of the workflowr project.
#'
#' @param ... Arguments passed to \code{\link[rmarkdown]{html_document}}.
#'
#' @return An \code{\link[rmarkdown]{output_format}} object to pass to
#' \code{\link[rmarkdown]{render}}.
#'
#' @seealso \code{\link{wflow_pre_knit}}, \code{\link{wflow_post_knit}},
#' \code{\link{wflow_pre_processor}}
#'
#' @import rmarkdown
#'
#' @export
#'
wflow_html <- function(...) {
cache_hook_final <- get_cache_hook()
knitr <- rmarkdown::knitr_options(opts_chunk = list(comment = NA,
fig.align = "center",
tidy = FALSE),
knit_hooks = list(plot = plot_hook,
chunk = cache_hook_final),
opts_hooks = list(fig.path = hook_fig_path))
# Have to explicitly pass on keep_md to output_format()
opts <- list(...)
if (!is.null(opts$keep_md)) keep_md <- opts$keep_md else keep_md <- FALSE
o <- rmarkdown::output_format(knitr = knitr,
pandoc = pandoc_options(to = "html"),
keep_md = keep_md,
pre_knit = wflow_pre_knit,
post_knit = wflow_post_knit,
pre_processor = wflow_pre_processor,
base_format = rmarkdown::html_document(...))
return(o)
}
# knitr options ----------------------------------------------------------------
# Save the figures in "figure/<basename-of-Rmd-file>/"
# https://yihui.name/knitr/hooks/#option-hooks
hook_fig_path <- function(options) {
# Record the original figure path. If it was set by the user, a warning will
# be inserted into the document by the knit hook `plot_hook` to notify that
# the setting was ignored.
options$fig.path.orig <- options$fig.path
input <- knitr::current_input()
options$fig.path <- create_figure_path(input)
# Requires trailing slash
options$fig.path <- paste0(options$fig.path, .Platform$file.sep)
return(options)
}
# This knit hook inserts a table of previous versions of the figure
plot_hook <- function(x, options) {
# Exit early if there is no Git repository
if (!git2r::in_repository(".")) {
return(wflow_hook_plot_md(x, options))
}
r <- git2r::repository(".", discover = TRUE)
input <- file.path(getwd(), x)
# Need to refactor obtaining workflowr options
github <- get_host_from_remote(getwd())
output_dir <- get_output_dir(directory = getwd())
if (!is.null(output_dir)) {
input <- file.path(output_dir, x)
}
fig_versions <- get_versions_fig(fig = input, r = r, github = github)
# Exit early if no previous versions of the figure are available
if (fig_versions == "") {
return(wflow_hook_plot_md(x, options))
}
return(paste(c(wflow_hook_plot_md(x, options), fig_versions),
collapse = "\n"))
}
# Inserts Bootstrap warning(s) into the HTML file if any of the following
# issues are detected:
#
# * User set custom fig.path which gets ignored by workflowr
# * Python chunk creates a plot, but using old version of reticulate (< 1.15).
# Older versions of reticulate used an absolute path to the figure file, which
# caused the images to not be rendered in a non-standalone document.
wflow_hook_plot_md <- function(x, options) {
warnings_to_add <- character()
# Check fig.path
expected <- paste0(tools::file_path_sans_ext(knitr::current_input()),
"_files", .Platform$file.sep, "figure-html",
.Platform$file.sep)
if (options$fig.path.orig != expected) {
warnings_to_add <- c(warnings_to_add,
"<strong>Warning!</strong> The custom <code>fig.path</code> you set was ignored by workflowr.")
}
if (length(warnings_to_add) == 0) {
return(knitr::hook_plot_md(x, options))
}
# Add alert class
warnings_to_add <- paste("<div class=\"alert alert-warning\">",
warnings_to_add,
"</div>")
warnings_to_add <- paste(warnings_to_add, collapse = "\n")
return(glue::glue("{knitr::hook_plot_md(x, options)}
{warnings_to_add}"))
}
# This knit hook warns if a chunk has cache=TRUE, autodep=FALSE, dependson=NULL
cache_hook <- function(x, options) {
if (options$cache && is.null(options$dependson) && !options$autodep) {
x <- glue::glue("{x}
<div class=\"alert alert-warning\">
<p><strong>Warning:</strong>
The above code chunk cached its results, but it won't be
re-run if previous chunks it depends on are updated. If
you need to use caching, it is highly recommended to
also set <code>knitr::opts_chunk$set(autodep =
TRUE)</code> at the top of the file (in a chunk that is
not cached). Alternatively, you can customize the option
<code>dependson</code> for each individual chunk that is
cached. Using either <code>autodep</code> or
<code>dependson</code> will remove this warning. See the
<a href=\"https://yihui.name/knitr/options/#cache\"
>knitr cache options</a> for more details.
</p>
</div>")
}
return(x)
}
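# A sketch of the chunk options that trigger the warning above (illustrative
# only; the option list is hypothetical and much smaller than what knitr
# passes in practice). Setting autodep = TRUE or a non-NULL dependson
# suppresses the alert.
if (FALSE) {
  opts <- list(cache = TRUE, autodep = FALSE, dependson = NULL)
  cache_hook("<!-- rendered chunk -->", opts)  # chunk plus Bootstrap alert
  opts$autodep <- TRUE
  cache_hook("<!-- rendered chunk -->", opts)  # chunk returned unchanged
}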
# Access the default chunk hook function from knitr because it isn't exported.
knitr_hook_chunk <- function() {
knitr::render_markdown()
f <- knitr::knit_hooks$get("chunk")
knitr::knit_hooks$restore()
return(f)
}
# First run the chunk through knitr's default markdown chunk function
get_cache_hook <- function() {
default_hook_chunk <- knitr_hook_chunk()
wflow_hook_chunk <- cache_hook
result <- function(x, options) {
x <- default_hook_chunk(x, options)
wflow_hook_chunk(x, options)
}
return(result)
}
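# Composition sketch (illustrative only): the returned function is equivalent
# to chaining the two hooks by hand, i.e.
# cache_hook(default_hook_chunk(x, options), options). It is not meant to be
# called outside of knitr, which supplies the full set of chunk options.
if (FALSE) {
  hook <- get_cache_hook()
  # hook(x, options) first renders with knitr's default markdown chunk hook,
  # then appends the cache warning when applicable.
}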
# pre_knit function ------------------------------------------------------------
#' pre_knit function for workflowr
#'
#' This is the \code{pre_knit} function that \code{\link{wflow_html}} passes to
#' the function \code{\link[rmarkdown]{output_format}} from the package
#' \link{rmarkdown}. For advanced usage only.
#'
#' If you'd like to insert the workflowr reproducibility report into other R
#' Markdown output formats such as \code{blogdown::html_page}, you can use
#' \code{wflow_pre_knit}.
#'
#' @param input Name of R Markdown file
#' @param ... currently ignored
#'
#' @seealso \code{\link{wflow_html}}, \code{\link{wflow_post_knit}},
#' \code{\link{wflow_pre_processor}}
#'
#' @export
#' @keywords internal
wflow_pre_knit <- function(input, ...) {
# This function copies the R Markdown file to a temporary directory and then
# modifies it.
# Access parent environment. Have to go up 2 frames because of the function
# that combines pre_knit function from the current and base output_formats.
#
# Inspired by rmarkdowntown by Romain François
# https://github.com/romainfrancois/rmarkdowntown/blob/deef97a5cd6f0592318ecc6e78c6edd7612eb449/R/html_document2.R#L12
frames <- sys.frames()
e <- frames[[length(frames) - 2]]
lines_in <- readLines(input)
tmpfile <- file.path(tempdir(), basename(input))
e$knit_input <- tmpfile
wflow_opts <- wflow_options(input)
# Set the knit_root_dir option for rmarkdown::render. However, the user can
# override the knit_root_dir option by passing it directly to render.
if (is.null(e$knit_root_dir)) {
e$knit_root_dir <- wflow_opts$knit_root_dir
} else {
wflow_opts$knit_root_dir <- e$knit_root_dir
}
# Find the end of the YAML header for inserting new lines
header_delims <- stringr::str_which(lines_in, "^-{3}|^\\.{3}")
if (length(header_delims) >= 2) {
header_end <- header_delims[2]
header_lines <- lines_in[seq(header_end)]
} else {
# A valid R Markdown file does not require a YAML header
header_end <- 0
header_lines <- NULL
}
# Get output directory if it exists
output_dir <- get_output_dir(directory = dirname(input))
has_code <- detect_code(input)
report <- create_report(input, output_dir, has_code, wflow_opts)
# Set seed at beginning
if (has_code && is.numeric(wflow_opts$seed) && length(wflow_opts$seed) == 1) {
seed_chunk <- c("",
"```{r seed-set-by-workflowr, echo = FALSE}",
sprintf("set.seed(%d)", wflow_opts$seed),
"```",
"")
} else {
seed_chunk <- ""
}
# Add session information at the end
if (has_code && wflow_opts$sessioninfo != "") {
sessioninfo <- glue::glue('
<br>
<p>
<button type="button" class="btn btn-default btn-workflowr btn-workflowr-sessioninfo"
data-toggle="collapse" data-target="#workflowr-sessioninfo"
style = "display: block;">
<span class="glyphicon glyphicon-wrench" aria-hidden="true"></span>
Session information
</button>
</p>
<div id="workflowr-sessioninfo" class="collapse">
```{{r session-info-chunk-inserted-by-workflowr}}
{wflow_opts$sessioninfo}
```
</div>
')
# If there is a bibliography, make sure it appears before the session
# information
header <- rmarkdown::yaml_front_matter(input)
if (!is.null(header$bibliography)) {
sessioninfo <- add_bibliography(sessioninfo, lines_in)
}
} else {
sessioninfo <- ""
}
lines_out <- c(header_lines,
"",
report,
"",
seed_chunk,
"",
lines_in[(header_end + 1):length(lines_in)],
"",
sessioninfo)
writeLines(lines_out, tmpfile)
}
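# Assembly sketch (illustrative only): the temporary Rmd knitted in place of
# the original is laid out as
#
#   [YAML header (if any)]
#   [workflowr reproducibility report]
#   [seed chunk inserted by workflowr]
#   [original document body]
#   [session information button and chunk]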
# Add the bibliography prior to the session information, but only if they
# haven't manually inserted the bibliography already.
#
# sessioninfo - character vector with session information lines to insert at end
# of R Markdown file
#
# lines - character vector of the lines of current R Markdown file
#
# Prepends <div id="refs"></div> if this string is not already present in the
# documents.
add_bibliography <- function(sessioninfo, lines) {
stopifnot(is.character(sessioninfo), is.character(lines))
if (!any(stringr::str_detect(lines, "<div id=[\'\"]refs[\'\"]>"))) {
sessioninfo <- c("", "<div id=\"refs\"></div>", "", sessioninfo)
}
return(sessioninfo)
}
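# A usage sketch (illustrative only; the input lines are hypothetical). The
# refs div is prepended only when the document lacks one, so citations render
# before the session information.
if (FALSE) {
  add_bibliography("sessionInfo()", "Some text citing [@knitr].")
  #> [1] ""                        "<div id=\"refs\"></div>"
  #> [3] ""                        "sessionInfo()"
  add_bibliography("sessionInfo()", "<div id=\"refs\"></div>")
  #> [1] "sessionInfo()"
}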
# post_knit function -----------------------------------------------------------
#' post_knit function for workflowr
#'
#' This is the \code{post_knit} function that \code{\link{wflow_html}} passes to
#' the function \code{\link[rmarkdown]{output_format}} from the package
#' \link{rmarkdown}. For advanced usage only.
#'
#' If you'd like to combine workflowr with another R Markdown output format, you
#' may need to use \code{wflow_post_knit}. This function fixes the path to the R
#' Markdown file (which is manipulated by \code{\link{wflow_pre_knit}}).
#'
#' @param metadata The metadata specified in the YAML header of the R Markdown
#' file
#' @param input_file Name of R Markdown file
#' @param ... arguments passed to the \code{post_knit} function of
#' \code{rmarkdown::\link[rmarkdown]{html_document}}
#'
#' @inheritParams rmarkdown::render
#'
#' @seealso \code{\link{wflow_html}}, \code{\link{wflow_pre_knit}},
#' \code{\link{wflow_pre_processor}}
#'
#' @export
#' @keywords internal
wflow_post_knit <- function(metadata, input_file, runtime, encoding, ...) {
# This function adds the navigation bar for websites defined in either
# _navbar.html or _site.yml. Below I just fix the path to the input file that
# I had changed for pre_knit and then execute the post_knit from
# rmarkdown::html_document.
# Change the input_file back to its original so that the post_knit defined
# in rmarkdown::html_document() can find the navbar defined in _site.yml.
input_file_original <- file.path(getwd(), basename(input_file))
# I tried to find a better solution than directly calling it myself (since
# it is run afterwards anyways since html_document() is the base format),
# but nothing I tried worked.
rmarkdown::html_document()$post_knit(metadata, input_file_original,
runtime, encoding, ...)
}
# pre_processor function -----------------------------------------------------
#' pre_processor function for workflowr
#'
#' This is the \code{pre_processor} function that \code{\link{wflow_html}}
#' passes to the function \code{\link[rmarkdown]{output_format}} from the
#' package \link{rmarkdown}. For advanced usage only.
#'
#' If you'd like to combine workflowr with another R Markdown output format, you
#' may need to use \code{wflow_pre_processor}. This function only has minor
#' effects on the style of the resulting HTML page, and is not essential for
#' using workflowr.
#'
#' @param input_file Name of Markdown file created by
#' \code{knitr::\link[knitr]{knit}} to be passed to
#' \href{https://pandoc.org/}{pandoc}
#' @param files_dir Directory for saving intermediate files
#'
#' @inheritParams rmarkdown::render
#' @inheritParams wflow_post_knit
#'
#' @seealso \code{\link{wflow_html}}, \code{\link{wflow_pre_knit}},
#' \code{\link{wflow_post_knit}}
#'
#' @export
#' @keywords internal
wflow_pre_processor <- function(metadata, input_file, runtime, knit_meta,
files_dir, output_dir) {
# Pass additional arguments to Pandoc. I use this to add a custom header
# (--include-in-header) and footer (--include-after-body). The template text
# for these are in the list `includes` defined in R/infrastructure.R.
# header
fname_header <- tempfile("header", fileext = ".html")
writeLines(includes$header, con = fname_header)
# footer
fname_footer <- tempfile("footer", fileext = ".html")
writeLines(includes$footer, con = fname_footer)
# Pandoc args
args <- c("--include-in-header", fname_header,
"--include-after-body", fname_footer)
# Add pagetitle if missing title to avoid pandoc2 error
if (rmarkdown::pandoc_available("2.0")) {
args <- c(args, add_pagetitle(metadata, input_file))
}
return(args)
}
# Add a pagetitle (if needed) to avoid pandoc2 warning about missing title
add_pagetitle <- function(metadata, input_file) {
# Only add pagetitle if all the following conditions are met
# No title
if (!is.null(metadata$title)) return(character(0))
# No pagetitle
if (!is.null(metadata$pagetitle)) return(character(0))
# No title/pagetitle defined with pandoc_args
if (is.list(metadata$output)) {
pandoc_args <- metadata$output$`workflowr::wflow_html`$pandoc_args
} else {
pandoc_args <- NULL
}
if (!is.null(pandoc_args) && any(stringr::str_detect(pandoc_args, "title")))
return(character(0))
pagetitle <- input_file
return(c("--metadata", paste0("pagetitle=", pagetitle)))
}
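# A behavior sketch (illustrative only; the metadata and file names are
# hypothetical). A pagetitle is added only when no title is available from
# the YAML header or pandoc_args.
if (FALSE) {
  add_pagetitle(metadata = list(), input_file = "untitled.knit.md")
  #> [1] "--metadata"                 "pagetitle=untitled.knit.md"
  add_pagetitle(metadata = list(title = "My Page"), input_file = "page.knit.md")
  #> character(0)
}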
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_html.R
|
#' Open R Markdown analysis file(s)
#'
#' \code{wflow_open} opens R Markdown files in RStudio and sets the working
#' directory to the knit directory (see Details). If a file does not exist, a
#' minimal one is created.
#'
#' \code{wflow_open} is a convenience function to make it easier to begin
#' working, especially when starting a new analysis. First, it creates a new
#' file if necessary and tries to make educated guesses about metadata like the
#' title, author, and date. Second, it sets the working directory to the knit
#' directory. The knit directory is where the code in the R Markdown files is
#' executed, and may be defined via the field \code{knit_root_dir} in the file
#' \code{_workflowr.yml} (see \code{\link{wflow_html}} for all the details). If
#' this field is not defined, then the knit directory is the R Markdown
#' directory. Third, it opens the file(s) in RStudio if applicable. The latter
#' two side effects can be turned off if desired.
#'
#' If you would like to create an R Markdown file with \code{wflow_open} for an
#' analysis that is not part of a workflowr project, set \code{project = NULL}.
#' Otherwise \code{wflow_open} will throw an error. Note that the working
#' directory is \bold{not} changed when \code{project = NULL}.
#'
#' @param files character. R Markdown file(s) to open. Files must have the
#' extension Rmd or rmd. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}. Set
#' \code{project = NULL} to create an R Markdown file outside of the R
#' Markdown directory of a workflowr project.
#' @param change_wd logical (default: TRUE). Change the working directory to the
#' knit directory. If \code{project = NULL}, the working directory is
#' \bold{not} changed.
#' @param edit_in_rstudio logical (default: TRUE). Open the file(s) in the
#' RStudio editor.
#' @param project character (or NULL). By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory. Set \code{project
#' = NULL} if running this command to create a file for a non-workflowr
#' project.
#'
#' @return An object of class \code{wflow_open}, which is a list with the
#' following elements:
#'
#' \item{files}{The input argument \code{files} as absolute paths.}
#'
#' \item{change_wd}{The input argument \code{change_wd}.}
#'
#' \item{edit_in_rstudio}{The input argument \code{edit_in_rstudio}.}
#'
#' \item{knit_root_dir}{The knit directory (see \code{\link{wflow_html}} for
#' details). This is \code{NULL} if \code{project} was set to \code{NULL}.}
#'
#' \item{previous_wd}{The working directory in which \code{wflow_open} was
#' executed.}
#'
#' \item{new_wd}{The working directory that \code{wflow_open} changed to. The
#' value is \code{NULL} if the working directory was not changed.}
#'
#' \item{files_new}{The subset of the input argument \code{files} that were
#' newly created. Paths are absolute.}
#'
#' @examples
#' \dontrun{
#' wflow_open("analysis/model-data.Rmd")
#' # Multiple files
#' wflow_open(c("analysis/model-data.Rmd", "analysis/another-analysis.Rmd"))
#' # Open all R Markdown files
#' wflow_open("analysis/*Rmd")
#' # Create an R Markdown file in a non-workflowr project
#' wflow_open("model-data.Rmd", project = NULL)
#' }
#' @import rmarkdown
#' @export
wflow_open <- function(files,
change_wd = TRUE,
edit_in_rstudio = TRUE,
project = ".") {
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, rmd_only = TRUE, must_exist = FALSE)
files <- absolute(files)
assert_is_flag(change_wd)
assert_is_flag(edit_in_rstudio)
check_wd_exists()
if (!is.null(project)) {
assert_is_single_directory(project)
project <- absolute(project)
}
files_new <- files[!fs::file_exists(files)]
# Determine knit directory ---------------------------------------------------
# If project is NULL, set `knit_directory` to NULL
if (is.null(project)) {
knit_directory <- NULL
} else {
# If project is set, find the knit directory
# Confirm project is a valid workflowr project
tryCatch(p <- wflow_paths(project = project),
error = function(e) {
stop(call. = FALSE, wrap(
"This isn't a workflowr project. Set project=NULL to create
an R Markdown file outside of a workflowr project. See
?wflow_open for details."
))
}
)
# Send a warning if user has a beta workflowr project and tries to create a
# new file
yml_index <- rmarkdown::yaml_front_matter(file.path(p$analysis, "index.Rmd"))
usingBeta <- is.null(yml_index[["site"]]) || yml_index[["site"]] != "workflowr::wflow_site"
if (usingBeta && length(files_new) > 0) {
warning(call. = FALSE, wrap(
"It appears that your site was created using a beta release of
workflowr, thus you likely don't want to use the R Markdown file
created by wflow_open. It doesn't have the necessary template chunks.
        Here are your options:"
), "\n\n",
"To continue using your site as is, you can:\n\n",
"1. Create a new R Markdown file by copying an existing one\n\n",
"2. Install the beta release of workflowr and use its wflow_open:\n\n",
"remotes::install_github(\"jdblischak/workflowrBeta\")\n\n",
"To update to a workflowr 1.0+ site, you can:\n\n",
"1. Run wflow_update() to preview the files that would be affected\n\n",
"2. Follow the instructions in ?wflow_update to make the transition\n\n",
"(Note that wflow_update() is only available in workflowr <= 1.6.2)")
}
# Throw error if Rmd files not saved in R Markdown directory
if (.Platform$OS.type == "windows") {
      non_analysis <- absolute(dirname(files)) != absolute(p$analysis)
    } else {
      non_analysis <- dirname(files) != absolute(p$analysis)
}
if (any(non_analysis)) {
stop(call. = FALSE, wrap(
"Argument \"files\" specifies at least one file outside the R Markdown
source directory:"), "\n\n",
paste(files[non_analysis], collapse = "\n"), "\n\n",
wrap(
"R Markdown files inside your workflowr project must be in the
following directory:"), "\n\n",
absolute(p$analysis),
"\n\n",
wrap(
"Set project=NULL to create an R Markdown file outside of the R
Markdown directory of your workflowr project. See ?wflow_open for
details."
)
)
}
# Find knit directory (knit_root_dir). This ignores any potential settings
# in the YAML header(s) of existing file(s).
wflow_opts <- wflow_options(files[1])
knit_directory <- wflow_opts$knit_root_dir
if (is.null(knit_directory)) {
knit_directory <- p$analysis
}
knit_directory <- absolute(knit_directory)
}
# Guess metadata -------------------------------------------------------------
yaml_title <- tools::file_path_sans_ext(basename(files))
yaml_author <- ""
git_config <- git2r::config()
if (!is.null(git_config$global$user.name)) {
yaml_author <- git_config$global$user.name
}
if (!is.null(git_config$local$user.name)) {
yaml_author <- git_config$local$user.name
}
yaml_date <- as.character(Sys.Date())
# Create files ---------------------------------------------------------------
for (i in seq_along(files_new)) {
header <- glue::glue("---
title: \"{yaml_title[i]}\"
author: \"{yaml_author}\"
date: \"{yaml_date}\"
output: workflowr::wflow_html
editor_options:
chunk_output_type: console
---")
boilerplate <- c("",
"## Introduction",
"",
"```{r}",
"",
"```",
"")
fs::dir_create(dirname(files_new[i]))
writeLines(c(header, boilerplate), files_new[i])
}
# Now that files all exist, ensure that symlinks are expanded
files <- absolute(files)
files_new <- absolute(files_new)
# Set working directory ------------------------------------------------------
current_wd <- absolute(getwd())
if (change_wd && !is.null(project) && current_wd != knit_directory) {
setwd(knit_directory)
new_wd <- knit_directory
} else {
new_wd <- NULL
}
# Open files -----------------------------------------------------------------
if (rstudioapi::isAvailable() && edit_in_rstudio) {
for (rmd in files) {
rstudioapi::navigateToFile(rmd)
}
}
# Prepare output -------------------------------------------------------------
o <- list(files = files,
change_wd = change_wd,
edit_in_rstudio = edit_in_rstudio,
knit_root_dir = knit_directory,
previous_wd = current_wd,
new_wd = new_wd,
files_new = files_new)
class(o) <- "wflow_open"
return(o)
}
#' @export
print.wflow_open <- function(x, ...) {
cat("wflow_open:\n")
if (length(x$files_new) > 0) {
cat("- New file(s):\n")
cat(paste0(" ", x$files_new), sep = "\n")
}
files_existing <- setdiff(x$files, x$files_new)
if (length(files_existing) > 0) {
cat("- Existing file(s):\n")
cat(paste0(" ", files_existing), sep = "\n")
}
if (x$change_wd && !is.null(x$new_wd)) {
cat(sprintf("- New working directory: %s\n", x$new_wd))
} else {
cat(sprintf("- Same working directory: %s\n", x$previous_wd))
}
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_open.R
|
# Find the workflowr options set in _workflowr.yml (if it exists).
#
# Input: path to R Markdown file
#
# Output: list with following components
# knit_root_dir - directory to execute code
# seed - random seed to set at beginning of each analysis
# github - URL to associated remote repository (e.g. GitHub or GitLab)
# sessioninfo - Function to record session information
# fig_path_ext - figures directory with or without .Rmd
wflow_options <- function(file) {
# Default wflow options
wflow_opts <- list(knit_root_dir = NULL,
seed = 12345,
github = get_host_from_remote(dirname(file)),
sessioninfo = "sessionInfo()",
fig_path_ext = FALSE,
suppress_report = FALSE)
# Get options from a potential _workflowr.yml file
wflow_root <- try(rprojroot::find_root(rprojroot::has_file("_workflowr.yml"),
path = dirname(file)), silent = TRUE)
if (!inherits(wflow_root, "try-error")) {
wflow_yml <- file.path(wflow_root, "_workflowr.yml")
wflow_yml_opts <- yaml::yaml.load_file(wflow_yml)
for (opt in names(wflow_yml_opts)) {
wflow_opts[[opt]] <- wflow_yml_opts[[opt]]
}
# If knit_root_dir is a relative path, interpret it as relative to the
# location of _workflowr.yml
if (!is.null(wflow_opts$knit_root_dir)) {
if (fs::is_absolute_path(wflow_opts$knit_root_dir)) {
m <-
"The value of knit_root_dir in _workflowr.yml is an absolute path.
This means that the workflowr project will only execute on your
current computer. To facilitate reproducibility on other machines,
change it to a relative path."
warning(wrap(m), call. = FALSE)
} else {
wflow_opts$knit_root_dir <- absolute(file.path(wflow_root,
wflow_opts$knit_root_dir))
}
}
}
# If file exists, get potential options from YAML header. These override the
# options specified in _workflowr.yml.
if (fs::file_exists(file)) {
wflow_opts <- wflow_options_from_file(file, wflow_opts)
}
# If knit_root_dir hasn't been configured in _workflowr.yml or the YAML header,
# set it to the location of the original file
if (is.null(wflow_opts$knit_root_dir)) {
wflow_opts$knit_root_dir <- dirname(absolute(file))
}
return(wflow_opts)
}
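# A precedence sketch (illustrative only; all paths and values are
# hypothetical, and it assumes the internal helpers tolerate a directory
# outside a Git repository). Options flow defaults -> _workflowr.yml -> YAML
# header, with later sources winning.
if (FALSE) {
  analysis <- fs::dir_create(fs::path(tempdir(), "wflow-opts-demo", "analysis"))
  root <- fs::path_dir(analysis)
  writeLines("seed: 20240101", fs::path(root, "_workflowr.yml"))
  rmd <- fs::path(analysis, "example.Rmd")
  writeLines(c("---", "title: \"demo\"", "---"), rmd)
  opts <- wflow_options(rmd)
  opts$seed           # 20240101, from _workflowr.yml
  opts$knit_root_dir  # the Rmd's own directory, since no setting was given
}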
# Check for potential workflowr options in YAML header
#
# file - path to existing R Markdown file
# wflow_opts - list of workflowr options
#
# Returns updated list of workflowr options
#
# See wflow_options() and wflow_html() for more details
wflow_options_from_file <- function(file, wflow_opts = list()) {
header <- rmarkdown::yaml_front_matter(file)
header_opts <- header$workflowr
for (opt in names(header_opts)) {
wflow_opts[[opt]] <- header_opts[[opt]]
}
# If knit_root_dir was specified as a relative path in the YAML header,
# interpret it as relative to the location of the file
if (!is.null(wflow_opts$knit_root_dir)) {
if (!fs::is_absolute_path(wflow_opts$knit_root_dir)) {
wflow_opts$knit_root_dir <- absolute(file.path(dirname(file),
wflow_opts$knit_root_dir))
}
}
return(wflow_opts)
}
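# A sketch of the per-file override this function reads (illustrative only;
# the file is hypothetical). An R Markdown file with this header overrides
# the project-wide seed:
#
#   ---
#   title: "demo"
#   workflowr:
#     seed: 1
#   ---
if (FALSE) {
  wflow_options_from_file("analysis/example.Rmd", list(seed = 12345))$seed
  #> [1] 1
}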
#' check the fig_path_ext option
#'
#' @param input the path to an R Markdown file
#'
#' @keywords internal
is_fig_path_ext <- function(input) {
wflow_opts <- wflow_options(input)
wflow_opts$fig_path_ext
}
#' create the path to the figure folder
#'
#' @param input the path to an R Markdown file
#'
#' @keywords internal
create_figure_path <- function(input) {
if (is_fig_path_ext(input)){
res <- file.path("figure", basename(tools::file_path_sans_ext(input)))
} else {
res <- file.path("figure", basename(input))
}
res
}
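# A behavior sketch for the two fig_path_ext settings (illustrative only; the
# input path is hypothetical):
if (FALSE) {
  # fig_path_ext = FALSE (the default) keeps the .Rmd extension:
  file.path("figure", basename("analysis/example.Rmd"))
  #> [1] "figure/example.Rmd"
  # fig_path_ext = TRUE drops it:
  file.path("figure", basename(tools::file_path_sans_ext("analysis/example.Rmd")))
  #> [1] "figure/example"
}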
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_options.R
|
# wflow_paths
#
# Internal function to obtain relevant paths for workflowr project. The paths
# are relative to the current working directory.
#
# error_git: Should the function fail if it can't find a Git repo.
#
# project: path to the workflowr project
#
# Return a list with following elements:
#
# $root: The root directory of the workflowr project
#
# $analysis: The directory that contains \code{_site.yml} and the R Markdown
# files.
#
# $docs: The directory that contains the HTML files and figures.
#
# $git: The .git directory
wflow_paths <- function(error_git = FALSE, project = ".") {
o <- list()
# workflowr root
project <- absolute(project)
o$root <- try(rprojroot::find_rstudio_root_file(path = project),
silent = TRUE)
if (inherits(o$root, "try-error"))
stop(wrap(
"Unable to detect a workflowr project. This could be due to one of the
following reasons:
1) The function was not executed inside a workflowr project. Run
`getwd()` to determine the current working directory. Is the working
directory a workflowr project or one of its subdirectories?
2) The RStudio .Rproj file was deleted. workflowr requires an RStudio
.Rproj file to be located at the root of the project. Was it deleted?"),
call. = FALSE)
# Analysis directory with _site.yml
top_level_files <- list.files(path = o$root, full.names = TRUE)
subdirs <- top_level_files[fs::dir_exists(top_level_files)]
site_file <- list.files(path = subdirs, pattern = "^_site.yml$",
full.names = TRUE)
if (length(site_file) == 0) {
stop(wrap("Unable to find the file _site.yml in the analysis directory.
Is this a workflowr project?"), call. = FALSE)
} else if (length(site_file) > 1) {
stop(wrap("Found more than one _site.yml file. Only one subdirectory at the
top level of the workflowr project can contain _site.yml."),
call. = FALSE)
} else {
o$analysis <- dirname(site_file)
}
# rmarkdown website requires index.Rmd file
index <- file.path(o$analysis, "index.Rmd")
if (!fs::file_exists(index)) {
stop(wrap(glue::glue(
"Invalid workflowr project. R Markdown websites require an index.Rmd
file. Unable to locate expected file: {index}")),
call. = FALSE)
}
# docs/ directory
output_dir <- yaml::yaml.load_file(site_file)$output_dir
if (is.null(output_dir))
stop(wrap("Unable to locate the website directory. Make sure to set the
variable output_dir in the file _site.yml"),
call. = FALSE)
o$docs <- absolute(file.path(o$analysis, output_dir))
# Git repository
r <- try(git2r::repository(o$root, discover = TRUE), silent = TRUE)
if (inherits(r, "try-error")) {
if (error_git) {
stop(wrap("A Git repository is required for this functionality."),
call. = FALSE)
} else {
o$git <- NA_character_
}
} else {
o$git <- absolute(git2r::workdir(r))
}
# Make paths relative to working directory
o <- lapply(o, relative)
return(o)
}
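# A return-value sketch (illustrative only): run from the root of a standard
# workflowr project, the relative paths typically look like this.
if (FALSE) {
  p <- wflow_paths()
  p$root      # "."
  p$analysis  # "analysis"
  p$docs      # "docs"
  p$git       # "." (or NA_character_ if there is no Git repo)
}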
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_paths.R
|
#' Publish the site
#'
#' \code{wflow_publish} is the main workflowr function. Use it when you are
#' ready to publish an analysis to your site. \code{wflow_publish} performs
#' three steps: 1) commit the file(s) (can include both Rmd and non-Rmd files,
#' e.g. \code{_site.yml}), 2) rebuild the R Markdown file(s), 3) commit the
#' generated website file(s). These steps ensure that the version of the HTML
#' file is created by the latest version of the R Markdown file, which is
#' critical for reproducibility.
#'
#' @param files character (default: NULL). R Markdown files and other files to
#' be added and committed with Git (step 1). Any R Markdown files will also be
#' built (step 2) and their output HTML and figures will be subsequently
#' committed (step 3). Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' The files are always built in the order they are listed.
#' @inheritParams wflow_git_commit
#' @inheritParams wflow_build
#'
#' @return Returns an object of class \code{wflow_publish}, which is a list with
#' the following elements:
#'
#' \itemize{
#'
#' \item \bold{step1}: An object of class \code{wflow_git_commit} from the first
#' step of committing the files.
#'
#' \item \bold{step2}: An object of class \code{wflow_build} from the second
#' step of building the HTML files.
#'
#' \item \bold{step3}: An object of class \code{wflow_git_commit} from the third
#' step of committing the HTML files.
#'
#' }
#'
#' @seealso \code{\link{wflow_git_commit}}, \code{\link{wflow_build}}
#'
#' @examples
#' \dontrun{
#' # single file
#' wflow_publish("analysis/file.Rmd", "Informative commit message")
#' # All tracked files that have been edited
#' wflow_publish(all = TRUE, message = "Informative commit message")
#' # A new file plus all tracked files that have been edited
#' wflow_publish("analysis/file.Rmd", "Informative commit message", all = TRUE)
#' # Multiple files
#' wflow_publish(c("analysis/file.Rmd", "analysis/another.Rmd"),
#' "Informative commit message")
#' # All R Markdown files that start with the pattern "new_"
#' wflow_publish("analysis/new_*Rmd", "Informative commit message")
#' # Republish all published files even though they haven't been modified.
#' # Useful for changing some universal aspect of the site, e.g. the theme
#' # specified in _site.yml.
#' wflow_publish("analysis/_site.yml", "Informative commit message",
#' republish = TRUE)
#' # Publish all previously published files that have been committed more
#' # recently than their corresponding HTML files. This is useful if you like to
#' # manually commit your R Markdown files.
#' wflow_publish(update = TRUE)
#' }
#'
#' @import rmarkdown
#' @export
wflow_publish <- function(
# args to wflow_git_commit
files = NULL,
message = NULL,
all = FALSE,
force = FALSE,
# args to wflow_build
update = FALSE,
republish = FALSE,
combine = "or",
view = getOption("workflowr.view"),
delete_cache = FALSE,
seed = 12345,
verbose = FALSE,
# general
dry_run = FALSE,
project = "."
) {
# To do:
# * Warning for cache directories
# * Warning if files in docs/ included
  # * Check for modifications to _site.yml. Refuse to build if it is modified
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, allow_null = TRUE, files_only = FALSE,
convert_to_relative_paths = TRUE)
if (is.null(message)) {
message <- deparse(sys.call())
message <- paste(message, collapse = "\n")
} else if (is.character(message)) {
    message <- create_newlines(message)
} else {
stop("message must be NULL or a character vector")
}
assert_is_flag(all)
assert_is_flag(force)
assert_is_flag(update)
assert_is_flag(republish)
combine <- match.arg(combine, choices = c("or", "and"))
assert_is_flag(view)
assert_is_flag(delete_cache)
if (!(is.numeric(seed) && length(seed) == 1))
stop("seed must be a one element numeric vector")
assert_is_flag(verbose)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
if (isTRUE(getOption("workflowr.autosave"))) autosave()
# Assess project status ------------------------------------------------------
s0 <- wflow_status(project = project)
r <- git2r::repository(path = s0$git)
commit_current <- git2r::commits(r, n = 1)[[1]]
if (!dry_run) check_git_config(project, "`wflow_publish`")
# Step 0: Confirm there is something to do -----------------------------------
if (is.null(files) && !all && !update && !republish && !dry_run)
stop("You did not tell wflow_publish() what to publish.\n",
"Unlike wflow_build(), it requires that you name the Rmd files you want to publish.\n")
# Step 1: Commit analysis files ----------------------------------------------
# Decide if wflow_git_commit should be run. At least one of the following
# scenarios must be true:
#
# 1) Rmd files were specified and at least one is scratch (untracked) or has
# unstaged/staged changes
#
# 2) `all == TRUE` and at least one tracked file has unstaged/staged changes
#
# 3) At least one non-Rmd file was specified
scenario1 <- !is.null(files) &&
any(unlist(s0$status[files, c("mod_unstaged", "mod_staged", "scratch")]),
na.rm = TRUE)
scenario2 <- all &&
any(unlist(s0$status[s0$status$tracked, c("mod_unstaged", "mod_staged")]),
na.rm = TRUE)
scenario3 <- !is.null(files) &&
any(!(files %in% rownames(s0$status)))
if (scenario1 || scenario2 || scenario3) {
step1 <- wflow_git_commit_(files = files, message = message,
all = all, force = force,
dry_run = dry_run, project = project)
# If subsequent steps fail, undo this action by resetting the Git repo to
# its initial state.
on.exit(git2r::reset(commit_current, reset_type = "mixed"), add = TRUE)
s1 <- wflow_status(project = project)
} else {
step1 <- NULL
s1 <- s0
}
# Step 2: Build HTML files----------------------------------------------------
# Determine if there are any files to be built.
files_to_build <- character()
# Specified files
files_to_build <- union(files_to_build,
files[files %in% rownames(s1$status)])
# Files committed in Step 1
files_to_build <- union(files_to_build,
step1$commit_files[
step1$commit_files %in% rownames(s1$status)])
# Check if the user wants an intersect build or union build of files
if (combine == "and" && length(files_to_build) == 0) {
stop("combine = \"and\" can only be used when explicitly specifying Rmd files to build with the argument `files`")
}
if (combine == "and") {
combine_files_function <- intersect
} else if (combine == "or") {
combine_files_function <- union
}
# If `republish == TRUE`, all published files
if (republish) {
files_to_build <- combine_files_function(files_to_build,
rownames(s1$status)[s1$status$published])
}
# If `update == TRUE`, all published files with committed modifications
if (update) {
files_to_build <- combine_files_function(files_to_build,
rownames(s1$status)[s1$status$mod_committed])
}
# None of these files can have unstaged/staged changes
files_to_build <- files_to_build[!s1$status[files_to_build, "mod_unstaged"]]
files_to_build <- files_to_build[!s1$status[files_to_build, "mod_staged"]]
if (length(files_to_build) > 0) {
# Create a backup copy of the docs/ directory. If either step 2 (build the
# HTML) or step 3 (commit the HTML) fails, delete docs/ and restore backup
if (fs::dir_exists(s1$docs) && !dry_run) {
docs_backup <- tempfile(pattern = sprintf("docs-backup-%s-",
format(Sys.time(),
"%Y-%m-%d-%Hh-%Mm-%Ss")))
fs::dir_create(docs_backup)
docs_backup <- absolute(docs_backup)
file.copy(from = file.path(s1$docs, "."), to = docs_backup,
recursive = TRUE, copy.date = TRUE)
on.exit(unlink(s1$docs, recursive = TRUE), add = TRUE)
on.exit(fs::dir_create(s1$docs), add = TRUE)
on.exit(file.copy(from = file.path(docs_backup, "."), to = s1$docs,
recursive = TRUE, copy.date = TRUE), add = TRUE)
}
step2 <- wflow_build_(files = files_to_build, make = FALSE,
update = update, republish = republish,
combine = combine,
view = view, clean_fig_files = TRUE,
delete_cache = delete_cache, seed = seed,
local = FALSE, verbose = verbose,
log_dir = use_default_log_dir(),
dry_run = dry_run, project = project)
} else {
step2 <- NULL
}
# Step 3 : Commit HTML files -------------------------------------------------
# Step 3 only needs to be performed if files were built in step 2.
if (length(step2$built) > 0) {
# Have to loop on step2$built as an underlying git2r function requires a
# length 1 character vector
figs_path <- vapply(step2$built, create_figure_path, character(1))
dir_figure <- file.path(s0$docs, figs_path)
site_libs <- file.path(s0$docs, "site_libs")
docs_nojekyll <- file.path(s0$docs, ".nojekyll")
docs_css <- list.files(path = s0$docs, pattern = "css$", full.names = TRUE)
docs_js <- list.files(path = s0$docs, pattern = "js$", full.names = TRUE)
files_to_commit <- c(step2$html, dir_figure, site_libs, docs_nojekyll,
docs_css, docs_js)
    # Call the internal function `wflow_git_commit_` directly to bypass input checks.
# In a dry run, some files may not actually exist yet. Also, not every Rmd
# file creates figures, but it's easier to just attempt to add figures for
# every file.
step3 <- wflow_git_commit_(files = files_to_commit, message = "Build site.",
all = FALSE, force = force,
dry_run = dry_run, project = project)
} else {
step3 <- NULL
}
# Prepare output -------------------------------------------------------------
o <- list(step1 = step1, step2 = step2, step3 = step3)
class(o) <- "wflow_publish"
# If everything worked, erase the on.exit code that would have reset
# everything.
on.exit()
return(o)
}
#' @export
print.wflow_publish <- function(x, ...) {
cat("Summary from wflow_publish\n\n")
cat("**Step 1: Commit analysis files**\n\n")
if (is.null(x$step1)) {
cat("No files to commit\n\n")
} else {
print(x$step1)
}
cat("\n**Step 2: Build HTML files**\n\n")
if (is.null(x$step2)) {
cat("No files to build\n\n")
} else {
print(x$step2)
}
cat("\n**Step 3: Commit HTML files**\n\n")
if (is.null(x$step3)) {
cat("No HTML files to commit\n\n")
} else {
print(x$step3)
}
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_publish.R
|
# RStudio Addin (Shiny Gadget) for wflow_publish().
wflow_publish_addin <- function() {
if (!requireNamespace("miniUI", quietly = TRUE))
stop("The miniUI package is required to use this RStudio addin.\n",
"Please install with install.packages(\"miniUI\")",
call. = FALSE)
s <- wflow_status()
site_yml_path <- relative(file.path(s$analysis, "_site.yml"))
wflow_yml_path <- relative(file.path(s$root, "_workflowr.yml"))
flower_url <- "https://raw.githubusercontent.com/workflowr/workflowr-assets/main/img/flower-purple.png"
logo_url <- "https://raw.githubusercontent.com/workflowr/workflowr-assets/main/img/logo-workflowr-inverse.png"
ui <- miniUI::miniPage(
miniUI::gadgetTitleBar("Publish workflowr website",
right = miniUI::miniTitleBarButton("done", "Publish",
primary = TRUE)),
miniUI::miniContentPanel(
shiny::sidebarLayout(position = "right",
shiny::sidebarPanel(
shiny::h3(shiny::img(src = flower_url, height = "50px"), "Options"),
shiny::checkboxInput("all", "all", value = FALSE),
shiny::checkboxInput("force", "force", value = FALSE),
shiny::checkboxInput("update", "update", value = FALSE),
shiny::checkboxInput("republish", "republish", value = FALSE),
shiny::checkboxInput("delete_cache", "delete_cache", value = FALSE),
shiny::checkboxInput("dry_run", "dry_run", value = FALSE)
),
shiny::mainPanel(
shiny::selectInput(inputId = "files",
label = "Select files to publish:",
choices = rownames(s$status),
selected = NULL,
multiple = TRUE),
shiny::checkboxInput("site_yml",
glue::glue("Include {site_yml_path}"),
value = s$site_yml),
shiny::checkboxInput("wflow_yml",
glue::glue("Include {wflow_yml_path}"),
value = !is.null(s$wflow_yml) && s$wflow_yml),
shiny::textAreaInput(inputId = "message",
label = "Describe the changes you made",
placeholder = "Enter your commit message")
)
),
shiny::p("Click the Publish button to execute the following command:"),
shiny::verbatimTextOutput("cmd_to_run")
)
)
server <- function(input, output, session) {
cmd <- shiny::reactive({
files <- c(input$files)
if (input$site_yml) files <- c(site_yml_path, files)
if (input$wflow_yml) files <- c(wflow_yml_path, files)
cmd_parts <- c(" workflowr::wflow_publish(")
if (!is.null(files)) {
files_string <- paste(utils::capture.output(dput(files)), collapse = "")
cmd_parts <- c(cmd_parts, glue::glue("\n files = {files_string},", .trim = FALSE))
}
if (input$message != "") cmd_parts <- c(cmd_parts, glue::glue("\n message = \"{input$message}\",", .trim = FALSE))
if (input$all) cmd_parts <- c(cmd_parts, "\n all = TRUE,")
if (input$force) cmd_parts <- c(cmd_parts, "\n force = TRUE,")
if (input$update) cmd_parts <- c(cmd_parts, "\n update = TRUE,")
if (input$republish) cmd_parts <- c(cmd_parts, "\n republish = TRUE,")
if (input$delete_cache) cmd_parts <- c(cmd_parts, "\n delete_cache = TRUE,")
if (input$dry_run) cmd_parts <- c(cmd_parts, "\n dry_run = TRUE,")
cmd_parts[length(cmd_parts)] <- stringr::str_replace(cmd_parts[length(cmd_parts)],
",$", "")
cmd_parts <- c(cmd_parts, ")")
return(cmd_parts)
})
output$cmd_to_run <- shiny::renderText({ cmd() })
shiny::observeEvent(input$done, {
shiny::stopApp()
rstudioapi::sendToConsole(cmd())
})
}
viewer <- shiny::dialogViewer("wflow_publish()", width = 1000, height = 800)
shiny::runGadget(ui, server, viewer = viewer)
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_publish_addin.R
|
#' Quickly start a workflowr project
#'
#' \code{wflow_quickstart} provides a simple interface to effortlessly create a
#' workflowr project from an existing data analysis.
#'
#' \code{wflow_quickstart} performs the following steps:
#'
#' \itemize{
#'
#' \item Starts a new project with \code{\link{wflow_start}}
#'
#' \item Copies the Rmd file(s) to the subdirectory \code{analysis/}
#'
#' \item Copies the supporting file(s) and/or directory(s) to the root of the
#' project (Note: by default Rmd files are executed in the root of the project,
#' so relative file paths should still work)
#'
#' \item Adds link(s) to the results to the main index page
#'
#' \item Publishes the Rmd files with \code{\link{wflow_publish}}
#'
#' \item Configures the remote repository with \code{\link{wflow_use_github}} or
#' \code{\link{wflow_use_gitlab}}
#'
#' }
#'
#' Once it has completed, you can push to the remote service with
#' \code{\link{wflow_git_push}}. Alternatively you can run \code{git push} in the
#' terminal.
#'
#' If you are using GitHub and you chose to not allow workflowr to create the
#' repository for you, then you will have to login to your account and create
#' the new repository yourself. If you're using GitLab, you don't have to worry
#' about this because the new repository will be automatically created when you
#' push.
#'
#' @param files character. The R Markdown file(s) to be copied into the
#' subdirectory \code{analysis/} of the newly created workflowr project. If
#' the argument \code{directory} is left as \code{NULL}, the workflowr project
#' will be named after the first Rmd file. This new directory will be located
#' in the current working directory. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param username character (default: NULL). The GitHub or GitLab personal
#' account you want to use to create the remote Git repository. It can also be
#' the name of a GitLab Group that you belong to. However, if it is a GitHub
#' organization, instead use the argument \code{organization}.
#' @param organization The GitHub organization account you want to use to
#' create the remote Git repository.
#' @param supporting_files character (default: NULL) Supporting files or
#' directories that are used by the Rmd files. These will be copied to the
#' root of the project. Since by default Rmd files are executed in the root of
#' the project, any relative file paths should still work. Long term it is
#' recommended to move these supporting files to subdirectories of the
#' workflowr project, e.g. \code{data/}.
#' @param directory character (default: NULL). The path to the directory to
#' create the workflowr project. This directory will also be used to name the
#' remote Git repository. If left as \code{NULL}, the name is derived from the
#' first Rmd file that is passed to the argument \code{files}.
#' @param change_wd logical (default: TRUE). Change the working directory to
#' the newly created workflowr project. Passed to \code{\link{wflow_start}}.
#' @param delete_on_error logical (default: TRUE). Delete the newly created
#' project if any error occurs.
#' @param view logical (default: \code{getOption("workflowr.view")}). View the
#' local website after it is built (will open the home page in the RStudio
#' Viewer pane or your web browser).
#' @param git.user.name character (default: \code{NULL}). The user name
#' used by Git to sign commits, e.g., "Ada Lovelace". This setting
#' only applies to the workflowr project being created. To specify the
#' global setting for the Git user name, use
#'   \code{\link{wflow_git_config}} instead. When \code{git.user.name =
#'   NULL}, no user name is recorded for the project, and the global
#' setting will be used. This setting can be modified later
#' by running \code{git config --local} in the Terminal.
#' @param git.user.email character (default: \code{NULL}). The email
#' address used by Git to sign commits, e.g.,
#' "[email protected]". This setting only applies to the workflowr
#' project being created. To specify the global setting for the Git
#' email address, use \code{\link{wflow_git_config}} instead. When
#'   \code{git.user.email = NULL}, no email address is recorded for the
#' project, and the global setting will be used. This setting can be
#' modified later by running \code{git config --local} in the Terminal.
#' @param host character. Choose the service for hosting the Git repository.
#' Must be either "github" for GitHub.com or "gitlab" for GitLab.com.
#' @inheritParams wflow_use_github
#'
#' @return Invisibly returns the absolute path to the newly created workflowr
#' project.
#'
#' @seealso \link{workflowr}, \code{\link{wflow_start}}, \code{\link{wflow_publish}},
#' \code{\link{wflow_use_github}}, \code{\link{wflow_use_gitlab}},
#' \code{\link{wflow_git_push}}
#'
#' @examples
#' \dontrun{
#'
#' wflow_quickstart(files = "existing-analysis.Rmd", username = "your-github-username")
#' }
#'
#' @export
wflow_quickstart <- function(files,
username = NULL,
organization = NULL,
supporting_files = NULL,
directory = NULL,
change_wd = TRUE,
delete_on_error = TRUE,
view = getOption("workflowr.view"),
git.user.name = NULL,
git.user.email = NULL,
host = c("github", "gitlab"),
create_on_github = NULL
) {
message("wflow_quickstart:")
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, rmd_only = TRUE)
files <- absolute(files)
if (!is.null(username))
if (!(is.character(username) && length(username) == 1))
stop("username must be NULL or a one element character vector: ", username)
if (!is.null(organization))
if (!(is.character(organization) && length(organization) == 1))
stop("organization must be NULL or a one element character vector: ", organization)
if (is.character(username) && is.character(organization))
stop("Cannot set both username and organization.",
" Only one GitHub account can own the repository.")
  # Resolve host early so it can be used safely in the scalar check below
  host <- match.arg(host, choices = c("github", "gitlab"))
  if (is.character(organization) && host == "gitlab")
stop("Do not use the argument \"organization\" for creating a repository on ",
"GitLab. Instead use the argument \"username\" for either a personal or ",
"Group account.")
supporting_files <- process_input_files(supporting_files, allow_null = TRUE,
files_only = FALSE)
supporting_files <- absolute(supporting_files)
assert_is_flag(delete_on_error)
assert_is_flag(view)
if (!is.null(directory))
if (!(is.character(directory) && length(directory) == 1))
stop("directory must be NULL or a one element character vector: ", directory)
check_wd_exists()
# Determine directory --------------------------------------------------------
if (is.null(directory)) {
directory <- fs::path_ext_remove(fs::path_file(files[1]))
}
directory <- absolute(directory)
if (fs::dir_exists(directory))
stop("wflow_quickstart() does not support existing directories")
# Delete on error ------------------------------------------------------------
cwd <- getwd()
delete_on_error_fun <- function(path, wd) {
if (getwd() != wd) {
setwd(wd)
message(glue::glue("* Returned working directory to {wd}"))
}
if (fs::dir_exists(path)) {
wflow_delete(path)
message(glue::glue("* Deleted workflowr project at {path}"))
message("* To keep the project in its unfinished state, set delete_on_error=FALSE")
}
}
if (delete_on_error) {
on.exit(delete_on_error_fun(directory, wd = cwd))
} else {
on.exit(message("* An unexpected error occurred"))
}
# Start the project ----------------------------------------------------------
start <- wflow_start(directory = directory, change_wd = change_wd,
user.name = git.user.name, user.email = git.user.email)
message(glue::glue("* Started the project with wflow_start() in {directory}/"))
if (change_wd) {
message(glue::glue("* Changed working directory to {directory}/"))
}
# Copy the Rmd file(s) -------------------------------------------------------
if (change_wd) {
files <- relative(files, start = directory)
}
new_path <- file.path(directory, "analysis")
for (f in files) {
fs::file_copy(f, new_path)
message(glue::glue("* Copied {fs::path_file(f)} to {new_path}/"))
}
# Add links to index.Rmd -----------------------------------------------------
index <- file.path(directory, "analysis", "index.Rmd")
rmd_names <- fs::path_ext_remove(fs::path_file(files))
html <- paste0(rmd_names, ".html")
links <- glue::glue("* [{rmd_names}]({html})")
cat(c("\n", links), file = index, sep = "\n", append = TRUE)
# Copy the supporting files --------------------------------------------------
if (change_wd) {
supporting_files <- relative(supporting_files, start = directory)
}
if (!is.null(supporting_files)) {
for (f in supporting_files) {
if (fs::is_dir(f)) {
fs::dir_copy(f, directory)
message(glue::glue("* Copied {fs::path_file(f)}/ to {directory}/"))
} else {
fs::file_copy(f, directory)
message(glue::glue("* Copied {fs::path_file(f)} to {directory}/"))
}
}
}
# Commit the supporting files ------------------------------------------------
if (!is.null(supporting_files)) {
commit_supporting <- wflow_git_commit(
files = file.path(directory, fs::path_file(supporting_files)),
message = "Commit supporting files from wflow_quickstart()",
project = directory
)
message("* Committed supporting files")
}
# Configure remote repository ------------------------------------------------
if (host == "github") {
# For now, only perform local operations. Attempt to create GitHub repo
# below after publishing.
gh_result <- suppressMessages(wflow_use_github(username = username,
organization = organization,
create_on_github = FALSE,
project = directory))
message("* Configured local Git repo to host project on GitHub.com")
} else if (host == "gitlab") {
suppressMessages(wflow_use_gitlab(username = username, project = directory))
message("* Configured local Git repo to host on project GitLab.com")
}
# Publish the Rmd file(s) ----------------------------------------------------
message("* Building files")
publish <- suppressMessages(wflow_publish(files = file.path(directory, "analysis", "*Rmd"),
message = "Quickstart commit from wflow_quickstart()",
view = FALSE,
project = directory))
message("* Published the analysis files with wflow_publish()")
# Attempt to create remote repository on GitHub.com --------------------------
if (host == "github") {
gh_result <- suppressMessages(wflow_use_github(username = username,
organization = organization,
create_on_github = create_on_github,
project = directory))
if (!gh_result$repo_created) {
message(glue::glue("* To do: Create {gh_result$repository} on GitHub.com"))
}
}
# Return ---------------------------------------------------------------------
if (!change_wd) {
message(glue::glue("* Current working directory is still {getwd()}/"))
}
message(glue::glue("* To do: Run wflow_git_push() to push your project to {host}"))
if (view) {
viewed <- wflow_view(index, project = directory)
}
# Cancel exit function delete_on_error_fun() since there was no error
on.exit()
return(invisible(directory))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_quickstart.R
|
#' Remove files
#'
#' \code{wflow_remove} removes files. If the file to be removed is an R Markdown
#' file, the corresponding HTML and other related files are also removed. If the
#' workflowr project uses Git, \code{wflow_remove} commits the changes.
#'
#' @param files character. Files to be removed. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param git logical (default: TRUE). Commit the changes (only applicable if
#' Git repository is present).
#' @param dry_run logical (default: FALSE). Preview the files to be removed but
#' do not actually remove them.
#' @inheritParams wflow_git_commit
#'
#' @return An object of class \code{wflow_remove}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{files}: The relative path(s) to the removed file(s).
#'
#' \item \bold{message}: The message describing the commit (if applicable).
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}.
#'
#' \item \bold{commit}:The object returned by
#' \link{git2r}::\code{\link[git2r]{commit}} (only included if \code{dry_run
#' == FALSE}).
#'
#' \item \bold{files_git}: The relative path(s) to the file(s) removed from
#' the Git repository.
#'
#' }
#'
#' @seealso \code{\link{wflow_git_commit}}
#'
#' @examples
#' \dontrun{
#'
#' # Remove a single file
#' wflow_remove("analysis/file.Rmd", "Remove old analysis.")
#' # Remove multiple files
#' wflow_remove(c("analysis/file.Rmd", "output/small-data.txt"),
#' "Remove old analysis and its associated data.")
#' }
#'
#' @export
wflow_remove <- function(files,
message = NULL,
git = TRUE,
dry_run = FALSE,
project = ".") {
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, files_only = FALSE,
convert_to_relative_paths = TRUE)
if (is.null(message)) {
message <- deparse(sys.call())
message <- paste(message, collapse = "\n")
} else if (is.character(message)) {
message <- create_newlines(message)
} else {
stop("message must be NULL or a character vector")
}
assert_is_flag(git)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Assess project status ------------------------------------------------------
p <- wflow_paths(project = project)
# Should changes be committed?
if (!is.na(p$git) && git) {
use_git <- TRUE
r <- git2r::repository(path = p$git)
} else {
use_git <- FALSE
}
if (use_git && !dry_run) check_git_config(project,
"`wflow_remove` with `git = TRUE`")
# Gather files to remove -----------------------------------------------------
# Are any of the specified files R Markdown files in the analysis directory?
files_ext <- tools::file_ext(files)
files_rmd <- files[files_ext %in% c("Rmd", "rmd")]
files_rmd <- files_rmd[absolute(files_rmd) ==
absolute(file.path(p$analysis, basename(files_rmd)))]
# If the user inputs a directory, obtain all the files in those directories so
# that they can be removed from the Git repo if applicable.
is_dir <- fs::dir_exists(files)
files_to_remove <- files[!is_dir]
dirs_to_remove <- files[is_dir]
for (d in dirs_to_remove) {
d_files <- list.files(path = d, full.names = TRUE)
files_to_remove <- c(files_to_remove, d_files)
}
for (rmd in files_rmd) {
# Corresponding HTML?
html <- to_html(rmd, outdir = p$docs)
if (fs::file_exists(html)) {
files_to_remove <- c(files_to_remove, html)
}
# Any figure files in analysis directory?
fig_path <- create_figure_path(rmd)
if (p$analysis == ".") {
dir_figs_analysis <- fig_path
} else {
dir_figs_analysis <- file.path(p$analysis, fig_path)
}
figs_analysis <- list.files(path = dir_figs_analysis, full.names = TRUE)
if (length(figs_analysis) > 0) {
files_to_remove <- c(files_to_remove, figs_analysis)
dirs_to_remove <- c(dirs_to_remove, dir_figs_analysis)
}
# Any figure files in docs directory?
if (p$docs == ".") {
dir_figs_docs <- fig_path
} else {
dir_figs_docs <- file.path(p$docs, fig_path)
}
figs_docs <- list.files(path = dir_figs_docs, full.names = TRUE)
if (length(figs_docs) > 0) {
files_to_remove <- c(files_to_remove, figs_docs)
dirs_to_remove <- c(dirs_to_remove, dir_figs_docs)
}
# Cache directory?
dir_cache <- paste0(tools::file_path_sans_ext(basename(rmd)), "_cache")
if (p$analysis != ".") {
dir_cache <- file.path(p$analysis, dir_cache)
}
if (fs::dir_exists(dir_cache)) {
files_cache <- list.files(path = dir_cache, full.names = TRUE,
recursive = TRUE)
files_to_remove <- c(files_to_remove, files_cache)
dirs_to_remove <- c(dirs_to_remove, dir_cache)
}
}
# Gather files to remove from Git --------------------------------------------
if (use_git) {
# Obtain committed files
files_committed <- relative(get_committed_files(r))
# Obtain files to be removed from Git
logical_files_git <- files_to_remove %in% files_committed
files_to_remove_from_git <- files_to_remove[logical_files_git]
} else {
files_to_remove_from_git <- NA
}
# Remove files ---------------------------------------------------------------
if (!dry_run) {
wflow_delete(files_to_remove)
# Remove the empty (though potentially nested) directories
unlink(dirs_to_remove, recursive = TRUE)
}
# Commit removed files -------------------------------------------------------
if (use_git && !dry_run && length(files_to_remove_from_git) > 0) {
git2r_add(r, files_to_remove_from_git)
git2r::commit(r, message = message)
commit <- git2r::commits(r, n = 1)[[1]]
} else {
commit <- NA
}
# Prepare output -------------------------------------------------------------
o <- list(files = files_to_remove,
message = message,
dry_run = dry_run,
commit = commit,
files_git = files_to_remove_from_git)
class(o) <- "wflow_remove"
return(o)
}
#' @export
print.wflow_remove <- function(x, ...) {
cat("Summary from wflow_remove\n\n")
if (x$dry_run) {
cat(wrap("The following would be removed:"), "\n\n")
} else {
cat(wrap("The following was removed:"), "\n\n")
}
cat(x$files, sep = "\n")
if (length(x$files_git) > 0 && !all(is.na(x$files_git))) {
if (x$dry_run) {
cat("\n", wrap("The following would be removed from the Git repo:"),
"\n\n", sep = "")
} else {
cat("\n", wrap(sprintf(
"The following was removed from the Git repo in commit %s:",
stringr::str_sub(x$commit$sha, start = 1, end = 7))),
"\n\n", sep = "")
}
cat(x$files_git, sep = "\n")
cat("\ncommit message:\n")
cat(x$message)
cat("\n")
}
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_remove.R
|
#' Rename files and directories
#'
#' \code{wflow_rename} renames files and directories. If the file to be renamed
#' is an R Markdown file, the corresponding HTML and other related files are
#' also renamed. If the workflowr project uses Git, \code{wflow_rename} commits
#' the changes.
#'
#' @param files character. Files to be renamed. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param to character. New names for the files. Must be the same length as
#' \code{files}.
#' @param git logical (default: TRUE). Commit the changes (only applicable if
#' Git repository is present).
#' @param dry_run logical (default: FALSE). Preview the files to be renamed but
#' do not actually rename them.
#' @inheritParams wflow_git_commit
#'
#' @return An object of class \code{wflow_rename}, which is a list with the
#' following elements:
#'
#' \itemize{
#'
#' \item \bold{files}: The relative path(s) to the renamed file(s).
#'
#' \item \bold{to}: The new relative path(s) to rename the file(s).
#'
#' \item \bold{message}: The message describing the commit (if applicable).
#'
#' \item \bold{git}: The input argument \code{git}.
#'
#' \item \bold{dry_run}: The input argument \code{dry_run}.
#'
#' \item \bold{commit}:The object returned by
#' \link{git2r}::\code{\link[git2r]{commit}} (only included if \code{dry_run
#' == FALSE}).
#'
#' \item \bold{files_git}: The relative path(s) to the file(s) renamed from
#' the Git repository.
#'
#' }
#'
#' @seealso \code{\link{wflow_git_commit}}
#'
#' @examples
#' \dontrun{
#'
#' # rename a single file
#' wflow_rename("analysis/file.Rmd", "analysis/new.Rmd", "rename old analysis.")
#' # rename multiple files
#' wflow_rename(c("analysis/file.Rmd", "output/small-data.txt"),
#' c("analysis/new.Rmd", "output/new-data.txt"),
#' "rename old analysis and its associated data.")
#' }
#'
#' @export
wflow_rename <- function(files,
to,
message = NULL,
git = TRUE,
dry_run = FALSE,
project = ".") {
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, files_only = FALSE,
convert_to_relative_paths = TRUE)
assert_is_character(to)
if (length(to) != length(files))
stop("to must be a character vector of filenames the same length as files")
# Warning: this will not resolve symlinks since the files do not yet exist
to <- relative(to)
if (is.null(message)) {
message <- deparse(sys.call())
message <- paste(message, collapse = "\n")
} else if (is.character(message)) {
message <- create_newlines(message)
} else {
stop("message must be NULL or a character vector")
}
assert_is_flag(git)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Assess project status ------------------------------------------------------
p <- wflow_paths(project = project)
# Should changes be committed?
use_git <- !is.na(p$git) && git
# Even if not committing files, still assess the Git repository
if (!is.na(p$git)) {
r <- git2r::repository(path = p$git)
}
# Early stops
if (use_git && !dry_run) {
# Git must be configured
check_git_config(project, "`wflow_rename` with `git = TRUE`")
# No staged files
check_staged_changes(project, "`wflow_rename` with `git = TRUE`")
}
# No HTML files in website directory
# No figures or figure directories
# from and to must have same file extensions
# Rmd file in analysis/ must be renamed to the same directory
# Gather R Markdown accessory files to rename --------------------------------
# Are any of the specified files R Markdown files in the analysis directory?
files_ext <- tools::file_ext(files)
rmd <- which(files_ext %in% c("Rmd", "rmd") &
absolute(files) == absolute(file.path(p$analysis, basename(files))))
for (i in rmd) {
# Corresponding HTML?
html1 <- to_html(files[i], outdir = p$docs)
html2 <- to_html(to[i], outdir = p$docs)
if (fs::file_exists(html1)) {
files <- c(files, html1)
to <- c(to, html2)
}
# Any figure files in docs directory?
if (p$docs == ".") {
dir_figs_docs1 <- file.path("figure", basename(files[i]))
dir_figs_docs2 <- file.path("figure", basename(to[i]))
} else {
dir_figs_docs1 <- file.path(p$docs, "figure", basename(files[i]))
dir_figs_docs2 <- file.path(p$docs, "figure", basename(to[i]))
}
if (fs::dir_exists(dir_figs_docs1)) {
files <- c(files, dir_figs_docs1)
to <- c(to, dir_figs_docs2)
}
}
# Expand directories ---------------------------------------------------------
is_dir <- fs::dir_exists(files)
dirs_from <- files[is_dir]
dirs_to <- to[is_dir]
files <- files[!is_dir]
to <- to[!is_dir]
for (i in seq_along(dirs_from)) {
d_files_from <- list.files(path = dirs_from[i], all.files = TRUE,
full.names = TRUE, recursive = TRUE)
# Replace with new directory name
d_files_to <- stringr::str_replace(d_files_from, dirs_from[i], dirs_to[i])
files <- c(files, d_files_from)
to <- c(to, d_files_to)
}
# Gather files to commit -----------------------------------------------------
if (!is.na(p$git)) {
# Obtain committed files
files_committed <- relative(get_committed_files(r))
# Obtain files to commit
logical_files_git <- files %in% files_committed
files_to_commit <- c(files[logical_files_git], to[logical_files_git])
} else {
files_to_commit <- NA_character_
}
# rename files ---------------------------------------------------------------
if (!dry_run) {
# Create any new directories, otherwise rename would not work
lapply(to, function(x) {
fs::dir_create(dirname(x))
})
# Rename individual files
fs::file_move(path = files, new_path = to)
# Remove any previous directories
lapply(dirs_from, unlink, recursive = TRUE)
}
# Commit renamed files -------------------------------------------------------
if (use_git && !dry_run && length(files_to_commit) > 0) {
git2r_add(r, files_to_commit)
git2r::commit(r, message = message)
commit <- git2r::commits(r, n = 1)[[1]]
} else {
commit <- NA
}
# Prepare output -------------------------------------------------------------
o <- list(files = files,
to = to,
message = message,
git = git,
dry_run = dry_run,
commit = commit,
# Re-run relative() on files_to_commit to resolve any potential
# symlinks in paths to newly created files
files_git = relative(files_to_commit))
class(o) <- "wflow_rename"
return(o)
}
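# A minimal usage sketch (not run when sourced; file names are hypothetical).
# The returned object shows that the corresponding HTML and figure files are
# gathered and renamed along with the Rmd file:
if (FALSE) {
  res <- wflow_rename("analysis/old.Rmd", "analysis/new.Rmd",
                      message = "Rename analysis file", dry_run = TRUE)
  data.frame(from = res$files, to = res$to)
}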
#' @export
print.wflow_rename <- function(x, ...) {
cat("Summary from wflow_rename\n\n")
if (x$dry_run) {
cat(wrap("The following file(s) would be renamed:"), "\n\n")
} else {
cat(wrap("The following files(s) were renamed:"), "\n\n")
}
cat(sprintf("%s -> %s", x$files, x$to), sep = "\n")
if (length(x$files_git) > 0 && !all(is.na(x$files_git)) && x$git) {
if (x$dry_run) {
cat("\n", wrap("The following file(s) would be included in the Git commit:"),
"\n\n", sep = "")
} else {
cat("\n", wrap(sprintf(
"The following file(s) were included in Git commit %s:",
stringr::str_sub(x$commit$sha, start = 1, end = 7))),
"\n\n", sep = "")
}
cat(x$files_git, sep = "\n")
cat("\ncommit message:\n")
cat(x$message)
cat("\n")
}
return(invisible(x))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_rename.R
#' Rename a workflowr project
#'
#' If you want to rename an existing workflowr project, use
#' \code{wflow_rename_proj} to update the name throughout all the project files.
#'
#' \code{wflow_rename_proj} performs the following steps and then commits the
#' changes:
#'
#' \itemize{
#'
#' \item Rename RStudio Project file (\code{.Rproj})
#'
#' \item Update URL of remote repository (see \code{\link{wflow_git_remote}})
#'
#' \item Update project name in the navigation bar (defined in \code{_site.yml})
#'
#' \item Update title of README file
#'
#' \item Rename the project directory itself
#'
#' }
#'
#' After renaming the project with \code{wflow_rename_proj}, you should
#' republish the R Markdown files with \code{wflow_publish(republish = TRUE)}.
#' Also, you should go to the settings of your Git repository on the online Git
#' hosting platform to change its name.
#'
#' @param name character. The new name for the workflowr project.
#' @param rproj logical (default: TRUE). Rename the RStudio Project file.
#' @param remote logical (default: TRUE). Rename the remote URL.
#' @param navbar logical (default: TRUE). Rename the navbar title.
#' @param readme logical (default: TRUE). Rename the README title.
#' @param commit logical (default: TRUE). Commit the changes to Git.
#' @param directory logical (default: TRUE). Rename the project directory.
#' @inheritParams wflow_git_commit
#'
#' @return Invisibly returns the path to the project directory
#'
#' @seealso \code{\link{wflow_publish}}
#'
#' @examples
#' \dontrun{
#'
#' wflow_rename_proj("new-project-name")
#' }
#'
#'@export
wflow_rename_proj <- function(name,
rproj = TRUE,
remote = TRUE,
navbar = TRUE,
readme = TRUE,
commit = TRUE,
directory = TRUE,
project = ".") {
# Check input arguments ------------------------------------------------------
if (!(is.character(name) && length(name) == 1))
stop("name must be NULL or a one element character vector: ", name)
assert_is_flag(rproj)
assert_is_flag(remote)
assert_is_flag(navbar)
assert_is_flag(readme)
assert_is_flag(commit)
assert_is_flag(directory)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
check_git_config(project, "`wflow_rename_proj`")
message("Summary from wflow_rename_proj():")
# Status ---------------------------------------------------------------------
s <- wflow_status(project = project)
r <- git2r::repository(path = s$git)
# Rename RStudio Project file ------------------------------------------------
if (rproj) {
rproj_old <- fs::dir_ls(path = s$root, regexp = "\\.Rproj$")
rproj_new <- relative(file.path(s$root, paste0(name, ".Rproj")))
if (rproj_new == rproj_old) {
message("* RStudio Project file already named ", rproj_new)
} else {
fs::file_move(rproj_old, rproj_new)
message("* RStudio Project file renamed to ", rproj_new)
}
}
# Update URL of remote repository --------------------------------------------
if (remote) {
remote_avail <- wflow_git_remote(verbose = FALSE, project = project)
if ("origin" %in% names(remote_avail)) {
url_old <- remote_avail["origin"]
url_new <- stringr::str_replace(url_old, "/[:alnum:]+\\.git$",
paste0("/", name, ".git"))
git2r::remote_set_url(repo = r, name = "origin", url = url_new)
message("* Remote \"origin\" URL renamed to ", url_new)
}
}
# Update project name in the navigation bar ----------------------------------
if (navbar) {
site_yml_fname <- file.path(s$analysis, "_site.yml")
site_yml <- yaml::yaml.load_file(site_yml_fname)
site_yml$name <- name
site_yml$navbar$title <- name
yaml::write_yaml(site_yml, file = site_yml_fname)
message("* Renamed project in navigation bar")
}
# Update title of README file ------------------------------------------------
if (readme) {
readme_fname <- file.path(s$root, "README.md")
readme_title <- paste("#", name)
readme_lines <- readLines(readme_fname)
readme_lines[1] <- readme_title
writeLines(readme_lines, readme_fname)
message("* README.md title: ", paste("#", name))
}
# Commit changes -------------------------------------------------------------
if (commit) {
if (rproj) git2r_add(r, c(rproj_new, rproj_old))
if (navbar) git2r_add(r, site_yml_fname)
if (readme) git2r_add(r, readme_fname)
staged <- git2r::status(r)$staged
if (length(staged) > 0) {
commit_rename <- git2r::commit(r, paste("Rename project to", name))
sha <- commit_rename$sha
message("* Committed changes in ", stringr::str_sub(sha, 1, 7))
} else {
message("* No changes to commit")
}
}
if (any(s$status$published))
message("* To do: Republish analyses with wflow_publish(republish = TRUE)")
# Rename project directory ---------------------------------------------------
dir_path <- absolute(s$root)
dir_path_parts <- fs::path_split(dir_path)[[1]]
dir_path_parts[[length(dir_path_parts)]] <- name
dir_path_new <- fs::path_join(dir_path_parts)
dir_path_new <- as.character(dir_path_new)
if (directory) {
if (dir_path != dir_path_new) {
wd <- absolute(getwd())
if (wd == dir_path) { # Cannot rename current directory in Windows
setwd(fs::path_temp())
on.exit(setwd(dir_path_new))
}
fs::file_move(dir_path, dir_path_new)
message("* Renamed project directory: ", dir_path_new)
} else {
message("* Project directory already named: ", dir_path_new)
}
}
# Return ---------------------------------------------------------------------
message("* To do: Rename repository in settings of your Git hosting service")
return(invisible(dir_path_new))
}
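# A minimal usage sketch (not run when sourced; the project name is
# hypothetical). Rename the project files but leave the remote URL and the
# directory name untouched:
if (FALSE) {
  wflow_rename_proj("new-project-name", remote = FALSE, directory = FALSE)
}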
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_rename_proj.R
#' Run the code
#'
#' \code{wflow_run} executes the code chunks of an R Markdown file in the
#' current R session without affecting any of the website files. This is meant
#' to be used while interactively developing an analysis. It does \strong{not}
#' change the working directory, isolate the computation from the current R
#' session, nor set the seed of the random number generator. This is analogous
#' to the RStudio option "Run all" to run all the code chunks. Use
#' \code{\link{wflow_publish}} when you are ready to add the results to the
#' website.
#'
#' @param file character (default: \code{NULL}). The R Markdown file to execute.
#' Must have file extension Rmd or rmd. If \code{NULL}, the most recently
#' modified Rmd file will be executed.
#' @param verbose logical (default: \code{TRUE}). Should the lines of code (and
#' their output) be echoed in the R console as they are executed? This
#' argument is passed directly to the argument \code{echo} of the function
#' \code{\link{source}}.
#' @inheritParams wflow_git_commit
#'
#' @return Invisibly returns the path to the Rmd file that was executed
#'
#' @seealso \code{\link{wflow_build}} with argument \code{local = TRUE},
#' \code{\link{source}} with argument \code{echo = TRUE}
#'
#' @examples
#' \dontrun{
#'
#' # Run the most recently modified Rmd file
#' wflow_run()
#' # Run a specific Rmd file
#' wflow_run("analysis/file.Rmd")
#' }
#'
#' @export
wflow_run <- function(file = NULL, verbose = TRUE, project = ".") {
file <- process_input_files(file, allow_null = TRUE, rmd_only = TRUE,
convert_to_relative_paths = TRUE)
assert_is_flag(verbose)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# If file not specified, get the most recently modified file
if (is.null(file)) {
p <- wflow_paths(project = project)
files_analysis <- list.files(path = p$analysis, pattern = "^[^_].+[Rr]md$",
full.names = TRUE)
files_analysis <- relative(files_analysis)
files_modified <- fs::file_info(files_analysis)$modification_time
file <- files_analysis[which.max(files_modified)]
}
assert_has_length(file, 1)
# Determine knit directory
wd <- getwd()
wflow_opts <- wflow_options(file)
if (wflow_opts$knit_root_dir != wd) {
warning(sprintf("Working directory does not match knit_root_dir: %s",
wflow_opts$knit_root_dir),
call. = FALSE)
}
# setwd(wflow_opts$knit_root_dir)
# on.exit(setwd(wd), add = TRUE)
r_tmp <- fs::file_temp(pattern = "workflowr-purl-", ext = ".R")
knitr::purl(file, output = r_tmp, quiet = TRUE, documentation = 0L)
source(r_tmp, echo = verbose)
return(invisible(file))
}
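# A minimal usage sketch (not run when sourced; the file name is
# hypothetical). wflow_run() purls the Rmd to a temporary R script and
# sources it in the current session:
if (FALSE) {
  # Execute the most recently modified Rmd file
  wflow_run()
  # Execute a specific file, without echoing each line of code
  wflow_run("analysis/file.Rmd", verbose = FALSE)
}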
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_run.R
#' Custom site generator for workflowr websites
#'
#' \code{wflow_site} is a
#' \href{https://bookdown.org/yihui/rmarkdown/rmarkdown-site.html}{custom
#' site generator} to be used in combination with the R Markdown output format
#' \code{\link{wflow_html}}.
#'
#' Do not call the function \code{wflow_site} directly. Instead insert the line
#' below directly into the YAML header of the file \code{index.Rmd}:
#'
#' \preformatted{
#' ---
#' title: "Home"
#' site: workflowr::wflow_site
#' output:
#' workflowr::wflow_html:
#' toc: false
#' ---
#' }
#'
#' Then you can build the website by running \code{\link[rmarkdown]{render_site}}
#' in the R console or clicking the Knit button in RStudio.
#'
#' If you receive an error when using the RStudio Knit button (the error is
#' about an unused argument), make sure the Knit Directory is set to Document
#' Directory (you can set this with the dropdown menu next to the Knit button).
#'
#' @param input character. The name of the website directory or a specific R
#' Markdown file in the website directory.
#' @param encoding character. The
#' \href{https://en.wikipedia.org/wiki/Character_encoding}{character encoding}
#' to use to read the file.
#' @param ... Placeholder for potential future use.
#'
#' @seealso \code{\link{wflow_html}}, \code{\link[rmarkdown]{render_site}}
#'
#' @import rmarkdown
#' @export
wflow_site <- function(input, encoding = getOption("encoding"), ...) {
# Get output directory if it exists
output_dir <- get_output_dir(directory = input)
render <- function(input_file,
output_format,
envir,
quiet,
encoding, ...) {
# input is defined in the enclosing environment, i.e. wflow_site
input <- absolute(input)
if (is.null(input_file)) {
files <- list.files(input, pattern = "^[^_].*\\.[Rr]md$",
full.names = TRUE)
} else {
files <- input_file
}
# For an R Markdown website, the output_options self_contained and lib_dir
# must be set. Force them here instead of temporarily editing the _site.yml
# file.
# To improve: only do this for HTML output:
# https://github.com/rstudio/rmarkdown/pull/1177
output_options <- list(self_contained = FALSE,
lib_dir = "site_libs")
for (f in files) {
suppressMessages(
output_file <- rmarkdown::render(f,
output_format = output_format,
output_options = output_options,
knit_root_dir = NULL,
envir = envir,
quiet = quiet,
encoding = encoding)
)
# output_dir is defined in the enclosing environment (i.e. render is
# defined inside of wflow_site)
if (output_dir != input) {
# Move HTML file
fs::file_copy(output_file, output_dir, overwrite = TRUE)
unlink(output_file)
output_file <- file.path(output_dir, basename(output_file))
# Move figures
fig_dir <- create_figure_path(f)
fig_dir <- file.path(input, fig_dir)
if (fs::dir_exists(fig_dir)) {
fig_output_dir <- file.path(output_dir, "figure")
fs::dir_create(fig_output_dir)
file.copy(fig_dir, fig_output_dir, recursive = TRUE)
unlink(fig_dir, recursive = TRUE)
}
# Copy CSS/Javascript files
files_css <- list.files(path = input, pattern = "css$", full.names = TRUE)
fs::file_copy(files_css, output_dir, overwrite = TRUE)
files_js <- list.files(path = input, pattern = "js$", full.names = TRUE)
fs::file_copy(files_js, output_dir, overwrite = TRUE)
}
}
# Clean up source directory
if (output_dir != input) {
# Move site libraries
site_libs <- file.path(input, "site_libs")
file.copy(site_libs, output_dir, recursive = TRUE)
unlink(site_libs, recursive = TRUE)
# Remove figure directory
unlink(file.path(input, "figure"), recursive = TRUE)
}
# Report the output file that was created
if (!quiet) {
message("\nOutput created: ", output_file)
}
}
# return site generator
list(
name = "not implemented",
output_dir = output_dir,
render = render,
clean = function() stop("Not implemented", call. = FALSE)
)
}
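# A minimal usage sketch (not run when sourced). wflow_site() is not called
# directly; once it is declared in the YAML header of index.Rmd (see the
# documentation above), the site can be built from the website directory:
if (FALSE) {
  rmarkdown::render_site("analysis")
}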
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_site.R
#' Start a new workflowr project
#'
#' \code{wflow_start} creates a directory with the essential files for
#' a workflowr project. The default behavior is to add these files to
#' a new directory, but it is also possible to populate an existing
#' directory. By default, the working directory is changed to the
#' workflowr project directory.
#'
#' This is the recommended function to set up the file infrastructure for
#' a workflowr project. If you are using RStudio, you can also create
#' a new workflowr project as an "RStudio Project Template". Go to
#' "File" -> "New Project..." then select "workflowr project" from the
#' list of project types. In the future, you can return to your
#' project by choosing menu option "Open Project..." and selecting the
#' \code{.Rproj} file located at the root of the workflowr project
#' directory. In RStudio, opening this file will change the working
#' directory to the appropriate location, set the file navigator to
#' the workflowr project directory, and configure the Git pane.
#'
#' \code{wflow_start} populates the chosen directory with the
#' following files:
#'
#' \preformatted{|--- .gitignore
#' |--- .Rprofile
#' |--- _workflowr.yml
#' |--- analysis/
#' | |--- about.Rmd
#' | |--- index.Rmd
#' | |--- license.Rmd
#' | |--- _site.yml
#' |--- code/
#' | |--- README.md
#' |--- data/
#' | |--- README.md
#' |--- docs/
#' |--- <directory>.Rproj
#' |--- output/
#' | |--- README.md
#' |--- README.md
#' }
#'
#' The two \bold{required} subdirectories are \code{analysis/} and
#' \code{docs/}. These directories should never be removed from the
#' workflowr project.
#'
#' \code{analysis/} contains all the source R Markdown files that
#' implement the analyses for your project. It contains a special R
#' Markdown file, \code{index.Rmd}, that typically does not include R
#' code, and will be used to generate \code{index.html}, the
#' homepage for the project website. Additionally, this directory
#' contains the important configuration file \code{_site.yml}. The
#' website theme, navigation bar, and other properties can be
#' controlled through this file (for more details see the
#' documentation on
#' \href{https://bookdown.org/yihui/rmarkdown/rmarkdown-site.html}{R
#' Markdown websites}). Do not delete \code{index.Rmd} or
#' \code{_site.yml}.
#'
#' \code{docs/} will contain all the webpages generated from the R
#' Markdown files in \code{analysis/}. Any figures generated by
#' rendering the R Markdown files are also stored here. Each figure is
#' saved according to the following convention:
#' \code{docs/figure/<Rmd-filename>/<chunk-name>-#.png}, where
#' \code{#} corresponds to which of the plots the chunk generated (one
#' chunk can produce several plots).
#'
#' \code{_workflowr.yml} is an additional configuration file used only
#' by workflowr. It is used to apply the workflowr reproducibility
#' checks consistently across all R Markdown files. The most important
#' setting is \code{knit_root_dir} which determines the directory
#' where the scripts in \code{analysis/} are executed. The default is
#' to run code from the project root (\emph{i.e.,} \code{"."}). To
#' execute the code from \code{analysis/}, for example, change the
#' setting to \code{knit_root_dir: "analysis"}. See
#' \code{\link{wflow_html}} for more details.
#'
#' Another required file is the RStudio project file (ending in
#' \code{.Rproj}). \emph{Do not delete this file even if you do not
#' use RStudio; among other essential tasks, it is used to determine
#' the project root directory.}
#'
#' The \bold{optional} directories are \code{data/}, \code{code/}, and
#' \code{output/}. These directories are suggestions for organizing
#' your workflowr project and can be removed if you do not find them
#' relevant to your project.
#'
#' \code{data/} should be used to store "raw" (unprocessed) data
#' files.
#'
#' \code{code/} should be used to store additional code that might not
#' be appropriate to include in R Markdown files (e.g., code to
#' preprocess the data, long-running scripts, or functions that are
#' used in multiple R Markdown files).
#'
#' \code{output/} should be used to store processed data files and
#' other outputs generated from the code and analyses. For example,
#' scripts in \code{code/} that pre-process raw data files from
#' \code{data/} should save the processed data files in
#' \code{output/}.
#'
#' All these subdirectories except for \code{docs/} include a README
#' file summarizing the contents of the subdirectory, and can be
#' modified as desired, for example, to document the files stored in
#' each directory.
#'
#' \code{.Rprofile} is an optional file in the root directory of the
#' workflowr project containing R code that is executed whenever the
#' \code{.Rproj} file is loaded in RStudio, or whenever R is started
#' up inside the project root directory. This file includes the line
#' of code \code{library("workflowr")} to ensure that the workflowr
#' package is loaded.
#'
#' Finally, \code{.gitignore} is an optional file that indicates to
#' Git which files should be ignored---that is, files that are never
#' committed to the repository. Some suggested files to ignore such as
#' \code{.Rhistory} and \code{.Rdata} are listed here.
#'
#' @note Do not delete the file \code{.Rproj} even if you do not use
#' RStudio; workflowr will not work correctly unless this file is
#' there.
#'
#' @param directory character. The directory where the workflowr
#' project files will be added, e.g., "~/my-wflow-project". When
#' \code{existing = FALSE}, the directory will be created.
#'
#' @param name character (default: \code{NULL}). The name of the
#' project, e.g. "My Workflowr Project". When \code{name = NULL}, the
#' project name is automatically determined based on
#' \code{directory}. For example, if \code{directory =
#' "~/projects/my-wflow-project"}, then \code{name} is set to
#' \code{"my-wflow-project"}. The project name is displayed on the
#' website's navigation bar and in the \code{README.md} file.
#'
#' @param git logical (default: \code{TRUE}). Should the workflowr files be
#' committed with Git? If \code{git = TRUE} and no existing Git repository is
#' detected, \code{wflow_start} will initialize the repository and make an
#' initial commit. If a Git repository already exists in the chosen directory,
#' \code{wflow_start} will commit any newly created or modified files to the
#' existing repository (also need to set \code{existing = TRUE}). If \code{git
#' = FALSE}, \code{wflow_start} will not perform any Git commands.
#'
#' @param existing logical (default: \code{FALSE}). Indicate whether
#' \code{directory} already exists. This argument is added to prevent
#' accidental creation of files in an existing directory; setting
#' \code{existing = FALSE} prevents files from being created if the
#' specified directory already exists.
#'
#' @param overwrite logical (default: \code{FALSE}). Similar to
#' \code{existing}, this argument prevents files from accidentally
#' being overwritten when \code{overwrite = FALSE}. When
#' \code{overwrite = TRUE}, any existing file in \code{directory} that
#' has the same name as a workflowr file will be replaced by the
#' workflowr file. When \code{git = TRUE}, all the standard workflowr
#' files will be added and committed (regardless of whether they were
#' overwritten or still contain the original content).
#'
#' @param change_wd logical (default: \code{TRUE}). Change the working
#' directory to the \code{directory}.
#'
#' @param disable_remote logical (default: \code{FALSE}). Create a Git
#' \href{https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks}{pre-push
#' hook} that prevents pushing to a remote Git repository (i.e. using
#' \code{\link{wflow_git_push}}). This is useful for extremely confidential
#' projects that cannot be shared via an online Git hosting service (e.g.
#' GitHub or GitLab). The hook is saved in the file
#' \code{.git/hooks/pre-push}. If you change your mind and want to push the
#' repository, you can delete that file. Note that this option is only
#'   available if \code{git = TRUE}, and is currently only supported on Linux
#'   and macOS.
#'
#' @param dry_run logical (default: \code{FALSE}). When \code{dry_run
#' = TRUE}, the actions are previewed without executing them.
#'
#' @param user.name character (default: \code{NULL}). The user name
#' used by Git to sign commits, e.g., "Ada Lovelace". This setting
#' only applies to the workflowr project being created. To specify the
#' global setting for the Git user name, use
#' \code{\link{wflow_git_config}} instead. When \code{user.name =
#' NULL}, no user name is recorded for the project, and the global
#' setting will be used. This setting can be modified later
#' by running \code{git config --local} in the Terminal.
#'
#' @param user.email character (default: \code{NULL}). The email
#' address used by Git to sign commits, e.g.,
#' "[email protected]". This setting only applies to the workflowr
#' project being created. To specify the global setting for the Git
#' email address, use \code{\link{wflow_git_config}} instead. When
#' \code{user.email = NULL}, no email address is recorded for the
#' project, and the global setting will be used. This setting can be
#' modified later by running \code{git config --local} in the Terminal.
#'
#' @return An object of class \code{wflow_start}, which is a list with the
#' following elements:
#'
#' \item{directory}{The input argument \code{directory}.}
#'
#' \item{name}{The input argument \code{name}.}
#'
#' \item{git}{The input argument \code{git}.}
#'
#' \item{existing}{The input argument \code{existing}.}
#'
#' \item{overwrite}{The input argument \code{overwrite}.}
#'
#' \item{change_wd}{The input argument \code{change_wd}.}
#'
#' \item{disable_remote}{The input argument \code{disable_remote}.}
#'
#' \item{dry_run}{The input argument \code{dry_run}.}
#'
#' \item{user.name}{The input argument \code{user.name}.}
#'
#' \item{user.email}{The input argument \code{user.email}.}
#'
#' \item{commit}{The object returned by
#' \link{git2r}::\code{\link[git2r]{commit}}, or \code{NULL} if \code{git =
#' FALSE}.}
#'
#' @seealso vignette("wflow-01-getting-started")
#'
#' @examples
#' \dontrun{
#'
#' wflow_start("path/to/new-project")
#'
#' # Provide a custom name for the project.
#' wflow_start("path/to/new-project", name = "My Project")
#'
#' # Preview what wflow_start would do
#' wflow_start("path/to/new-project", dry_run = TRUE)
#'
#' # Add workflowr files to an existing project.
#' wflow_start("path/to/current-project", existing = TRUE)
#'
#' # Add workflowr files to an existing project, but do not automatically
#' # commit them.
#' wflow_start("path/to/current-project", git = FALSE, existing = TRUE)
#' }
#'
#' @export
wflow_start <- function(directory,
name = NULL,
git = TRUE,
existing = FALSE,
overwrite = FALSE,
change_wd = TRUE,
disable_remote = FALSE,
dry_run = FALSE,
user.name = NULL,
user.email = NULL) {
# Check input arguments ------------------------------------------------------
if (!is.character(directory) || length(directory) != 1)
stop("directory must be a one element character vector: ", directory)
if (!(is.null(name) || (is.character(name) && length(name) == 1)))
stop("name must be NULL or a one element character vector: ", name)
assert_is_flag(git)
assert_is_flag(existing)
assert_is_flag(overwrite)
if (overwrite && !existing) {
stop("Cannot overwrite non-existent project. Set existing = TRUE if you wish to overwrite existing workflowr files.")
}
assert_is_flag(change_wd)
assert_is_flag(disable_remote)
assert_is_flag(dry_run)
if (!(is.null(user.name) || (is.character(user.name) && length(user.name) == 1)))
stop("user.name must be NULL or a one element character vector: ", user.name)
if (!(is.null(user.email) || (is.character(user.email) && length(user.email) == 1)))
stop("user.email must be NULL or a one element character vector: ", user.email)
if ((is.null(user.name) && !is.null(user.email)) ||
(!is.null(user.name) && is.null(user.email)))
stop("Must specify both user.name and user.email, or neither.")
check_wd_exists()
if (!existing && fs::dir_exists(directory)) {
stop("Directory already exists. Set existing = TRUE if you wish to add workflowr files to an already existing project.")
} else if (existing && !fs::dir_exists(directory)) {
stop("Directory does not exist. Set existing = FALSE to create a new directory for the workflowr files.")
}
directory <- absolute(directory)
# A workflowr directory cannot be created within an existing Git repository if
# git = TRUE & existing = FALSE.
if (git && !existing) {
check_for_existing_git_directory(directory)
}
# Require that user.name and user.email be set locally or globally
if (git && is.null(user.name) && is.null(user.email)) {
check_git_config(path = directory, "`wflow_start` with `git = TRUE`")
}
# Do not allow git = FALSE and disable_remote = TRUE
if (!git && disable_remote) {
stop("disable_remote is only available if git=TRUE")
}
# Do not allow disable_remote = TRUE on Windows
if (disable_remote && .Platform$OS.type == "windows") {
stop("disable_remote is not available on Windows")
}
do.call(wflow_start_, args = as.list(environment()))
}
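# Implementation note: wflow_start_() below holds the actual implementation.
# Its formals are copied from wflow_start() so that the
# do.call(wflow_start_, args = as.list(environment())) call above can forward
# every argument once the input checks have passed.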
wflow_start_ <- function() {}
formals(wflow_start_) <- formals(wflow_start)
body(wflow_start_) <- quote({
# Create directory if it doesn't already exist
if (!existing && !fs::dir_exists(directory) && !dry_run) {
fs::dir_create(directory)
}
# Convert to absolute path. Needs to be run again after creating the directory
# because symlinks can only be resolved for existing directories.
directory <- absolute(directory)
# Configure name of workflowr project
if (is.null(name)) {
name <- basename(directory)
}
# Get variables to interpolate into _workflowr.yml
wflow_version <- as.character(utils::packageVersion("workflowr"))
the_seed_to_set <- as.numeric(format(Sys.Date(), "%Y%m%d")) # YYYYMMDD
# Add files ------------------------------------------------------------------
# Use templates defined in R/infrastructure.R
names(templates)[which(names(templates) == "Rproj")] <-
glue::glue("{basename(directory)}.Rproj")
names(templates) <- file.path(directory, names(templates))
project_files <- names(templates)
# Create subdirectories
subdirs <- file.path(directory, c("analysis", "code", "data", "docs",
"output"))
if (!dry_run) {
fs::dir_create(subdirs)
}
if (!dry_run) {
for (fname in project_files) {
if (!fs::file_exists(fname) || overwrite) {
cat(glue::glue(templates[[fname]]), file = fname)
}
}
}
# Create .nojekyll file in docs/ directory
nojekyll <- file.path(directory, "docs", ".nojekyll")
project_files <- c(project_files, nojekyll)
if (!dry_run) {
fs::file_create(nojekyll)
}
# Configure, initialize, and commit ------------------------------------------
# Configure RStudio
rs_version <- check_rstudio_version()
# Change working directory to workflowr project
if (change_wd && !dry_run) {
setwd(directory)
}
# Configure Git repository
if (git && !dry_run) {
if (!git2r::in_repository(directory)) {
git2r::init(directory)
}
repo <- git2r::repository(directory)
# Set local user.name and user.email
if (!is.null(user.name) && !is.null(user.email)) {
git2r::config(repo, user.name = user.name, user.email = user.email)
}
# Make the first workflowr commit
git2r_add(repo, project_files, force = TRUE)
status <- git2r::status(repo)
if (length(status$staged) == 0) {
warning("No new workflowr files were committed.")
} else {
commit <- git2r::commit(repo, message = "Start workflowr project.")
}
# Create pre-push hook to prevent pushing confidential projects
if (disable_remote) {
pre_push_file <- file.path(git2r::workdir(repo), ".git/hooks/pre-push")
if (!fs::file_exists(pre_push_file) || overwrite) {
# extras is a list defined in infrastructure.R
cat(glue::glue(extras[["disable_remote"]]), file = pre_push_file)
}
if (!file_is_executable(pre_push_file)) {
fs::file_chmod(pre_push_file, "a+x")
}
}
}
# Prepare output -------------------------------------------------------------
o <- list(directory = directory,
name = name,
git = git,
existing = existing,
overwrite = overwrite,
change_wd = change_wd,
disable_remote = disable_remote,
dry_run = dry_run,
user.name = user.name,
user.email = user.email,
commit = if (exists("commit", inherits = FALSE)) commit else NULL)
class(o) <- "wflow_start"
return(o)
})
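# A minimal usage sketch (not run when sourced; the path is hypothetical).
# Preview what wflow_start() would create without touching the file system:
if (FALSE) {
  st <- wflow_start("~/projects/new-project", dry_run = TRUE)
  st$directory # absolute path where the project would be created
}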
#' @export
print.wflow_start <- function(x, ...) {
if (x$dry_run) {
cat("wflow_start (\"dry run mode\"):\n")
if (x$existing) {
cat(sprintf("- Files will be added to existing directory %s\n", x$directory))
} else {
cat(sprintf("- New directory will be created at %s\n", x$directory))
}
cat(sprintf("- Project name will be \"%s\"\n", x$name))
if (x$change_wd) {
cat(sprintf("- Working directory will be changed to %s\n", x$directory))
} else {
cat(sprintf("- Working directory will continue to be %s\n", getwd()))
}
if (x$existing && git2r::in_repository(x$directory)) {
repo <- git2r::repository(x$directory, discover = TRUE)
cat(sprintf("- Git repo already present at %s\n", git2r::workdir(repo)))
} else if (x$git) {
cat(sprintf("- Git repo will be initiated at %s\n", x$directory))
} else {
cat(sprintf("- Git repo will not be initiated at %s\n", x$directory))
}
if (x$git) {
cat("- Files will be committed with Git\n")
} else {
cat("- Files will not be committed with Git\n")
}
if (x$disable_remote) {
cat("- Pushing to remote repository will be disabled\n")
}
} else {
cat("wflow_start:\n")
if (x$existing) {
cat(sprintf("- Files added to existing directory %s\n", x$directory))
} else {
cat(sprintf("- New directory created at %s\n", x$directory))
}
cat(sprintf("- Project name is \"%s\"\n", x$name))
if (x$change_wd) {
cat(sprintf("- Working directory changed to %s\n", x$directory))
} else {
cat(sprintf("- Working directory continues to be %s\n", getwd()))
}
if (git2r::in_repository(x$directory)) {
repo <- git2r::repository(x$directory, discover = TRUE)
if (x$git && !x$existing) {
cat(sprintf("- Git repo initiated at %s\n", git2r::workdir(repo)))
} else if (x$git && x$existing && length(git2r::commits(repo)) == 1) {
cat(sprintf("- Git repo initiated at %s\n", git2r::workdir(repo)))
} else {
cat(sprintf("- Git repo already present at %s\n", git2r::workdir(repo)))
}
if (x$git) {
if (is.null(x$commit)) {
cat("- Files were not committed\n")
} else {
cat(sprintf("- Files were committed in version %s\n",
shorten_sha(x$commit$sha)))
}
}
} else {
cat("- No Git repo\n")
}
if (x$disable_remote) {
cat("- Pushing to remote repository is disabled\n")
}
}
return(invisible(x))
}
check_rstudio_version <- function() {
if (rstudioapi::isAvailable()) {
rs_version <- rstudioapi::getVersion()
if (rs_version < "1.0.0") {
message(strwrap(paste("You can gain lots of new useful features",
"by updating to RStudio version 1.0 or greater.",
"You are running RStudio",
as.character(rs_version)), prefix = "\n"))
}
} else {
rs_version <- NULL
}
return(rs_version)
}
check_for_existing_git_directory <- function(directory) {
# In order to check if location is within an existing Git repository, first
# must obtain the most upstream existing directory
dir_existing <- obtain_existing_path(directory)
if (git2r::in_repository(dir_existing)) {
r <- git2r::repository(dir_existing, discover = TRUE)
stop(call. = FALSE,
"The directory where you have chosen to create a new workflowr directory",
" is already within a Git repository. This is potentially dangerous. If",
" you want to have a workflowr project created within this existing Git",
" repository, re-run wflow_start with `git = FALSE` and then manually",
" commit the new files. The following directory contains the existing .git",
" directory: ", git2r::workdir(r))
}
return(invisible(NULL))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_start.R
## Wrapper function to enhance RStudio Project Template
wflow_start_rstudio <- function(directory,
name = "",
git = TRUE,
existing = FALSE,
overwrite = FALSE,
user.name = "",
user.email = "") {
directory_rs <- directory
# Check if name is blank, use NULL if true
if (name == "") {
name_rs <- NULL
} else {
name_rs <- name
}
git_rs <- git
existing_rs <- existing
overwrite_rs <- overwrite
# Check if user.name is blank, use NULL if true
if (user.name == "") {
user.name_rs <- NULL
check_git_config(directory, custom_message = "the RStudio Project Template")
} else {
user.name_rs <- user.name
}
# Check if user.email is blank, use NULL if true
if (user.email == "") {
user.email_rs <- NULL
check_git_config(directory, custom_message = "the RStudio Project Template")
} else {
user.email_rs <- user.email
}
wflow_start(directory = directory_rs,
name = name_rs,
git = git_rs,
existing = existing_rs,
overwrite = overwrite_rs,
user.name = user.name_rs,
user.email = user.email_rs)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_start_rstudio.R
#' Report status of workflowr project
#'
#' \code{wflow_status} reports the analysis files that require user action.
#'
#' \code{wflow_status} reports analysis files with one of the following
#' statuses:
#'
#' \itemize{
#'
#' \item \bold{Mod}: Modified file. Any published file that has been modified
#' since the last time the HTML was published.
#'
#' \item \bold{Unp}: Unpublished file. Any tracked file whose corresponding HTML
#' is not tracked. May or may not have staged or unstaged changes.
#'
#' \item \bold{Scr}: Scratch file. Any untracked file that is not specifically
#' ignored.
#'
#' }
#'
#' \code{wflow_status} only works for workflowr projects that use Git.
#'
#' @param files character (default: NULL) The analysis file(s) to report the
#' status. By default checks the status of all analysis files. Supports
#' file \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param include_git_status logical (default: TRUE) Include the Git status of
#' the project files in the output. Note that this excludes any files in the
#' website directory, since these generated files should only be committed by
#' workflowr, and not the user.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Returns an object of class \code{wflow_status}, which is a list with
#' the following elements:
#'
#' \itemize{
#'
#' \item \bold{root}: The relative path to the root directory of the workflowr
#' project (i.e. contains the RStudio .Rproj file).
#'
#' \item \bold{analysis}: The relative path to the directory that contains
#' \code{_site.yml} and the R Markdown files.
#'
#' \item \bold{docs}: The relative path to the directory that contains the HTML
#' files and figures.
#'
#' \item \bold{git}: The relative path to the \code{.git} directory that
#' contains the history of the Git repository.
#'
#' \item \bold{site_yml}: \code{TRUE} if the configuration file \code{_site.yml}
#' has uncommitted changes, otherwise \code{FALSE}.
#'
#' \item \bold{wflow_yml}: \code{TRUE} if the configuration file
#' \code{_workflowr.yml} has uncommitted changes, otherwise \code{FALSE}. If the
#' file does not exist, the result is \code{NULL}. If the file was recently
#' deleted and not yet committed to Git, then it will be \code{TRUE}.
#'
#' \item \bold{git_status} The Git status as a \code{git_status}
#' object from the package \link{git2r} (see \code{git2r::\link[git2r]{status}}).
#'
#' \item \bold{include_git_status} The argument \code{include_git_status}
#' indicating whether the Git status should be printed along with the status of
#' the Rmd files.
#'
#' \item \bold{status}: A data frame with detailed information on the status of
#' each R Markdown file (see below).
#'
#' }
#'
#' The data frame \code{status} contains the following non-mutually exclusive
#' columns (all logical vectors):
#'
#' \itemize{
#'
#' \item \bold{ignored}: The R Markdown file has been ignored by Git according
#' to the patterns in the file \code{.gitignore}.
#'
#' \item \bold{mod_unstaged}: The R Markdown file has unstaged modifications.
#'
#' \item \bold{conflicted}: The R Markdown file has merge conflicts.
#'
#' \item \bold{mod_staged}: The R Markdown file has staged modifications.
#'
#' \item \bold{tracked}: The R Markdown file is tracked by Git.
#'
#' \item \bold{committed}: The R Markdown file has been previously committed to
#' the Git repository.
#'
#' \item \bold{published}: The corresponding HTML file has been previously
#' committed.
#'
#' \item \bold{mod_committed}: The R Markdown file has modifications that have
#' been committed since the last time the HTML was built and committed.
#'
#' \item \bold{modified}: The R Markdown file has been modified since it was
#' last published (i.e. \code{mod_unstaged} or \code{mod_staged} or
#' \code{mod_committed}).
#'
#' \item \bold{unpublished}: The R Markdown file is tracked by Git but not
#' published (i.e. the HTML has not been committed).
#'
#' \item \bold{scratch}: The R Markdown file is untracked by Git, i.e. it is
#' considered a scratch file until it is committed.
#'
#' }
#'
#' @examples
#' \dontrun{
#'
#' wflow_status()
#' # Get status of specific file(s)
#' wflow_status("analysis/file.Rmd")
#' # Save the results
#' s <- wflow_status()
#' }
#' @export
wflow_status <- function(files = NULL, include_git_status = TRUE, project = ".") {
files <- process_input_files(files, allow_null = TRUE, rmd_only = TRUE,
convert_to_relative_paths = TRUE)
assert_is_flag(include_git_status)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
if (isTRUE(getOption("workflowr.autosave"))) autosave()
# Obtain list of workflowr paths. Throw error if no Git repository.
o <- wflow_paths(error_git = TRUE, project = project)
# Gather analysis files
# (files that start with an underscore are ignored)
files_analysis <- list.files(path = o$analysis, pattern = "^[^_].+[Rr]md$",
full.names = TRUE)
files_analysis <- relative(files_analysis)
if (!is.null(files)) {
files_analysis <- files_analysis[match(files, files_analysis)]
}
if (length(files_analysis) == 0)
stop("files did not include any analysis files")
# Obtain status of each R Markdown file
r <- git2r::repository(o$git)
s <- git2r::status(r, ignored = TRUE)
s_df <- status_to_df(s)
# Fix file paths
s_df$file <- file.path(git2r::workdir(r), s_df$file)
s_df$file <- relative(s_df$file)
# Categorize all files by git status
f_ignored <- s_df$file[s_df$status == "ignored"]
f_unstaged <- s_df$file[s_df$status == "unstaged"]
f_conflicted <- s_df$file[s_df$substatus == "conflicted"]
f_staged <- s_df$file[s_df$status == "staged"]
f_untracked <- s_df$file[s_df$status == "untracked"]
# Determine status of each analysis file (i.e. Rmd) in the Git repository.
# Each status is a logical vector.
ignored <- files_analysis %in% f_ignored
mod_unstaged <- files_analysis %in% f_unstaged
conflicted <- files_analysis %in% f_conflicted
mod_staged <- files_analysis %in% f_staged
tracked <- files_analysis %in% setdiff(files_analysis,
c(f_untracked, f_ignored))
files_committed <- get_committed_files(r)
files_committed <- relative(files_committed)
committed <- files_analysis %in% files_committed
files_html <- to_html(files_analysis, outdir = o$docs)
published <- files_html %in% files_committed
# If a user somehow committed the HTML file but not the source Rmd file, which
# is impossible to do with wflow_publish(), the workflowr report will show a
# warning. However, it will also cause an error when trying to access the date
# of the last commit to the Rmd file
html_only <- !committed & published
if (any(html_only)) {
published[html_only] <- FALSE
html_only_files <- files_analysis[html_only]
warning(call. = FALSE, immediate. = TRUE, wrap(
"The following R Markdown file(s) have not been committed to the
Git repository but their corresponding HTML file(s) have. This
violates the reproducibility guarantee of workflowr. Please
publish these files using wflow_publish() to fix this situation."),
"\n\n", paste(html_only_files, collapse = "\n"))
}
# Do published files have subsequently committed changes?
files_outdated <- get_outdated_files(r,
absolute(files_analysis[published]),
outdir = absolute(o$docs))
files_outdated <- relative(files_outdated)
mod_committed <- files_analysis %in% files_outdated
# Highlevel designations
modified <- published & (mod_unstaged | mod_staged | mod_committed)
# Status Unp
#
# Unpublished file. Any tracked file whose corresponding HTML is not tracked.
# May or may not have staged or unstaged changes.
unpublished <- tracked & !published
# Status Scr
#
# Scratch file. Any untracked file that is not specifically ignored.
scratch <- !tracked & !ignored
# Determine if _site.yml has been edited
o$site_yml <- FALSE
site_yml_path <- relative(file.path(o$analysis, "_site.yml"))
if (site_yml_path %in% s_df$file) o$site_yml <- TRUE
# Determine if _workflowr.yml has been edited
o$wflow_yml <- FALSE
wflow_yml_path <- relative(file.path(o$root, "_workflowr.yml"))
if (!file.exists(wflow_yml_path)) o$wflow_yml <- NULL
if (wflow_yml_path %in% s_df$file) o$wflow_yml <- TRUE
o$status <- data.frame(ignored, mod_unstaged, conflicted, mod_staged, tracked,
committed, published, mod_committed, modified,
unpublished, scratch,
row.names = files_analysis)
# Passing the Git status to print.wflow_status()
o$include_git_status <- include_git_status
o$git_status <- s
class(o) <- "wflow_status"
return(o)
}
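# A minimal usage sketch (not run when sourced). The logical columns
# documented above are stored in the data frame s$status, with one row per
# Rmd file:
if (FALSE) {
  s <- wflow_status()
  str(s$status)
  # Published files that have been modified since they were last published:
  rownames(s$status)[s$status$published & s$status$modified]
}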
#' @export
print.wflow_status <- function(x, ...) {
# The legend key to explain abbreviations of file status
key <- character()
# Report totals
cat(sprintf("Status of %d Rmd files\n\nTotals:\n", nrow(x$status)))
if (sum(x$status$published) > 0 && sum(x$status$modified) > 0) {
cat(sprintf(" %d Published (%d Modified)\n",
sum(x$status$published), sum(x$status$modified)))
key <- c(key, "Mod = Modified")
} else if (sum(x$status$published) > 0) {
cat(sprintf(" %d Published\n", sum(x$status$published)))
}
if (sum(x$status$unpublished) > 0) {
cat(sprintf(" %d Unpublished\n", sum(x$status$unpublished)))
key <- c(key, "Unp = Unpublished")
}
if (sum(x$status$scratch) > 0) {
cat(sprintf(" %d Scratch\n", sum(x$status$scratch)))
key <- c(key, "Scr = Scratch (Untracked)")
}
f <- c(rownames(x$status)[x$status$modified],
rownames(x$status)[x$status$unpublished],
rownames(x$status)[x$status$scratch])
names(f) <- rep(c("Mod", "Unp", "Scr"),
times = c(sum(x$status$modified),
sum(x$status$unpublished),
sum(x$status$scratch)))
if (length(f) > 0) {
cat("\nThe following Rmd files require attention:\n\n")
}
for (i in seq_along(f)) {
o <- sprintf("%s %s\n", names(f)[i], f[i])
cat(o)
}
if (length(f) > 0) {
cat(sprintf("\nKey: %s\n", paste(key, collapse = ", ")))
}
if (x$include_git_status) {
s <- scrub_status(x$git_status, git2r::repository(x$git), output_dir = x$docs,
remove_ignored = TRUE)
s_df <- status_to_df(s)
if (nrow(s_df) > 0) {
s_df$file <- file.path(x$git, s_df$file)
s_df$file <- relative(s_df$file)
cat("\nThe current Git status is:\n\n")
prev <- options(width = 200)
cat(paste(utils::capture.output(print(s_df, row.names = FALSE)), collapse = "\n"))
options(prev)
cat("\n")
} else {
cat("\nThe current Git status is: working directory clean\n")
}
}
if (length(f) == 0) {
cat("\nRmd files are up-to-date\n")
} else {
cat("\n")
cat(wrap("To publish your changes as part of your website, use `wflow_publish()`."))
cat("\n")
cat(wrap("To commit your changes without publishing them yet, use `wflow_git_commit()`."))
cat("\n")
}
if (x$site_yml) {
site_yml_path <- relative(file.path(x$analysis, "_site.yml"))
cat(glue::glue("\n\nThe config file {site_yml_path} has been edited.\n\n"))
}
if (!is.null(x$wflow_yml) && x$wflow_yml) {
wflow_yml_path <- relative(file.path(x$root, "_workflowr.yml"))
cat(glue::glue("\n\nThe config file {wflow_yml_path} has been edited.\n\n"))
}
# It's a convention for S3 print methods to invisibly return the original
# object, e.g. base::print.summaryDefault and stats:::print.lm. I don't
# understand why this is useful. Anyone know why?
return(invisible(x))
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_status.R
#' Create table of contents
#'
#' \code{wflow_toc} creates a table of contents of the published R Markdown
#' files. The output is in markdown format, so you can paste it into a document
#' such as \code{index.Rmd}. If the R package
#' \href{https://cran.r-project.org/package=clipr}{clipr} is installed, the
#' table of contents is copied to the clipboard. Otherwise the output is sent to
#' the R console.
#'
#' The default behavior is to attempt to copy the table of contents to the
#' clipboard for easy pasting into an R Markdown document. If this isn't working
#' for you, you can try the following:
#'
#' \itemize{
#'
#' \item Check that the clipr package is installed:
#' \code{install.packages("clipr")}
#'
#' \item Check that the system keyboard is writable. Run
#' \code{\link[clipr]{clipr_available}} and \code{\link[clipr:clipr_available]{dr_clipr}}.
#'
#' \item If it's still not working, set \code{clipboard = FALSE} to send the
#' table of contents to the R console to manually copy-paste.
#'
#' }
#'
#' @param ignore_nav_bar logical (default: TRUE). Ignore any HTML files included
#' as links in the navigation bar.
#' @param clipboard logical (default: TRUE) Attempt to copy table of contents to
#' clipboard. Only relevant if
#' \href{https://cran.r-project.org/package=clipr}{clipr} package is installed
#' and the system keyboard is available.
#' @param only_published logical (default: TRUE) Include only published contents.
#' @inheritParams wflow_git_commit
#'
#' @return Invisibly returns the table of contents as a character vector.
#'
#' @export
wflow_toc <- function(ignore_nav_bar = TRUE, clipboard = TRUE,
only_published = TRUE, project = ".") {
# Check input arguments ------------------------------------------------------
assert_is_flag(ignore_nav_bar)
assert_is_flag(clipboard)
assert_is_flag(only_published)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
# Create table of contents ---------------------------------------------------
s <- wflow_status(project = project)
if (only_published) {
rmd <- rownames(s$status)[s$status$published]
} else {
rmd <- rownames(s$status)
}
html <- to_html(basename(rmd))
# Obtains the toc except the documents in the navigation bar.
if (ignore_nav_bar) {
yml <- yaml::read_yaml(file.path(s$analysis, "_site.yml"))
navbar <- unlist(c(yml$navbar$left, yml$navbar$right))
html_in_nav <- html %in% navbar
html <- html[!html_in_nav]
rmd <- rmd[!html_in_nav]
}
if (length(rmd) == 0) {
m <-
"No suitable content to be added to the TOC found. If you wish to
include unpublished contents, consider setting `only_published = FALSE`.
If you wish to include contents already linked in the navigation bar,
consider setting `ignore_nav_bar = FALSE`."
warning(wrap(m))
return(invisible(character()))
}
titles <- vapply(rmd, get_rmd_title, character(1))
titles <- ifelse(is.na(titles), basename(rmd), titles)
toc <- glue::glue("1. [{titles}]({html})")
toc <- as.character(toc)
# Output ---------------------------------------------------------------------
write_to_clip <- clipboard &&
requireNamespace("clipr", quietly = TRUE) &&
interactive() &&
clipr::clipr_available()
if (write_to_clip) {
clipr::write_clip(toc)
message("The table of content of your project is on the clipboard.")
} else {
message(paste0(toc, collapse = "\n"))
}
return(invisible(toc))
}
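# A minimal usage sketch (not run when sourced). Send the table of contents
# to the console instead of the clipboard, then paste it into index.Rmd
# manually:
if (FALSE) {
  toc <- wflow_toc(clipboard = FALSE)
  cat(toc, sep = "\n")
}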
# Obtains the title in the YAML header of an R Markdown file. If not set,
# returns NA_character_.
get_rmd_title <- function(x) {
stopifnot(fs::file_exists(x))
header <- rmarkdown::yaml_front_matter(x)
if (is.null(header$title)) {
return(NA_character_)
} else {
return(header$title)
}
}
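# A small usage sketch (not run when sourced; the file name is hypothetical):
if (FALSE) {
  get_rmd_title("analysis/index.Rmd") # e.g. "Home", or NA if no title is set
}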
wflow_toc_addin <- function() {
if (is.null(rstudioapi::getSourceEditorContext()))
stop("wflow_toc() addin: No file open. Please open a file to paste the table of contents.",
call. = FALSE)
toc <- suppressMessages(wflow_toc(clipboard = FALSE))
if (length(toc) == 0) {
m <-
"wflow_toc() addin: Couldn't find any published files (that aren't part
of the navigation bar). Use wflow_publish() first or call wflow_toc()
directly using one of the arguments described in the warning message
below."
stop(wrap(m), call. = FALSE)
}
toc_single <- paste(toc, collapse = "\n")
toc_single <- paste0(toc_single, "\n")
rstudioapi::insertText(toc_single)
}
# End of file: /scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_toc.R
#' Deploy site with GitHub
#'
#' \code{wflow_use_github} automates all the local configuration necessary to
#' deploy your workflowr project with \href{https://pages.github.com/}{GitHub
#' Pages}. Optionally, it can also create the new repository on GitHub (only
#' applies to public repositories hosted on github.com). Afterwards, you will
#' need to run \code{wflow_git_push} in the R console (or \code{git push} in the
#' terminal) to push the code to GitHub.
#'
#' \code{wflow_use_github} performs the following steps and then commits the
#' changes:
#'
#' \itemize{
#'
#' \item Adds a link to the GitHub repository in the navigation bar
#'
#' \item Configures the Git remote settings to use GitHub (via
#' \code{\link{wflow_git_remote}})
#'
#' \item (Only if necessary) Renames the website directory to \code{docs/}
#'
#' \item (Only if necessary) Edits the setting \code{output_dir} in the file
#' \code{_site.yml} to save the website files in \code{docs/}
#'
#' }
#'
#' Furthermore, you have two options for creating the remote repository on GitHub.
#' In an interactive R session, you will be prompted to choose one of the options
#' below. To bypass the prompt, you can set the argument \code{create_on_github}.
#'
#' \itemize{
#'
#' \item 1. Have workflowr create the new repository on GitHub. If you accept, your
#' browser will open for you to provide authorization. If you are not logged
#' into GitHub, you will be prompted to login. Then you will be asked to give
#' permission to the workflowr-oauth-app to create the new repository for you on
#' your behalf. This will allow \code{wflow_use_github}, running on your own
#' machine, to create your new repository. Once \code{wflow_use_github}
#' finishes, workflowr can no longer access your GitHub account.
#'
#' \item 2. Create the remote repository yourself by going to
#' \url{https://github.com/new} and entering the Repository name that matches
#' the name of the directory of your workflowr project (if you used the argument
#' \code{repository} to make it a different name, make sure to instead use that
#' one).
#'
#' }
#'
#' Once the GitHub repository has been created either by \code{wflow_use_github}
#' or yourself, run \code{wflow_git_push} in the R console (or \code{git push
#' origin master} in the terminal) to push your code to GitHub.
#'
#' @param username character (default: NULL). The GitHub account associated with
#' the GitHub repository. This should be your personal GitHub username. If the
#' repository will be created for a GitHub organization, instead use the
#' argument \code{organization}. It will be
#' combined with the arguments \code{repository} and \code{domain} to
#' determine the URL of the new repository, e.g. the default is
#' https://github.com/username/repository. It will be combined with the
#' arguments \code{repository}, \code{domain}, and \code{protocol} to
#' determine the URL for Git to use to push and pull from GitHub, e.g. the
#' default is https://github.com/username/repository.git. If \code{username}
#' is not specified, \code{wflow_use_github} will first attempt to guess it
#' from the current setting for the remote URL named "origin". If you haven't
#' previously configured a remote for this workflowr project (or you are
#' unsure what that means), then you should specify your GitHub username when
#' calling this function.
#' @param repository character (default: NULL). The name of the remote
#' repository on GitHub. If not specified, workflowr will guess the name of
#' the repository. First, it will check the current setting for the remote URL
#' named "origin". Second, it will use the name of the root directory of the
#' workflowr project.
#' @param organization character (default: NULL). The GitHub organization
#' associated with the GitHub repository. Only set one of \code{organization}
#' or \code{username}. See the argument \code{username} above for more
#' details.
#' @param navbar_link logical (default: TRUE). Insert a link to the GitHub
#' repository into the navigation bar.
#' @param create_on_github logical (default: NULL). Should workflowr create the
#' repository on GitHub? This requires logging into your GitHub account to
#' authenticate workflowr to act on your behalf. The default behavior is to
#' ask the user. Note that this only works for public repositories on
#' github.com. If you want to create a private repository or are using GitHub
#' Enterprise, you will need to manually create the repository.
#' @param protocol character (default: "https"). The protocol for communicating
#' with GitHub. Must be either "https" or "ssh".
#' @param domain character (default: "github.com"). The domain of the remote
#' host. You only need to change this if your organization is using GitHub
#' Enterprise.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Invisibly returns a list of class \code{wflow_use_github}. This is
#' currently for internal use only. Please open an Issue if you'd like to use
#' this information.
#'
#' @section Troubleshooting:
#'
#' The feature to automatically create the GitHub repository for you may fail
#' since it involves using your web browser to authenticate with your GitHub
#' account. If it fails for any reason, it'd probably be easier to manually
#' login to GitHub and create the repository yourself
#' (\href{https://docs.github.com/articles/creating-a-new-repository}{instructions from GitHub}).
#' However, if you have time, please file an
#' \href{https://github.com/workflowr/workflowr/issues/new/choose}{Issue on
#' GitHub} to report what happened, and importantly include which web browser
#' you were using.
#'
#' We have observed the following problems before:
#'
#' \itemize{
#'
#' \item The green button to approve the authentication of the workflowr GitHub
#' app to create the repository on your behalf is grayed out and cannot be
#' clicked. This is likely a JavaScript problem. Make sure you don't have
#' JavaScript disabled in your web browser. Also, you can try using a different
#' browser.
#'
#' }
#'
#' @seealso \code{\link{wflow_git_push}}, \code{\link{wflow_git_remote}},
#' \code{\link{wflow_use_gitlab}}
#'
#' @examples
#' \dontrun{
#'
#' wflow_use_github("your-username", "name-of-repository")
#' # Login with GitHub account and create new repository
#' wflow_git_push()
#'
#' # Create a repository for an organization you belong to
#' wflow_use_github(organization = "my-org")
#' }
#'
#' @importFrom httpuv startServer
#' @export
wflow_use_github <- function(username = NULL,
repository = NULL,
organization = NULL,
navbar_link = TRUE,
create_on_github = NULL,
protocol = "https",
domain = "github.com",
project = ".") {
# Check input arguments ------------------------------------------------------
if (!is.null(username))
if (!(is.character(username) && length(username) == 1))
stop("username must be NULL or a one element character vector: ", username)
if (!is.null(repository))
if (!(is.character(repository) && length(repository) == 1))
stop("repository must be NULL or a one element character vector: ", repository)
if (!is.null(organization))
if (!(is.character(organization) && length(organization) == 1))
stop("organization must be NULL or a one element character vector: ", organization)
assert_is_flag(navbar_link)
if (!is.null(create_on_github)) {
assert_is_flag(create_on_github)
}
if (!(is.character(protocol) && length(protocol) == 1))
stop("protocol must be a one element character vector: ", protocol)
if (!(is.character(domain) && length(domain) == 1))
stop("domain must be a one element character vector: ", domain)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
check_git_config(project, "`wflow_use_github`")
# Status ---------------------------------------------------------------------
s <- wflow_status(project = project)
# Convert to absolute paths to facilitate path manipulation below
s$analysis <- absolute(s$analysis)
s$docs <- absolute(s$docs)
r <- git2r::repository(path = s$git)
remotes <- wflow_git_remote(verbose = FALSE, project = project)
message("Summary from wflow_use_github():")
# Determine username and repository ------------------------------------------
# guess based on current remote "origin"
host <- get_host_from_remote(path = project) # returns NA if unavailable
host_parts <- stringr::str_split(host, "/")[[1]]
account_is_organization <- FALSE
if (is.character(username) && is.character(organization)) {
stop("Cannot set both username and organization.",
" Only one GitHub account can own the repository.")
} else if (is.character(organization)) {
account <- organization
account_is_organization <- TRUE
} else if (is.character(username)) {
account <- username
} else {
if (is.na(host)) {
stop("Unable to guess username. Please specify this argument.")
} else {
account <- host_parts[length(host_parts) - 1]
}
}
message("account: ", account)
if (is.null(repository)) {
if (is.na(host)) {
# Use root directory name
repository <- fs::path_file(absolute(s$root))
} else {
repository <- host_parts[length(host_parts)]
}
}
message("respository: ", repository)
# Rename docs/ to public/ ----------------------------------------------------
if (basename(s$docs) == "docs") {
message("* The website directory is already named docs/")
renamed <- NA
} else {
docs <- file.path(dirname(s$docs), "docs")
renamed <- wflow_rename(s$docs, docs, git = FALSE, project = project)
git2r_add(r, renamed$files_git)
message("* Created the website directory docs/")
}
# Edit output_dir in _site.yml -----------------------------------------------
site_yml_fname <- file.path(s$analysis, "_site.yml")
if (!fs::file_exists(site_yml_fname)) {
stop("The website configuration file _site.yml does not exist.")
}
site_yml <- yaml::yaml.load_file(site_yml_fname)
if (site_yml$output_dir == "../docs") {
message("* Output directory is already set to docs/")
} else {
site_yml$output_dir <- "../docs"
yaml::write_yaml(site_yml, file = site_yml_fname)
git2r_add(r, site_yml_fname)
message("* Set output directory to docs/")
}
# Configure Git remote -------------------------------------------------------
# 3 possible scenarios:
# 1. Remote is already set correctly -> Do nothing
# 2. Remote "origin" is currently defined -> Update URL with set_url
# 3. Remote "origin" does not exist -> Add remote "origin"
url_anticipated <- create_remote_url(user = account, repo = repository,
protocol = protocol, domain = domain)
url_current <- remotes["origin"]
if (!is.na(url_current) && url_current == url_anticipated) {
config_remote <- NA
message("* Remote \"origin\" already set to ", remotes["origin"])
} else if ("origin" %in% names(remotes)) {
config_remote <- wflow_git_remote(remote = "origin", user = account,
repo = repository, protocol = protocol,
action = "set_url", domain = domain,
verbose = FALSE, project = project)
message("* Overwrote previous remote \"origin\" to ", config_remote["origin"])
} else {
config_remote <- wflow_git_remote(remote = "origin", user = account,
repo = repository, protocol = protocol,
action = "add", domain = domain,
verbose = FALSE, project = project)
message("* Set remote \"origin\" to ", config_remote["origin"])
}
# Add link in navigation bar -------------------------------------------------
host <- get_host_from_remote(path = project)
if (navbar_link && !is.na(host)) {
site_yml$navbar$right <- list(list(icon = get_fa_brand_icon("github"),
text = "Source code",
href = host))
yaml::write_yaml(site_yml, file = site_yml_fname)
git2r_add(r, site_yml_fname)
message("* Added GitHub link to navigation bar")
}
# Commit changes -------------------------------------------------------------
# Obtain staged files
files_git <- git2r::status(r, staged = TRUE, unstaged = FALSE, untracked = FALSE)
files_git <- unlist(files_git$staged)
names(files_git) <- NULL
if (length(files_git) > 0) {
commit <- git2r::commit(r, message = "Host with GitHub.")
message("* Committed the changes to Git")
} else {
commit <- NA
}
# Create GitHub repository ---------------------------------------------------
repo_created <- FALSE
# Do not create repo if the domain is not github.com
if (domain != "github.com") {
if (isTRUE(create_on_github))
warning("workflowr can only create a repository on github.com",
call. = FALSE, immediate. = TRUE)
create_on_github <- FALSE
}
if (is.null(create_on_github) && interactive()) {
cat("\nTo proceed, you have two options:\n")
cat("\n", wrap(glue::glue(
"1. Have workflowr attempt to automatically create the repository \"{repository}\" on GitHub.
This requires
logging into GitHub and enabling the workflowr-oauth-app access to the
account \"{account}\"."
)), "\n", sep = "")
cat("\n", wrap(glue::glue(
"2. Create the repository \"{repository}\" yourself by going to https://github.com/new and entering \"{repository}\" for the Repository name. This is the default option."
)), "\n", sep = "")
cat("\n")
ans <- ""
while(!ans %in% c("1", "2")) {
ans <- readline("Enter your choice (1 or 2): ")
if (ans == "1") {
create_on_github <- TRUE
cat("You chose option 1: have workflowr attempt to create repo\n")
} else if (ans == "2") {
cat("You chose option 2: create the repo yourself\n")
} else {
cat("Invalid input.\n")
}
}
}
if (is.null(create_on_github)) create_on_github <- FALSE
if (create_on_github) {
repo_url <- create_gh_repo(account, repository,
account_is_organization = account_is_organization)
if (check_browser()) utils::browseURL(repo_url)
repo_created <- TRUE
message(glue::glue("* Created {account}/{repository}"))
}
# Prepare output -------------------------------------------------------------
o <- list(username = username, organization = organization,
account = account, repository = repository,
renamed = renamed, files_git = files_git, commit = commit,
config_remote = config_remote, repo_created = repo_created)
class(o) <- "wflow_use_github"
if (!repo_created) {
message(glue::glue("To do: Create {account}/{repository} at {domain} (if it doesn't already exist)"))
}
message("To do: Run wflow_git_push() to push your project to GitHub")
return(invisible(o))
}
# Create GitHub repository
create_gh_repo <- function(account, repository, account_is_organization = FALSE) {
# Authenticate with GitHub
app <- httr::oauth_app("github",
key = "341566cfd0c8017ba5ac",
secret = "ac5e6d52e3bf71e4535149622f053b9f00f2e155")
# Set user agent
ua <- httr::user_agent("https://github.com/workflowr/workflowr")
message(glue::glue(
"Requesting authorization for workflowr app to access GitHub account {account}"))
oauth_token <- httr::oauth2.0_token(httr::oauth_endpoints("github"),
app,
scope = c("public_repo"),
cache = FALSE)
token <- httr::config(token = oauth_token)
# Ensure they haven't exceeded their rate limit
req_rate <- httr::RETRY("GET", "https://api.github.com/rate_limit", token, ua, terminate_on = c(403, 404))
httr::stop_for_status(req_rate)
content_rate <- httr::content(req_rate)
if (content_rate$resources$core$remaining < 5) {
warning("You've exceeded your rate limit for the GitHub API.",
" Please try again later.")
return(NULL)
}
# Confirm the repository doesn't exist
req_exist <- httr::RETRY("GET", glue::glue("https://api.github.com/repos/{account}/{repository}"),
token, ua, terminate_on = c(403, 404))
status_exist <- httr::http_status(req_exist)
if (status_exist$reason != "Not Found") {
warning(glue::glue("Repository {repository} already exists for account {account}"),
call. = FALSE, immediate. = TRUE)
return(glue::glue("https://github.com/{account}/{repository}"))
}
# Create the repository
message(glue::glue("Creating repository {repository}"))
if (account_is_organization) {
req_create <- httr::RETRY(
"POST", glue::glue("https://api.github.com/orgs/{account}/repos"), token, ua,
body = list(name = repository), encode = "json", terminate_on = c(403, 404)
)
httr::stop_for_status(req_create)
} else {
req_create <- httr::RETRY("POST", "https://api.github.com/user/repos", token, ua,
body = list(name = repository), encode = "json", terminate_on = c(403, 404))
httr::stop_for_status(req_create)
}
# Confirm the repository exists
req_confirm <- httr::RETRY("GET", glue::glue("https://api.github.com/repos/{account}/{repository}"),
token, ua, terminate_on = c(403, 404))
status_confirm <- httr::http_status(req_confirm)
if (status_confirm$category != "Success") {
warning(glue::glue("Failed to create repository {repository}. Reason: {status_confirm$reason}"))
return(NULL)
}
# Return the full URL to new repository
content_confirm <- httr::content(req_confirm)
return(content_confirm$html_url)
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_use_github.R
|
#' Deploy site with GitLab
#'
#' \code{wflow_use_gitlab} automates all the local configuration necessary to
#' deploy your workflowr project with
#' \href{https://docs.gitlab.com/ee/ci/yaml/README.html#pages}{GitLab Pages}.
#' Afterwards, you will need to run \code{wflow_git_push} in the R console (or
#' \code{git push} in the terminal) to push the code to GitLab. Note that this
#' will also create the repository if it doesn't exist yet (this requires GitLab
#' 10.5 or greater). Alternatively, you could manually login to your account and
#' create the new repository on GitLab prior to pushing.
#'
#' \code{wflow_use_gitlab} performs the following steps and then commits the
#' changes:
#'
#' \itemize{
#'
#' \item Renames the website directory from \code{docs/} to \code{public/}
#'
#' \item Edits the setting \code{output_dir} in the file \code{_site.yml} to
#' save the website files in \code{public/}
#'
#' \item Adds a link to the GitLab repository in the navigation bar
#'
#' \item Creates the required file \code{.gitlab-ci.yml}
#'
#' \item Configures the Git remote settings to use GitLab
#'
#' }
#'
#' By default the GitLab repository is set to private, so you are the only one
#' that can access it. If you need to keep it private, you can
#' \href{https://gitlab.com/help/user/project/pages/pages_access_control.md}{grant
#' access} to collaborators in Settings->Members. Otherwise, you can make it
#' public in Settings->General->Visibility.
#'
#' For more details, read the documentation provided by
#' \href{https://docs.gitlab.com/ee/ci/yaml/README.html#pages}{GitLab Pages}.
#'
#' @param username character (default: NULL). The GitLab account associated with
#' the GitLab repository. This is likely your personal GitLab username, but it
#' could also be the name of a GitLab Group you belong to. It will be
#' combined with the arguments \code{repository} and \code{domain} to
#' determine the URL of the new repository, e.g. the default is
#' https://gitlab.com/username/repository. It will be combined with the
#' arguments \code{repository}, \code{domain}, and \code{protocol} to
#' determine the URL for Git to use to push and pull from GitLab, e.g. the
#' default is https://gitlab.com/username/repository.git. If \code{username}
#' is not specified, \code{wflow_use_gitlab} will first attempt to guess it
#' from the current setting for the remote URL named "origin". If you haven't
#' previously configured a remote for this workflowr project (or you are
#' unsure what that means), then you should specify your GitLab username when
#' calling this function.
#' @param repository character (default: NULL). The name of the remote
#' repository on GitLab. If not specified, workflowr will guess the name of
#' the repository. First, it will check the current setting for the remote URL
#' named "origin". Second, it will use the name of the root directory of the
#' workflowr project.
#' @param navbar_link logical (default: TRUE). Insert a link to the GitLab
#' repository into the navigation bar.
#' @param protocol character (default: "https"). The protocol for communicating
#' with GitLab. Must be either "https" or "ssh".
#' @param domain character (default: "gitlab.com"). The domain of the remote
#' host. You only need to change this if you are using a custom GitLab
#' instance hosted by your organization. For example, "git.rcc.uchicago.edu"
#' is the domain for the GitLab instance hosted by the University of Chicago
#' Research Computing Center.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return Invisibly returns a list of class \code{wflow_use_gitlab}. This is
#' currently for internal use only. Please open an Issue if you'd like to use
#' this information.
#'
#' @seealso \code{\link{wflow_git_push}}, \code{\link{wflow_git_remote}},
#' \code{\link{wflow_use_github}}, vignette("wflow-06-gitlab")
#'
#' @examples
#' \dontrun{
#'
#' wflow_use_gitlab("your-username", "name-of-repository")
#' # Login with GitLab account and create new repository
#' wflow_git_push()
#' }
#'
#'@export
wflow_use_gitlab <- function(username = NULL, repository = NULL,
navbar_link = TRUE,
protocol = "https",
domain = "gitlab.com",
project = ".") {
# Check input arguments ------------------------------------------------------
if (!is.null(username))
if (!(is.character(username) && length(username) == 1))
stop("username must be NULL or a one element character vector: ", username)
if (!is.null(repository))
if (!(is.character(repository) && length(repository) == 1))
stop("repository must be NULL or a one element character vector: ", repository)
assert_is_flag(navbar_link)
if (!(is.character(protocol) && length(protocol) == 1))
stop("protocol must be a one element character vector: ", protocol)
if (!(is.character(domain) && length(domain) == 1))
stop("domain must be a one element character vector: ", domain)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
check_git_config(project, "`wflow_use_gitlab`")
# Status ---------------------------------------------------------------------
s <- wflow_status(project = project)
# Convert to absolute paths to facilitate path manipulation below
s$analysis <- absolute(s$analysis)
s$docs <- absolute(s$docs)
r <- git2r::repository(path = s$git)
remotes <- wflow_git_remote(verbose = FALSE, project = project)
message("Summary from wflow_use_gitlab():")
# Determine username and repository ------------------------------------------
# guess based on current remote "origin"
host <- get_host_from_remote(path = project) # returns NA if unavailable
host_parts <- stringr::str_split(host, "/")[[1]]
if (is.null(username)) {
if (is.na(host)) {
stop("Unable to guess username. Please specify this argument.")
} else {
username <- host_parts[length(host_parts) - 1]
}
}
message("username: ", username)
if (is.null(repository)) {
if (is.na(host)) {
# Use root directory name
repository <- fs::path_file(absolute(s$root))
} else {
repository <- host_parts[length(host_parts)]
}
}
message("respository: ", repository)
# Rename docs/ to public/ ----------------------------------------------------
if (basename(s$docs) == "public") {
message("* The website directory is already named public/")
renamed <- NA
} else {
public <- file.path(dirname(s$docs), "public")
renamed <- wflow_rename(s$docs, public, git = FALSE, project = project)
git2r_add(r, renamed$files_git)
message("* Created the website directory public/")
}
# Edit output_dir in _site.yml -----------------------------------------------
site_yml_fname <- file.path(s$analysis, "_site.yml")
if (!fs::file_exists(site_yml_fname)) {
stop("The website configuration file _site.yml does not exist.")
}
site_yml <- yaml::yaml.load_file(site_yml_fname)
if (site_yml$output_dir == "../public") {
message("* Output directory is already set to public/")
} else {
site_yml$output_dir <- "../public"
yaml::write_yaml(site_yml, file = site_yml_fname)
git2r_add(r, site_yml_fname)
message("* Set output directory to public/")
}
# .gitlab-ci.yml -------------------------------------------------------------
# The list `gitlab` is defined in R/infrastructure.R
gitlab_yml <- gitlab[[".gitlab-ci.yml"]]
gitlab_yml_fname <- file.path(s$root, ".gitlab-ci.yml")
if (fs::file_exists(gitlab_yml_fname)) {
message("* .gitlab-ci.yml file already exists")
} else {
cat(glue::glue(gitlab_yml), file = gitlab_yml_fname)
git2r_add(r, gitlab_yml_fname)
message("* Created the file .gitlab-ci.yml")
}
# Configure Git remote -------------------------------------------------------
# 3 possible scenarios:
# 1. Remote is already set correctly -> Do nothing
# 2. Remote "origin" is currently defined -> Update URL with set_url
# 3. Remote "origin" does not exist -> Add remote "origin"
url_anticipated <- create_remote_url(user = username, repo = repository,
protocol = protocol, domain = domain)
url_current <- remotes["origin"]
if (!is.na(url_current) && url_current == url_anticipated) {
config_remote <- NA
message("* Remote \"origin\" already set to ", remotes["origin"])
} else if ("origin" %in% names(remotes)) {
config_remote <- wflow_git_remote(remote = "origin", user = username,
repo = repository, protocol = protocol,
action = "set_url", domain = domain,
verbose = FALSE, project = project)
message("* Changed remote \"origin\" to ", config_remote["origin"])
} else {
config_remote <- wflow_git_remote(remote = "origin", user = username,
repo = repository, protocol = protocol,
action = "add", domain = domain,
verbose = FALSE, project = project)
message("* Set remote \"origin\" to ", config_remote["origin"])
}
# Add link in navigation bar -------------------------------------------------
host <- get_host_from_remote(path = project)
if (navbar_link && !is.na(host)) {
site_yml$navbar$right <- list(list(icon = get_fa_brand_icon("gitlab"),
text = "Source code",
href = host))
yaml::write_yaml(site_yml, file = site_yml_fname)
git2r_add(r, site_yml_fname)
message("* Added GitLab link to navigation bar")
}
# Commit changes -------------------------------------------------------------
# Obtain staged files
files_git <- git2r::status(r, staged = TRUE, unstaged = FALSE, untracked = FALSE)
files_git <- unlist(files_git$staged)
names(files_git) <- NULL
if (length(files_git) > 0) {
commit <- git2r::commit(r, message = "Host with GitLab.")
message("* Committed the changes to Git")
} else {
commit <- NA
}
# Prepare output -------------------------------------------------------------
o <- list(username = username, repository = repository,
renamed = renamed, files_git = files_git, commit = commit,
config_remote = config_remote)
class(o) <- "wflow_use_gitlab"
message("To do: Run wflow_git_push() to send your project to GitLab")
message("Note: The push will create the new repository if it doesn't exist yet")
return(invisible(o))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_use_gitlab.R
|
#' View research website locally
#'
#' \code{wflow_view} displays the website locally in your browser or the RStudio
#' Viewer pane.
#'
#' \code{wflow_view} by default displays the file \code{index.html}. To view the
#' most recently modified HTML file, set \code{latest = TRUE}. To specify which
#' file(s) to view, specify either the name(s) of the R Markdown or HTML
#' file(s).
#'
#' \code{wflow_view} uses \code{\link{browseURL}} to view the HTML files in the
#' browser. If you wish to do something non-traditional like view an HTML file
#' that is not in the docs directory or not part of a workflowr project, you can
#' use that function directly.
#'
#' If \code{wflow_view} is run in the RStudio IDE and only one file has been
#' requested to be viewed, the file is displayed in the
#' \href{https://rstudio.github.io/rstudio-extensions/rstudio_viewer.html}{RStudio
#' Viewer}.
#'
#' If R has no default browser set (determined by \code{getOption("browser")}),
#' then \code{wflow_view} cannot open any HTML files. See
#' \code{\link{browseURL}} for setup instructions.
#'
#' @param files character (default: NULL). Name(s) of the specific file(s) to
#' view. These can be either the name(s) of the R Markdown file(s) in the
#' analysis directory or the HTML file(s) in the docs directory. Supports file
#' \href{https://en.wikipedia.org/wiki/Glob_(programming)}{globbing}.
#' @param latest logical (default: FALSE). Display the HTML file with the most
#' recent modification time (in addition to those specified in \code{files}).
#' If \code{files = NULL} and \code{latest = FALSE}, then \code{index.html} is
#' viewed.
#' @param dry_run logical (default: FALSE). Do not actually view file(s). Mainly
#' useful for testing.
#' @param project character (default: ".") By default the function assumes the
#' current working directory is within the project. If this is not true,
#' you'll need to provide the path to the project directory.
#'
#' @return An object of class \code{wflow_view}, which is a list with the
#' following elements:
#'
#' \item{files}{The input argument \code{files} (converted to relative paths).}
#'
#' \item{latest}{The input argument \code{latest}.}
#'
#' \item{dry_run}{The input argument \code{dry_run}.}
#'
#' \item{browser}{Logical indicating if a default browser has been set. If
#' FALSE, no HTML files can be opened. This is determined by the value
#' returned by \code{getOption("browser")}.}
#'
#' \item{opened}{The HTML files opened by \code{wflow_view}.}
#'
#' @seealso \code{\link{browseURL}}
#'
#' @examples
#' \dontrun{
#'
#' # View index.html
#' wflow_view()
#'
#' # View the most recently modified HTML file
#' wflow_view(latest = TRUE)
#'
#' # View a file by specifying the R Markdown file
#' wflow_view("analysis/fname.Rmd")
#'
#' # View a file by specifying the HTML file
#' wflow_view("docs/fname.html")
#'
#' # View multiple files
#' wflow_view(c("fname1.Rmd", "fname2.Rmd"))
#' wflow_view("docs/*html")
#' }
#' @export
wflow_view <- function(files = NULL, latest = FALSE, dry_run = FALSE,
project = ".") {
# Check input arguments ------------------------------------------------------
files <- process_input_files(files, allow_null = TRUE,
convert_to_relative_paths = TRUE)
if (!is.null(files)) {
# Check for valid file extensions
ext <- tools::file_ext(files)
ext_wrong <- !(ext %in% c("Rmd", "rmd", "html"))
if (any(ext_wrong))
stop(wrap("File extensions must be either Rmd, rmd, or html."))
}
assert_is_flag(latest)
assert_is_flag(dry_run)
check_wd_exists()
assert_is_single_directory(project)
project <- absolute(project)
p <- wflow_paths(project = project)
# Require that any R Markdown files are in the R Markdown directory and the
# HTML files are in the website directory
if (!is.null(files)) {
for (i in seq_along(files)) {
if (ext[i] == "html") {
if (!stringr::str_detect(files[i], p$docs)) {
stop("Cannot view non-workflowr file: ", files[i])
}
} else {
if (!stringr::str_detect(files[i], p$analysis)) {
stop("Cannot view non-workflowr file: ", files[i])
}
}
}
}
# Obtain files ---------------------------------------------------------------
html <- files
# Convert any R Markdown files to HTML and remove duplicates
if (!is.null(html)) {
# `ext` was created during the error handling at the start of the function
html[ext != "html"] <- to_html(html[ext != "html"], outdir = p$docs)
html <- unique(html)
}
# Obtain the most recently modified file
if (latest) {
html_all <- list.files(path = p$docs, pattern = "html$",
full.names = TRUE)
html_mtime <- file.mtime(html_all)
html <- unique(c(html, html_all[which.max(html_mtime)]))
}
# Open the index page if no other files specified
if (length(html) == 0) {
html <- file.path(p$docs, "index.html")
}
# Check for missing HTML files -----------------------------------------------
html_missing <- !fs::file_exists(html)
if (any(html_missing)) {
warning("The following HTML files are missing:\n",
paste(html[html_missing], collapse = "\n"))
}
html <- html[!html_missing]
if (length(html) == 0) {
stop(wrap("No HTML files were able to viewed.
Try running `wflow_build()` first."))
}
# Check default browser ------------------------------------------------------
# If no option is set for browser, browseURL will throw an error. This is
# disastrous if wflow_view was called from wflow_publish because it resets
# everything it had done if there is an error.
browser <- check_browser()
# View files -----------------------------------------------------------------
if (!dry_run) {
# If run in RStudio and only 1 file to be viewed, use the RStudio Viewer
viewer <- getOption("viewer")
if (!is.null(viewer) && length(html) == 1) {
# RStudio Viewer only displays files saved in the R temporary directory
# (and it isn't fooled by symlinks).
tmp_dir <- absolute(tempdir())
file.copy(p$docs, tmp_dir, recursive = TRUE)
html_tmp <- file.path(tmp_dir, basename(p$docs), basename(html))
viewer(html_tmp)
} else if (browser) { # Use the default browser
for (h in html) {
utils::browseURL(h)
}
}
}
# Prepare output -------------------------------------------------------------
o <- list(files = files, latest = latest, dry_run = dry_run,
browser = browser, opened = html)
class(o) <- "wflow_view"
return(o)
}
#' @export
print.wflow_view <- function(x, ...) {
if (!x$browser) {
cat(wrap(
"wflow_view will not open any files because no option is set for a
default browser. See the Details section of ?browseURL for setup
instructions."), sep = "\n")
return(invisible(x))
}
if (x$dry_run) {
cat("wflow_view would open:\n")
} else {
cat("wflow_view opened:\n")
}
for (f in x$opened) {
cat(sprintf("- %s\n", f))
}
return(invisible(x))
}
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/wflow_view.R
|
# Resources:
# .onAttach & zzz.R explained: https://r-pkgs.org/r.html#when-you-do-need-side-effects
.onAttach <- function(libname, pkgname) {
m <- c(sprintf("This is workflowr version %s",
utils::packageVersion("workflowr")),
"Run ?workflowr for help getting started")
packageStartupMessage(paste(m, collapse = "\n"))
check_dependencies()
}
.onLoad <- function(libname, pkgname) {
sysgit <- Sys.which("git")
wflow_pkg_opts <- list(
workflowr.autosave = TRUE,
workflowr.sysgit = if(fs::file_exists(sysgit)) sysgit else "",
workflowr.view = interactive()
)
op <- options()
toset <- !(names(wflow_pkg_opts) %in% names(op))
if(any(toset)) options(wflow_pkg_opts[toset])
invisible()
}
# Unfortunately I can't assume anything about the dependencies. They may be:
#
# * Not installed: a user can remove them after installing workflowr
# * Installed but unusable, e.g. one of their dependencies was removed
# * Installed but below the minimum required version
dependencies <- c(
callr = "3.7.0",
fs = "1.2.7",
getPass = NA,
git2r = "0.26.0",
glue = NA,
httpuv = "1.2.2",
httr = NA,
knitr = "1.29",
rmarkdown = "1.18",
rprojroot = "1.2",
rstudioapi = "0.6",
stringr = "1.3.0",
tools = NA,
utils = NA,
whisker = "0.3-2",
xfun = NA,
yaml = NA
)
check_dependencies <- function() {
for (i in seq_along(dependencies)) {
pkg_name <- names(dependencies)[i]
pkg_version <- dependencies[i]
if (length(find.package(pkg_name, quiet = TRUE, verbose = FALSE)) == 0) {
stop(sprintf("The required dependency \"%s\" is missing, please install it.",
pkg_name))
}
if (!requireNamespace(pkg_name, quietly = TRUE)) {
stop(sprintf("The required dependency \"%s\" is unable to be loaded, please re-install it.",
pkg_name))
}
if (!is.na(pkg_version)) {
installed_version <- utils::packageVersion(pkg_name)
if (installed_version < as.package_version(pkg_version)) {
stop(sprintf(
"Please update package \"%s\": version %s is installed, but %s is required",
pkg_name, installed_version, pkg_version
))
}
}
}
return(NULL)
}
#' workflowr: A workflow template for creating a research website
#'
#' The workflowr package helps you create a research website using R Markdown
#' and Git.
#'
#' @section Vignettes:
#'
#' Run \code{browseVignettes("workflowr")} to read the package vignettes
#' locally. Alternatively you can read the documentation online at
#' \url{https://workflowr.github.io/workflowr/}.
#'
#' @section Main workflowr functions:
#'
#' \describe{
#'
#' \item{\code{\link{wflow_start}}}{Start a workflowr project.}
#'
#' \item{\code{\link{wflow_build}}}{Build the site to view locally.}
#'
#' \item{\code{\link{wflow_publish}}}{Publish analyses to include in the
#' website.}
#'
#' \item{\code{\link{wflow_status}}}{Report status of analysis files.}
#' }
#'
#' @section Supporting workflowr functions:
#'
#' For further information on workflowr, see the help pages for these
#' functions:
#'
#' \describe{
#'
#' \item{\code{\link{wflow_html}}}{More technical details about how
#' individual R Markdown files are converted to webpages, and how the
#' rendering settings can be customized.}
#'
#' \item{\code{\link{wflow_site}}}{This help page explains how
#' project-wide rendering settings can be customized in the
#' \code{_site.yml} file.}
#' }
#'
#' @section Package options:
#'
#' The following package options affect the default behavior of the workflowr
#' functions. To permanently set any of these options, add a call to the
#' function \code{\link[base]{options}} in the file \code{.Rprofile} at the root
#' of your workflowr project. For example:
#'
#' \preformatted{
#' # Do not use Git executable
#' options(workflowr.sysgit = "")
#' }
#'
#' \describe{
#'
#' \item{workflowr.autosave}{A logical indicating whether workflowr functions
#' should automatically save files open in the RStudio editor before running.
#' The default is \code{TRUE}. This requires RStudio 1.1.287 or later. Only
#' files that have been previously saved are affected. In other words, unnamed
#' files will be ignored.}
#'
#' \item{workflowr.sysgit}{The path to the system Git executable. This is
#' occasionally used to increase the speed of Git operations performed by
#' workflowr. By default it is set to the first Git executable on the search
#' path. You can specify a path to a different Git executable. Alternatively you
#' can disable this behavior entirely by setting it to the empty string \code{""}.}
#'
#' \item{workflowr.view}{A logical indicating whether workflowr functions should
#' open webpages for viewing in the browser. The default is set to
#' \code{\link[base]{interactive}} (i.e. it is \code{TRUE} only if it is an
#' interactive R session). This option is currently used by
#' \code{\link{wflow_build}}, \code{\link{wflow_git_push}}, and
#' \code{\link{wflow_publish}}.}
#' }
#'
#' @keywords internal
#'
"_PACKAGE"
|
/scratch/gouwar.j/cran-all/cranData/workflowr/R/zzz.R
|
## ----decide-to-execute, cache=FALSE, echo=FALSE-------------------------------
library("knitr")
# The code in this vignette requires a functional Git setup. If a workflowr user
# has a .git directory upstream of R's temporary directory, then wflow_start will
# fail. If this situation is detected, the code is not evaluated.
if (git2r::in_repository(tempdir())) {
opts_chunk$set(eval = FALSE)
warning(workflowr:::wrap(
"Because you have a Git repository upstream of R's temporary directory,
none of the code below was executed. Please refer to the online
documentation to see the output:
https://workflowr.github.io/workflowr/articles/wflow-01-getting-started.html
\n\nYou should consider removing the directory since it was likely created
in error: ",
workflowr:::git2r_slot(git2r::repository(tempdir(), discover = TRUE), "path")))
}
# The code in this vignette requires pandoc. Not every CRAN server has pandoc
# installed.
if (!rmarkdown::pandoc_available()) {
opts_chunk$set(eval = FALSE)
message(workflowr:::wrap(
"The code chunks below were not executed because this machine does not
have pandoc installed."
))
}
## ----chunk-options, cache=FALSE, include=FALSE--------------------------------
.tmp <- tempfile("wflow-01-getting-started-")
.tmp <- workflowr:::absolute(.tmp)
.project <- file.path(.tmp, "myproject")
fs::dir_create(.project)
opts_knit$set(root.dir = .project)
opts_chunk$set(collapse = TRUE)
## ----load-workflowr-----------------------------------------------------------
library("workflowr")
## ----wflow-git-config, eval=FALSE---------------------------------------------
# # Replace the example text with your information
# wflow_git_config(user.name = "Your Name", user.email = "email@domain")
## ----wflow-start, eval=FALSE--------------------------------------------------
# wflow_start("myproject")
## ----wflow-start-hidden, echo=FALSE-------------------------------------------
setwd(.tmp)
unlink(.project, recursive = TRUE)
wflow_start("myproject", user.name = "Your Name", user.email = "email@domain")
## ----wflow-build, eval=FALSE--------------------------------------------------
# wflow_build()
## ----wflow-build-hidden, echo=FALSE-------------------------------------------
# Don't want to actually open the website when building the vignette
wflow_build(view = FALSE)
## ----wflow-build-no-action----------------------------------------------------
wflow_build()
## ----wflow-view, eval=FALSE---------------------------------------------------
# wflow_view()
## ----edit-files, include=FALSE------------------------------------------------
for (f in file.path("analysis", c("index.Rmd", "about.Rmd", "license.Rmd"))) {
cat("\nedit\n", file = f, append = TRUE)
}
## ----wflow-status-------------------------------------------------------------
wflow_status()
## ----wflow-publish, eval=FALSE------------------------------------------------
# wflow_publish(c("analysis/index.Rmd", "analysis/about.Rmd", "analysis/license.Rmd"),
# "Publish the initial files for myproject")
## ----wflow-publish-hidden, echo=FALSE-----------------------------------------
# Don't want to actually open the website when building the vignette
wflow_publish(c("analysis/index.Rmd", "analysis/about.Rmd", "analysis/license.Rmd"),
"Publish the initial files for myproject",
view = FALSE)
## ----wflow-status-post-publish------------------------------------------------
wflow_status()
## ----wflow-use-github, eval=FALSE---------------------------------------------
# wflow_use_github("myname")
## ----wflow-use-github-hidden, echo=FALSE--------------------------------------
# Don't want to try to authenticate on GitHub
wflow_use_github("myname", create_on_github = FALSE)
## ----wflow-git-push-----------------------------------------------------------
wflow_git_push(dry_run = TRUE)
## ----create-file, eval=FALSE--------------------------------------------------
# wflow_open("analysis/first-analysis.Rmd")
## ----create-file-hidden, echo=FALSE-------------------------------------------
# Don't want to actually open the file when building the vignette in RStudio
wflow_open("analysis/first-analysis.Rmd", edit_in_rstudio = FALSE)
## ----edit-index, include=FALSE------------------------------------------------
cat("\nClick on this [link](first-analysis.html) to see my results.\n",
file = "analysis/index.Rmd", append = TRUE)
## ----wflow-status-newfile-----------------------------------------------------
wflow_status()
## ----wflow-publish-newfile, eval=FALSE----------------------------------------
# wflow_publish(c("analysis/index.Rmd", "analysis/first-analysis.Rmd"),
# "Add my first analysis")
## ----wflow-publish-newfile-hidden, echo=FALSE---------------------------------
# Don't want to actually open the website when building the vignette
wflow_publish(c("analysis/index.Rmd", "analysis/first-analysis.Rmd"),
"Add my first analysis", view = FALSE)
## ----republish----------------------------------------------------------------
wflow_publish("analysis/_site.yml", republish = TRUE, dry_run = TRUE)
|
/scratch/gouwar.j/cran-all/cranData/workflowr/inst/doc/wflow-01-getting-started.R
|
---
title: "Getting started with workflowr"
subtitle: "workflowr version `r utils::packageVersion('workflowr')`"
author: "John Blischak"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{Getting started with workflowr}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r decide-to-execute, cache=FALSE, echo=FALSE}
library("knitr")
# The code in this vignette requires a functional Git setup. If a workflowr user
# has a .git directory upstream of R's temporary directory, then wflow_start will
# fail. If this situation is detected, the code is not evaluated.
if (git2r::in_repository(tempdir())) {
opts_chunk$set(eval = FALSE)
warning(workflowr:::wrap(
"Because you have a Git repository upstream of R's temporary directory,
none of the code below was executed. Please refer to the online
documentation to see the output:
https://workflowr.github.io/workflowr/articles/wflow-01-getting-started.html
\n\nYou should consider removing the directory since it was likely created
in error: ",
workflowr:::git2r_slot(git2r::repository(tempdir(), discover = TRUE), "path")))
}
# The code in this vignette requires pandoc. Not every CRAN server has pandoc
# installed.
if (!rmarkdown::pandoc_available()) {
opts_chunk$set(eval = FALSE)
message(workflowr:::wrap(
"The code chunks below were not executed because this machine does not
have pandoc installed."
))
}
```
```{r chunk-options, cache=FALSE, include=FALSE}
.tmp <- tempfile("wflow-01-getting-started-")
.tmp <- workflowr:::absolute(.tmp)
.project <- file.path(.tmp, "myproject")
fs::dir_create(.project)
opts_knit$set(root.dir = .project)
opts_chunk$set(collapse = TRUE)
```
The workflowr R package helps scientists organize their research in a way that
promotes effective project management, reproducibility, collaboration, and
sharing of results. Workflowr combines literate programming (knitr and
rmarkdown) and version control (Git, via git2r) to generate a website containing
time-stamped, versioned, and documented results. Any R user can quickly and
easily adopt workflowr.
This tutorial assumes you have already followed the [installation
instructions](https://workflowr.github.io/workflowr/index.html#installation).
Specifically, you need to have R, pandoc (or RStudio), and workflowr installed
on your computer. Furthermore, you need an account on [GitHub][gh] or
[GitLab][gl].
[gh]: https://github.com
[gl]: https://about.gitlab.com/
## Overview
A workflowr project has two key components:
1. An R Markdown-based website. This consists of a configuration file
(`_site.yml`), a collection of R Markdown files, and their
corresponding HTML files.
2. A Git repository. Git is a [version control system][vcs] that helps track
code development^[There are many ways to use Git: in the Terminal, in the RStudio
Git pane, or another Git graphical user interface (GUI) (see
[here](https://git-scm.com/download/gui/linux) for GUI options).]. Workflowr is
able to run the basic Git commands, so there is no need to install Git prior to
using workflowr.
One of the main goals of workflowr is to help make your research more
transparent and reproducible. This is achieved by displaying multiple
"reproducibility checks" at the top of each analysis, including the unique
identifier that Git assigns to a snapshot of your code (or "commit" as Git calls
it), so you always know which version of the code produced the results.
[vcs]: https://en.wikipedia.org/wiki/Version_control
## Start the project
To start a new project, open R (or RStudio) and load the workflowr package (note
that all the code in this vignette should be run directly in the R console, i.e.
do **not** try to run workflowr functions inside of R Markdown documents).
```{r load-workflowr}
library("workflowr")
```
If you have never created a Git repository on your computer before, you need to
run the following command to tell Git your name and email. Git uses this
information to assign the changes you make to the code to you (analogous to how
Track Changes in a Microsoft Office Word document assigns your changes to you).
You do not need to use the exact same name and email as you used for your
account on GitHub or GitLab. Also, you only need to run this command once per
computer, and all subsequent workflowr projects will use this information (you
can also update it at any time by re-running the command with different input).
```{r wflow-git-config, eval=FALSE}
# Replace the example text with your information
wflow_git_config(user.name = "Your Name", user.email = "email@domain")
```
Now you are ready to start your first workflowr project!
`wflow_start("myproject")` creates a directory called `myproject/` that contains
all the files to get started. It also changes the working directory to
`myproject/`^[If you're using RStudio, you can alternatively create a new
workflowr project using the RStudio project template. Go to `File` -> `New
Project...` and select `workflowr project` from the list of project types. In
the future you can return to your project by choosing `Open Project...` and
selecting the file `myproject.Rproj`. This will set the correct working
directory in the R console, switch the file navigator to the project, and
configure the Git pane.] and initializes a Git repository with the initial
commit already made.
```{r wflow-start, eval=FALSE}
wflow_start("myproject")
```
```{r wflow-start-hidden, echo=FALSE}
setwd(.tmp)
unlink(.project, recursive = TRUE)
wflow_start("myproject", user.name = "Your Name", user.email = "email@domain")
```
`wflow_start()` created the following directory structure in `myproject/`:
```
myproject/
├── .gitignore
├── .Rprofile
├── _workflowr.yml
├── analysis/
│   ├── about.Rmd
│   ├── index.Rmd
│   ├── license.Rmd
│   └── _site.yml
├── code/
│   └── README.md
├── data/
│   └── README.md
├── docs/
├── myproject.Rproj
├── output/
│   └── README.md
└── README.md
```
At this point, you have a minimal but complete workflowr project; that is, you
have all the files needed to use the main workflowr commands and publish a
research website. Later on, as you get more comfortable with the basic setup,
you can modify and add to the initial file structure. The overall rationale for
this setup is to help organize the files that will be commonly included in a
data analysis project. However, not all of these files are required to use
workflowr.
The two **required** subdirectories are `analysis/` and `docs/`. These
directories should never be removed from the workflowr project.
* `analysis/`: This directory contains all the source R Markdown files for
implementing the data analyses for your project. It also contains a special R
Markdown file, `index.Rmd`, that does not contain any R code, but will be used
to generate `index.html`, the homepage for your website. In addition, this
directory contains the important configuration file `_site.yml`, which you can
use to edit the theme, navigation bar, and other website aesthetics (for more
details see the documentation on [R Markdown websites][rmd-website]). Do not
delete `index.Rmd` or `_site.yml`.
[rmd-website]: https://bookdown.org/yihui/rmarkdown/rmarkdown-site.html
* `docs/`: This directory contains all the HTML files for your
website. The HTML files are built from the R Markdown files in
`analysis/`. Furthermore, any figures created by the R Markdown files
are saved here. Each of these figures is saved according to the
following pattern: `docs/figure/<insert Rmd filename>/<insert chunk
name>-#.png`, where `#` corresponds to which of the plots the chunk
generated (since one chunk can produce an arbitrary number of plots)^[Because of
this requirement, you can't customize the knitr option `fig.path` (which
controls where figure files are saved) in any R Markdown file that is part of a
workflowr project. If you do set it, it will be ignored and workflowr will
insert a warning into the HTML file to alert you.].
The workflowr-specific configuration file is `_workflowr.yml`. It will apply the
workflowr reproducibility checks consistently across all your R Markdown files.
The most critical setting is `knit_root_dir`, which determines the directory
where the files in `analysis/` will be executed. The default is to execute the
code in the root of the project where `_workflowr.yml` is located (i.e. `"."`).
To instead execute the code from `analysis/`, change the setting to
`knit_root_dir: "analysis"`. See `?wflow_html` for more details.
Also required is the RStudio project file, in this example `myproject.Rproj`.
Even if you are not using RStudio, do not delete this file because the workflowr
functions rely on it to determine the root directory of the project.
The **optional** directories are `data/`, `code/`, and `output/`.
These directories are suggestions for organizing your data analysis
project, but can be removed if you do not find them useful.
* `data/`: This directory is for raw data files.
* `code/`: This directory is for code that might not be appropriate to include
in R Markdown format (e.g. for pre-processing the data, or for long-running
code).
* `output/`: This directory is for processed data files and other
outputs generated from the code and data. For example, scripts in
`code/` that pre-process raw data files from `data/` should save the
processed data files in `output/`.
The `.Rprofile` file is a regular R script that is run once when the project is
opened. It contains the call `library("workflowr")`, ensuring that workflowr is
loaded automatically each time a workflowr project is opened.
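As a sketch, the `.Rprofile` of a workflowr project might look like the
following (the commented option is one of the workflowr package options
documented in `?workflowr`):
```{r rprofile-example, eval=FALSE}
# .Rprofile
# Load workflowr automatically whenever the project is opened
library("workflowr")
# Optionally set workflowr package options here, e.g. to stop workflowr
# from opening webpages in the browser:
# options(workflowr.view = FALSE)
```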
## Build the website
You will notice that the `docs/` directory is currently empty. That is
because we have not yet generated the website from the `analysis/`
files. This is what we will do next.
To build the website, run the function `wflow_build()` in the R
console:
```{r wflow-build, eval=FALSE}
wflow_build()
```
```{r wflow-build-hidden, echo=FALSE}
# Don't want to actually open the website when building the vignette
wflow_build(view = FALSE)
```
This command builds all the R Markdown files in `analysis/` and saves
the corresponding HTML files in `docs/`. It sets the same seed before
running every file so that any function that generates random data
(e.g. permutations) is reproducible. Furthermore, each file is built
in its own external R session to avoid any potential conflicts between
analyses (e.g. accidentally sharing a variable with the same name across files).
Lastly, it displays the website in the RStudio Viewer or default web browser.
The default action of `wflow_build()` is to behave similar to a
[Makefile](https://swcarpentry.github.io/make-novice/) (`make = TRUE` is the
default when no input files are provided), i.e. it only builds R Markdown files
that have been modified more recently than their corresponding HTML files. Thus
if you run it again, no files are built (and no files are displayed).
```{r wflow-build-no-action}
wflow_build()
```
To view the site without first building any files, run `wflow_view()`, which by
default displays the file `docs/index.html`:
```{r wflow-view, eval=FALSE}
wflow_view()
```
This is how you can view your site right on your local machine. Go ahead and
edit the files `index.Rmd`, `about.Rmd`, and `license.Rmd` to describe your
project. Then run `wflow_build()` to re-build the HTML files and display them in
the RStudio Viewer or your browser.
```{r edit-files, include=FALSE}
for (f in file.path("analysis", c("index.Rmd", "about.Rmd", "license.Rmd"))) {
cat("\nedit\n", file = f, append = TRUE)
}
```
## Publish the website
workflowr makes an important distinction between R Markdown files that are
published versus unpublished. A published file is included in the website
online, whereas the HTML file of an unpublished R Markdown file can only be
viewed on the local computer. Since the project was just started, there are
no published files. To view the status of the workflowr project, run
`wflow_status()`.
```{r wflow-status}
wflow_status()
```
This alerts us that our project has 3 R Markdown files, and they are all
unpublished ("Unp"). Furthermore, it instructs how to publish them: use
`wflow_publish()`. The first argument to `wflow_publish()` is a character vector
of the R Markdown files to publish^[Instead of listing each file individually,
you can also pass [file globs](https://en.wikipedia.org/wiki/Glob_(programming))
as input to any workflowr function, e.g. `wflow_publish("analysis/*Rmd",
"Publish the initial files for myproject")`.]. The second is a message that will
be recorded by the version control system Git when it commits (i.e. saves a
snapshot of) these files. The more informative the commit message the better (so
that future you knows what you were trying to accomplish).
```{r wflow-publish, eval=FALSE}
wflow_publish(c("analysis/index.Rmd", "analysis/about.Rmd", "analysis/license.Rmd"),
"Publish the initial files for myproject")
```
```{r wflow-publish-hidden, echo=FALSE}
# Don't want to actually open the website when building the vignette
wflow_publish(c("analysis/index.Rmd", "analysis/about.Rmd", "analysis/license.Rmd"),
"Publish the initial files for myproject",
view = FALSE)
```
`wflow_publish()` reports the 3 steps it took:
* **Step 1:** Commits the 3 R Markdown files using the custom commit message
* **Step 2:** Builds the HTML files using `wflow_build()`
* **Step 3:** Commits the 3 HTML files plus the files that specify the style of
the website (e.g. CSS and JavaScript files)
Performing these 3 steps ensures that the HTML files are always in sync with the
latest versions of the R Markdown files. Performing these steps manually would
be tedious and error-prone (e.g. an HTML file may have been built with an
outdated version of an R Markdown file). However, `wflow_publish()` makes it
easy to keep the pages of your site in sync.
Now when you run `wflow_status()`, it reports that all the files are published
and up-to-date.
```{r wflow-status-post-publish}
wflow_status()
```
## Deploy the website
At this point you have built a version-controlled website that exists on your
local computer. The next step is to put your code on GitHub so that it can serve
your website online. If you are using GitLab, switch to the vignette [Hosting
workflowr websites using GitLab](wflow-06-gitlab.html) and then continue with
the next section.
All the required setup can be performed by the workflowr function
`wflow_use_github()`. The only required argument is your GitHub username^[The
default is to name the GitHub repository using the same name as the directory
that contains the workflowr project. This is likely what you used with
`wflow_start()`, which in this case was `"myproject"`. If you'd prefer the
GitHub repository to have a different name, or if you've already created a
GitHub repo with a different name, you can pass the argument `repository =
"other-name"`.]:
```{r wflow-use-github, eval=FALSE}
wflow_use_github("myname")
```
```{r wflow-use-github-hidden, echo=FALSE}
# Don't want to try to authenticate on GitHub
wflow_use_github("myname", create_on_github = FALSE)
```
This has two main effects on your local machine: 1) it configures Git to
communicate with your future GitHub repository, and 2) it inserts a link to your
future GitHub repository into the navigation bar (you'll need to run
`wflow_build()` or `wflow_publish()` to observe this change). Furthermore,
`wflow_use_github()` will prompt you to ask if you'd like to authorize workflowr
to automatically create the repository on GitHub. If you agree, a browser tab
will open, and you will need to authenticate with your username and password,
and then give permission to the "workflowr-oauth-app" to access your
account^[This sounds scarier than it actually is. The "workflowr-oauth-app" is
simply a formality for GitHub to grant authorization. The "app" itself is the R
code running on your local machine. Once `wflow_use_github()` finishes, the
authorization is deleted, and nothing (and no one) can access your account].
If you decline the offer from `wflow_use_github()` to automatically create the
GitHub repository, you need to manually create it. To do this, log in to your
account on GitHub and create a new repository following these
[instructions][new-repo]. The screenshot below shows the menu in the top right
of the webpage.
<img src="img/github-new-repo.png" alt="Create a new repository on GitHub."
style="display: block; margin: auto; border: black 1px solid">
<p class="caption" style="text-align: center;">
Create a new repository on GitHub.
</p>
Note that in this tutorial the GitHub repository also has the name
"myproject." This isn't strictly necessary (you can name your GitHub repository
whatever you like), but it's generally good organizational practice to use the
same name for both your GitHub repository and the local directory on your
computer.
Next, you need to send your files to GitHub by pushing them with the
function `wflow_git_push()`:^[Unfortunately this can fail for many different
reasons. If you already regularly use `git push` in the Terminal, you will
probably want to continue using this. If you don't have Git installed on your
computer and thus must use `wflow_git_push()`, you can search the [git2r
Issues](https://github.com/ropensci/git2r/issues) for troubleshooting ideas.]
```{r wflow-git-push}
wflow_git_push(dry_run = TRUE)
```
Using `dry_run = TRUE` previews what the function will do. Remove this argument
to actually push to GitHub. You will be prompted to enter your GitHub username
and password for authentication^[If you'd prefer to use SSH keys for
authentication, please see the section [Setup SSH
keys](wflow-02-customization.html#setup-ssh-keys).]. Each time you make changes
to your project, e.g. run `wflow_publish()`, you will need to run
`wflow_git_push()` to send the changes to GitHub.
Lastly, now that your code is on GitHub, you need to tell GitHub that you want
the files in `docs/` to be published as a website. Go to Settings -> GitHub
Pages and choose "master branch docs/ folder" as the Source
([instructions][publish-docs]). Using the hypothetical names above, the
repository would be hosted at the URL `myname.github.io/myproject/`^[It may take
a few minutes for the site to be rendered.]. If you scroll back down to the
GitHub Pages section of the Settings page, you can click on the URL there.
[new-repo]: https://docs.github.com/articles/creating-a-new-repository
[publish-docs]: https://docs.github.com/articles/configuring-a-publishing-source-for-github-pages
## Add a new analysis file
Now that you have a functioning website, the next step is to start analyzing
data! Create a new R Markdown file, save it as `analysis/first-analysis.Rmd`,
and open it in your preferred text editor (e.g. RStudio). Alternatively, you can
use the convenience function `wflow_open()`, which will create the file (and
open it if you are using RStudio):
```{r create-file, eval=FALSE}
wflow_open("analysis/first-analysis.Rmd")
```
```{r create-file-hidden, echo=FALSE}
# Don't want to actually open the file when building the vignette in RStudio
wflow_open("analysis/first-analysis.Rmd", edit_in_rstudio = FALSE)
```
Now you are ready to start writing! Go ahead and add some example code. If you
are using RStudio, press the Knit button to build the file and see a preview in
the Viewer pane. Alternatively from the R console, you can run `wflow_build()`
again (this function can be run from the base directory of your project or any
subdirectory).
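If you need inspiration, a first code chunk might contain something like the
following (the built-in `ToothGrowth` data set is only a placeholder; any R
code works):
```
# Summarize and plot the built-in ToothGrowth data set
summary(ToothGrowth)
plot(len ~ dose, data = ToothGrowth)
```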
Check out your new file `first-analysis.html`. Near the top you will see the
workflowr reproducibility report. If you click on the button, the full menu will
drop down. Click around to learn more about the reproducibility safety checks,
why they're important, and whether or not the file passed or failed each one.
You'll notice that the first check failed because the R Markdown file had
uncommitted changes. This is OK now since the file is a draft. Once you are
ready to publish it to share with others, you can use `wflow_publish()` to
ensure that any changes to the R Markdown file are committed to the Git
repository prior to generating the results.
In order to make it easier to navigate to your new file, you can include a link
to it on the main index page. First, open `analysis/index.Rmd` (optionally using
`wflow_open()`). Second, paste the following line into `index.Rmd`:
```
Click on this [link](first-analysis.html) to see my results.
```
```{r edit-index, include=FALSE}
cat("\nClick on this [link](first-analysis.html) to see my results.\n",
file = "analysis/index.Rmd", append = TRUE)
```
This uses the Markdown syntax for creating a hyperlink (for a quick reference
guide in RStudio click "Help" -> "Markdown Quick Reference"). You specify the
HTML version of the file since this is what comprises the website. Click Knit
(or run `wflow_build()` again) to check that the link works.
Now run `wflow_status()` again. As expected, two files need attention.
`index.Rmd` has status "Mod" for modified. This means it is a published file
that has subsequently been modified. `first-analysis.Rmd` has status "Scr" for
Scratch. This means not only is the HTML not published, but the R Markdown file
is not yet being tracked by Git.
```{r wflow-status-newfile}
wflow_status()
```
To publish the new analysis and the updated index page, again use
`wflow_publish()`:
```{r wflow-publish-newfile, eval=FALSE}
wflow_publish(c("analysis/index.Rmd", "analysis/first-analysis.Rmd"),
"Add my first analysis")
```
```{r wflow-publish-newfile-hidden, echo=FALSE}
# Don't want to actually open the website when building the vignette
wflow_publish(c("analysis/index.Rmd", "analysis/first-analysis.Rmd"),
"Add my first analysis", view = FALSE)
```
Lastly, push the changes to GitHub or GitLab with
`wflow_git_push()`^[Alternatively you can run `git push` in the Terminal or use
the RStudio Git Pane.] to deploy these latest changes to the website.
## The workflow
This is the general workflow:^[Note that some workflowr functions are also
available as [RStudio Addins][rstudio-addins]. You may prefer these compared to
running the commands in the R console, especially since you can [bind the addins
to keyboard shortcuts][rstudio-addins-shortcuts].]
[rstudio-addins]: https://rstudio.github.io/rstudioaddins/
[rstudio-addins-shortcuts]: https://rstudio.github.io/rstudioaddins/#keyboard-shorcuts
1. Open a new or existing R Markdown file in `analysis/` (optionally using
`wflow_open()`)
1. Perform your analysis in the R Markdown file (for RStudio users: to develop
the code quickly, I recommend executing it in the R console via Ctrl-Enter to
send one line or Ctrl-Alt-C to run the entire code chunk)
1. Run `wflow_build()` to view the results as they will
appear on the website (alternatively press the Knit button in RStudio)
1. Go back to step 2 until you are satisfied with the result
1. Run `wflow_publish()` to commit the source files (R Markdown files or other
files in `code/`, `data/`, and `output/`), build the HTML files, and commit the
HTML files
1. Push the changes to GitHub or GitLab with `wflow_git_push()` (or `git push`
in the Terminal)
This ensures that the code version recorded at the top of an HTML file
corresponds to the state of the Git repository at the time it was built.
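Condensed into code, one iteration of this workflow might look like the sketch
below (the file name and commit message are hypothetical):
```{r workflow-sketch, eval=FALSE}
# Step 1: Create and open a new R Markdown file (hypothetical name)
wflow_open("analysis/new-analysis.Rmd")
# Steps 2-4: Edit the file, then preview the results
wflow_build("analysis/new-analysis.Rmd")
# Step 5: Commit the source, build the HTML, and commit the HTML
wflow_publish("analysis/new-analysis.Rmd", "Add a new analysis")
# Step 6: Deploy the changes
wflow_git_push()
```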
The only exception to this workflow is if you are updating the aesthetics of
your website (e.g. anytime you make edits to `analysis/_site.yml`). In this case
you'll want to update all the published HTML files, regardless of whether or not
their corresponding R Markdown files have been updated. To republish every HTML
page, run `wflow_publish()` with `republish = TRUE`. This behavior is only
previewed below by specifying `dry_run = TRUE`.
```{r republish}
wflow_publish("analysis/_site.yml", republish = TRUE, dry_run = TRUE)
```
## Next steps
To learn more about workflowr, you can read the following vignettes:
* [Customize your research website](wflow-02-customization.html)
* [Migrating an existing project to use workflowr](wflow-03-migrating.html)
* [How the workflowr package works](wflow-04-how-it-works.html)
* [Frequently asked questions](wflow-05-faq.html)
* [Hosting workflowr websites using GitLab](wflow-06-gitlab.html)
* [Sharing common code across analyses](wflow-07-common-code.html)
* [Alternative strategies for deploying workflowr websites](wflow-08-deploy.html)
* [Reproducible research with workflowr (workshop)](wflow-09-workshop.html)
* [Using large data files with workflowr](wflow-10-data.html)
## Further reading
* For advice on using R Markdown files to organize your analysis, read the
chapter [R Markdown workflow](https://r4ds.had.co.nz/r-markdown-workflow.html) in
the book [R for Data Science](https://r4ds.had.co.nz/) by Garrett Grolemund and
Hadley Wickham
|
/scratch/gouwar.j/cran-all/cranData/workflowr/inst/doc/wflow-01-getting-started.Rmd
|
## ----chunk-options, include=FALSE---------------------------------------------
library("knitr")
opts_chunk$set(eval = FALSE)
## ----wflow-publish-theme------------------------------------------------------
# wflow_publish("analysis/_site.yml", "Change the theme", republish = TRUE)
## ----custom-css-publish, eval=FALSE-------------------------------------------
# wflow_publish(c("analysis/_site.yml", "analysis/style.css"),
# message = "Customize website style.",
# republish = TRUE)
## ----wflow-publish-navbar-----------------------------------------------------
# wflow_publish("analysis/_site.yml", "Add main result page to navbar",
# republish = TRUE)
## ----https-to-ssh-------------------------------------------------------------
# wflow_git_remote(remote = "origin", user = "myname", repo = "myproject",
# protocol = "ssh", action = "set_url")
|
/scratch/gouwar.j/cran-all/cranData/workflowr/inst/doc/wflow-02-customization.R
|
---
title: "Customize your research website"
subtitle: "workflowr version `r utils::packageVersion('workflowr')`"
author: "John Blischak"
date: "`r Sys.Date()`"
output:
rmarkdown::html_vignette:
toc: true
vignette: >
%\VignetteIndexEntry{Customize your research website}
%\VignetteEngine{knitr::rmarkdown}
%\VignetteEncoding{UTF-8}
---
```{r chunk-options, include=FALSE}
library("knitr")
opts_chunk$set(eval = FALSE)
```
There are many ways to customize your research website. Below are some common
options.
## Adding project details
workflowr automatically creates many files when the project is first started. As
a first step for customizing your site, add the following information:
* Briefly describe your project in `analysis/index.Rmd`
* Share details about yourself in `analysis/about.Rmd`
* State a software license in `analysis/license.Rmd`. See [A Quick Guide to
Software Licensing for the Scientist-Programmer][morin2012] by Morin et al.,
2012 for advice. If you're ambivalent, the MIT license is a standard choice.
[morin2012]: https://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1002598
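As a convenience, you could open all three files at once, since `wflow_open()`
accepts a vector of file paths:
```{r open-project-details}
wflow_open(c("analysis/index.Rmd", "analysis/about.Rmd",
             "analysis/license.Rmd"))
```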
## Changing the theme
The theme is defined in the file `analysis/_site.yml`. The default is cosmo, but
the rmarkdown package accepts multiple Bootstrap themes. These are listed in the
[rmarkdown documentation][rmd-themes]. Go to
[bootswatch.com](https://bootswatch.com/) to compare the bootstrap themes. When
typing the theme, make sure it is all lowercase (e.g. spacelab, united, etc.).
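For example, switching to the spacelab theme would mean editing the `theme`
field in `analysis/_site.yml` so that it reads something like this sketch (your
file will contain additional settings):
```
output:
  workflowr::wflow_html:
    theme: spacelab
```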
When experimenting with different themes, you'll want to build a single
fast-running file, such as `analysis/index.Rmd`, instead of rebuilding the
entire site every time. Click the RStudio Knit button or run `wflow_build()` in
the R console to preview each theme:
```
wflow_build("analysis/index.Rmd")
```
Once you have chosen a theme, update the website by running the following:
```{r wflow-publish-theme}
wflow_publish("analysis/_site.yml", "Change the theme", republish = TRUE)
```
This commits `analysis/_site.yml`, re-builds every previously published HTML
file using the new theme, and commits all the republished HTML pages.
[rmd-themes]: https://bookdown.org/yihui/rmarkdown/html-document.html
## Style with custom CSS
For ultimate control of the style of your website, you can write [custom CSS
rules to apply to the R Markdown files][custom-css]. For a workflowr project,
follow these steps to get started:
1. Create the file `analysis/style.css`
1. Register the CSS file in `analysis/_site.yml`:
```
output:
workflowr::wflow_html:
toc: true
toc_float: true
theme: cosmo
highlight: textmate
css: style.css
```
1. Run `wflow_build()` to preview the changes
1. Once you are satisfied with the appearance of the site, publish the results
```{r custom-css-publish, eval=FALSE}
wflow_publish(c("analysis/_site.yml", "analysis/style.css"),
message = "Customize website style.",
republish = TRUE)
```
[custom-css]: https://bookdown.org/yihui/rmarkdown/html-document.html#custom-css
To specifically change the style of the workflowr components of the website, you
can write your CSS rules to target the custom workflowr classes. The example CSS
rules below demonstrate how to affect every workflowr button using the class
`btn-workflowr` and also how to affect specific workflowr buttons using the more
specialized classes.
```
/* Center workflowr buttons */
.btn-workflowr {
display: block;
margin: auto;
}
/* Add red border around workflowr report button */
.btn-workflowr-report {
border: red 5px solid;
}
/* Add blue border around workflowr past figure version buttons */
.btn-workflowr-fig {
border: blue 5px solid;
}
/* Add purple border around workflowr session information button */
.btn-workflowr-sessioninfo {
border: purple 5px solid;
}
```
## Customize the navigation bar
The navigation bar appears on the top of each page. By default it includes links
to `index.html` (Home), `about.html` (About), and `license.html` (License). This
is all specified in `analysis/_site.yml`. If you run either `wflow_use_github()`
or `wflow_use_gitlab()`, a link to your source code on GitHub or GitLab will be
added to the navigation bar.
If you have other important pages, you can add them as well. For example, to add
the text "The main result" which links to `main-result.html`, you would add the
following:
```
- text: "The main result"
href: main-result.html
```
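For context, this entry goes under the `left` (or `right`) field of the
`navbar` section, so the surrounding block of `analysis/_site.yml` might look
something like this sketch (your file will list additional pages):
```
navbar:
  title: "myproject"
  left:
    - text: "Home"
      href: index.html
    - text: "The main result"
      href: main-result.html
```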
You can also create a drop-down menu from the navigation bar. See the [rmarkdown
documentation][navbar] for instructions.
Similar to changing the theme above, you will need to re-render each page of the
website (the navbar is embedded within each individual HTML file). Thus you
can run the same type of command as above:
```{r wflow-publish-navbar}
wflow_publish("analysis/_site.yml", "Add main result page to navbar",
republish = TRUE)
```
[navbar]: https://bookdown.org/yihui/rmarkdown/rmarkdown-site.html
## Setup SSH keys
Using the https protocol to communicate with GitHub is tedious because it
requires entering your GitHub username and password. Using SSH keys for
authentication removes the password requirement. Follow these [GitHub
instructions][ssh] for creating SSH keys and linking them to your GitHub
account. You'll need to create a separate SSH key, and link it to your GitHub
account, for each machine on which you clone your Git repository.
After you create your SSH keys and add them to your GitHub account, you'll need
to instruct your local Git repository to use the SSH protocol. For a
hypothetical GitHub username of "myname" and GitHub repository of "myproject",
you would change the remote "origin" (the default name by convention) using the
function `wflow_git_remote()`:
```{r https-to-ssh}
wflow_git_remote(remote = "origin", user = "myname", repo = "myproject",
protocol = "ssh", action = "set_url")
```
Alternatively you could update the remote URL using Git directly in the shell.
See this GitHub documentation on [changing a remote URL][set-url] for
instructions.
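For the hypothetical names above, the equivalent shell command would be along
these lines:
```
git remote set-url origin git@github.com:myname/myproject.git
```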
[ssh]: https://docs.github.com/articles/generating-an-ssh-key
[set-url]: https://docs.github.com/articles/changing-a-remote-s-url
## Change the session information function
The default function used to report the session information is `sessionInfo()`.
To change this, you can edit this setting in `_workflowr.yml`. For example, to
instead use `sessioninfo::session_info()`, add the following line to
`_workflowr.yml`:
```
sessioninfo: "sessioninfo::session_info()"
```
If you'd prefer to manually insert a more complex report of the session
information, disable the automatic reporting by adding the following to
`_workflowr.yml`:
```
sessioninfo: ""
```
Note however that workflowr will still check for the presence of a session
information function. Specifically it expects to find either `sessionInfo` or
`session_info` somewhere in the R Markdown document.
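As a minimal sketch, if you disable the automatic report, you could end each R
Markdown file with your own chunk that calls one of these functions:
```{r manual-sessioninfo, eval=FALSE}
sessioninfo::session_info()
```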
|
/scratch/gouwar.j/cran-all/cranData/workflowr/inst/doc/wflow-02-customization.Rmd
|