Update OpenAlexNodes.R

OpenAlexNodes.R   +72 -84   CHANGED
@@ -5,94 +5,82 @@ authorPubNodes <- function(keywords,pub_start_date,pub_end_date){
   pub_end_date <- pub_end_date
 
 
-      title = "Error!",
-      text = "You can only enter text values",
-      closeOnEsc = T,
-      closeOnClickOutside = T,
-      confirmButtonText = "OK",
-      timer = 4000
-    )
-  } else{
-
-    # create search engine function
-    search_engine <- function(keywords,pub_start_date,pub_end_date){
-      suppressPackageStartupMessages(library(openalexR))
-      suppressPackageStartupMessages(library(tidyverse))
-
-      options(openalexR.mailto = "[email protected]")
-
-      # search engine
-      works_search <- oa_fetch(
-        entity = "works",
-        title.search = keywords,
-        cited_by_count = ">50",
-        from_publication_date = pub_start_date,
-        to_publication_date = pub_end_date,
-        options = list(sort = "cited_by_count:desc"),
-        verbose = FALSE
-      )
-
-      return(works_search)
-
-    }
-
-    search_data <- search_engine(keywords,pub_start_date,pub_end_date)
-
-
-    # grab authors and group them according to collaboration
-    authors_collaboration_groups <- list()
-    for (i in 1:nrow(search_data)){
-      authors_collaboration_groups[[i]] <- search_data$author[[i]][2]
-    }
-
-    # grab all authors
-    all_authors <- c()
-    for (i in 1:length(authors_collaboration_groups)) {
-      all_authors <- c(all_authors,authors_collaboration_groups[[i]][[1]])
-    }
-
-    # get length of each authors collaboration
-    authors_length <- c()
-    for(authors in 1:length(authors_collaboration_groups)){
-      authors_length <- c(authors_length,authors_collaboration_groups[[authors]] |> nrow())
-    }
-
-    # grab all publications
-    publications <- list()
-    for (i in 1:nrow(search_data)){
-      publications[[i]] <- rep(search_data$display_name[i], each = authors_length[i])
-    }
-
-    # place all publications in a vector
-    all_publications <- c()
-    for(i in 1:length(publications)){
-      all_publications <- c(all_publications,publications[[i]])
-    }
-
-    # create author_to_publication data frame
-    authors_to_publications <- data.frame(
-      Authors = all_authors,
-      Publications = all_publications
-    )
-
-    # stack the df so that authors and publications
-    # are together as one column
-    stacked_df <- stack(authors_to_publications)
-    stacked_df <- unique.data.frame(stacked_df) # remove duplicate rows
-    stacked_df <- stacked_df[-2] # delete second column in df
-
-    # create author_publications_nodes df
-    author_publication_nodes <- data.frame(
-      Id = 1:nrow(stacked_df),
-      Nodes = stacked_df$values,
-      Label = stacked_df$values
-    )
-
-
-    return(author_publication_nodes)
-
-  }
+  # create search engine function
+  search_engine <- function(keywords,pub_start_date,pub_end_date){
+    suppressPackageStartupMessages(library(openalexR))
+    suppressPackageStartupMessages(library(tidyverse))
+
+    options(openalexR.mailto = "[email protected]")
+
+    # search engine
+    works_search <- oa_fetch(
+      entity = "works",
+      title.search = keywords,
+      cited_by_count = ">50",
+      from_publication_date = pub_start_date,
+      to_publication_date = pub_end_date,
+      options = list(sort = "cited_by_count:desc"),
+      verbose = FALSE
+    )
+
+    return(works_search)
+
+  }
+
+  search_data <- search_engine(keywords,pub_start_date,pub_end_date)
+
+
+  # grab authors and group them according to collaboration
+  authors_collaboration_groups <- list()
+  for (i in 1:nrow(search_data)){
+    authors_collaboration_groups[[i]] <- search_data$author[[i]][2]
+  }
+
+  # grab all authors
+  all_authors <- c()
+  for (i in 1:length(authors_collaboration_groups)) {
+    all_authors <- c(all_authors,authors_collaboration_groups[[i]][[1]])
+  }
+
+  # get length of each authors collaboration
+  authors_length <- c()
+  for(authors in 1:length(authors_collaboration_groups)){
+    authors_length <- c(authors_length,authors_collaboration_groups[[authors]] |> nrow())
+  }
+
+  # grab all publications
+  publications <- list()
+  for (i in 1:nrow(search_data)){
+    publications[[i]] <- rep(search_data$display_name[i], each = authors_length[i])
+  }
+
+  # place all publications in a vector
+  all_publications <- c()
+  for(i in 1:length(publications)){
+    all_publications <- c(all_publications,publications[[i]])
+  }
+
+  # create author_to_publication data frame
+  authors_to_publications <- data.frame(
+    Authors = all_authors,
+    Publications = all_publications
+  )
+
+  # stack the df so that authors and publications
+  # are together as one column
+  stacked_df <- stack(authors_to_publications)
+  stacked_df <- unique.data.frame(stacked_df) # remove duplicate rows
+  stacked_df <- stacked_df[-2] # delete second column in df
+
+  # create author_publications_nodes df
+  author_publication_nodes <- data.frame(
+    Id = 1:nrow(stacked_df),
+    Nodes = stacked_df$values,
+    Label = stacked_df$values
+  )
+
+
+  return(author_publication_nodes)
+
 }
+
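A minimal usage sketch of the updated function, assuming OpenAlexNodes.R has been sourced and the openalexR and tidyverse packages are installed; the keyword string and date values below are illustrative only:

# hypothetical example call, not part of the committed file
nodes <- authorPubNodes(
  keywords = "bibliometric network analysis",
  pub_start_date = "2018-01-01",
  pub_end_date = "2022-12-31"
)
head(nodes)  # data frame with Id, Nodes and Label columns for use as a node table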