Introduction

In this report, we extract information about published JOSS papers and generate
graphics as well as a summary table that can be downloaded and used for further analyses.

Load required R packages

suppressPackageStartupMessages({
    library(tibble)
    library(rcrossref)
    library(dplyr)
    library(tidyr)
    library(ggplot2)
    library(lubridate)
    library(gh)
    library(purrr)
    library(jsonlite)
    library(DT)
    library(plotly)
    library(citecorp)
    library(readr)
    library(rworldmap)
    library(gt)
    library(stringr)
    library(openalexR)
})
## Keep track of the source of each column
source_track <- c()

## Determine whether to add a caption with today's date to the (non-interactive) plots
add_date_caption <- TRUE
if (add_date_caption) {
    dcap <- lubridate::today()
} else {
    dcap <- ""
}
## Get list of countries and populations (2022) from the rworldmap/gt packages
data("countrySynonyms")
country_names <- countrySynonyms |>
    select(-ID) |>
    pivot_longer(names_to = "tmp", values_to = "name", -ISO3) |>
    filter(name != "") |>
    select(-tmp)

## Country population data from the World Bank (https://data.worldbank.org/indicator/SP.POP.TOTL),
## distributed via the gt R package
country_populations <- countrypops |> 
    filter(year == 2022)
## Read the archived version of the summary data frame, used to fill in 
## information about software repositories (necessary because of the limit 
## on API requests). Sort by the date when software repo info was last obtained
papers_archive <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true"))) %>%
    dplyr::arrange(!is.na(repo_info_obtained), repo_info_obtained)

## Similarly for citation analysis, to avoid having to pull down the 
## same information multiple times
citations_archive <- readr::read_delim(
    url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_citations.tsv?raw=true"),
    col_types = cols(.default = "c"), col_names = TRUE,
    delim = "\t")

Collect information about papers

Pull down paper info from Crossref and citation information from OpenAlex

We get the information about published JOSS papers from Crossref, using the rcrossref R package. The openalexR R package is used to extract citation counts from OpenAlex.

## First check how many records there are in Crossref
issn <- "2475-9066"
joss_details <- rcrossref::cr_journals(issn, works = FALSE) %>%
    pluck("data")
joss_details$total_dois
## [1] 2570
## Pull down all records from Crossref
papers <- rcrossref::cr_journals(issn, works = TRUE, cursor = "*",
    cursor_max = joss_details$total_dois * 2) %>%
    pluck("data")

## Only keep articles
papers <- papers %>%
    dplyr::filter(type == "journal-article") 
dim(papers)
## [1] 2575   28
dim(papers %>% distinct())
## [1] 2575   28
## A few papers don't have alternative.ids - generate them from the DOI
noaltid <- which(is.na(papers$alternative.id))
papers$alternative.id[noaltid] <- papers$doi[noaltid]

## Get citation info from Crossref and merge with paper details
# cit <- rcrossref::cr_citation_count(doi = papers$alternative.id)
# papers <- papers %>% dplyr::left_join(
#     cit %>% dplyr::rename(citation_count = count), 
#     by = c("alternative.id" = "doi")
# )

## Remove one duplicated paper
papers <- papers %>% dplyr::filter(alternative.id != "10.21105/joss.00688")
dim(papers)
## [1] 2574   28
dim(papers %>% distinct())
## [1] 2574   28
papers$alternative.id[duplicated(papers$alternative.id)]
## character(0)
source_track <- c(source_track, 
                  structure(rep("crossref", ncol(papers)), 
                            names = colnames(papers)))
## Get info from openalexR and merge with paper details
## Helper function to extract countries from affiliations. Note that this 
## information is not available for all papers.
.get_countries <- function(df, wh = "first") {
    if (length(df) == 1 && is.na(df)) {
        ""
    } else {
        if (wh == "first") {
            ## Only first affiliation for each author
            tmp <- df |> 
                dplyr::filter(!duplicated(au_id) & !is.na(institution_country_code)) |>
                pull(institution_country_code)
        } else {
            ## All affiliations
            tmp <- df |> 
                dplyr::filter(!is.na(institution_country_code)) |>
                pull(institution_country_code)
        }
        if (length(tmp) > 0) {
            tmp |>
                unique() |>
                paste(collapse = ";")
        } else {
            ""
        }
    }
}
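
## Example of .get_countries on a mock affiliation table (hypothetical 
## data for illustration; real entries are the per-paper `author` data 
## frames returned by openalexR):
ex_affil <- data.frame(au_id = c("A1", "A1", "A2"),
                       institution_country_code = c("US", "GB", "US"))
.get_countries(ex_affil, wh = "first")  ## "US" (first affiliation per author)
.get_countries(ex_affil, wh = "all")    ## "US;GB" (all affiliations)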

oa <- oa_fetch(entity = "works", 
               primary_location.source.id = "s4210214273") |>
    mutate(affil_countries_all = vapply(author, .get_countries, "", wh = "all"),
           affil_countries_first = vapply(author, .get_countries, "", wh = "first"))
## Warning in oa_request(oa_query(filter = filter_i, multiple_id = multiple_id, : 
## The following work(s) have truncated lists of authors: W3005984879.
## Query each work separately by its identifier to get full list of authors.
## For example:
##   lapply(c("W3005984879"), \(x) oa_fetch(identifier = x))
## Details at https://docs.openalex.org/api-entities/authors/limitations.
dim(oa)
## [1] 2568   40
length(unique(oa$doi))
## [1] 2568
papers <- papers %>% dplyr::left_join(
    oa %>% dplyr::mutate(alternative.id = sub("https://doi.org/", "", doi)) %>%
        dplyr::select(alternative.id, cited_by_count, id,
                      affil_countries_all, affil_countries_first) %>%
        dplyr::rename(citation_count = cited_by_count, 
                      openalex_id = id),
    by = "alternative.id"
)
dim(papers)
## [1] 2574   32
dim(papers %>% distinct())
## [1] 2574   32
source_track <- c(source_track, 
                  structure(rep("OpenAlex", length(setdiff(colnames(papers),
                                                           names(source_track)))), 
                            names = setdiff(colnames(papers), names(source_track))))
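
The warning above indicates that one work (W3005984879) was returned with a truncated author list. If the complete author table is needed, such works can be re-fetched individually by their identifiers, as the warning suggests, and patched into oa. A minimal sketch (not run here), assuming the re-fetched tibble has the same id and author columns as oa:

## Re-fetch works flagged as having truncated author lists and patch them in
# truncated <- c("W3005984879")
# oa_full <- do.call(dplyr::bind_rows,
#                    lapply(truncated, function(x) oa_fetch(identifier = x)))
# oa$author[match(oa_full$id, oa$id)] <- oa_full$author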

Pull down info from Whedon API

For each published paper, we use the Whedon API to get information about the pre-review and review issue numbers, the corresponding software repository, and more.

whedon <- list()
p <- 1
a0 <- NULL
a <- jsonlite::fromJSON(
    url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
    simplifyDataFrame = FALSE
)
## Fetch pages until an empty page or a repeated page is returned (the 
## latter guards against the API returning the last page again for 
## out-of-range page numbers)
while (length(a) > 0 && !identical(a, a0)) {
    whedon <- c(whedon, a)
    p <- p + 1
    a0 <- a
    a <- tryCatch({
        jsonlite::fromJSON(
            url(paste0("https://joss.theoj.org/papers/published.json?page=", p)),
            simplifyDataFrame = FALSE
        )}, 
        error = function(e) return(numeric(0))
    )
}

whedon <- do.call(dplyr::bind_rows, lapply(whedon, function(w) {
    data.frame(api_title = w$title, 
               api_state = w$state,
               editor = paste(w$editor, collapse = ","),
               reviewers = paste(w$reviewers, collapse = ","),
               nbr_reviewers = length(w$reviewers),
               repo_url = w$software_repository,
               review_issue_id = sub("https://github.com/openjournals/joss-reviews/issues/", 
                                     "", w$paper_review),
               doi = w$doi,
               prereview_issue_id = ifelse(!is.null(w$meta_review_issue_id),
                                           w$meta_review_issue_id, NA_integer_),
               languages = gsub(", ", ",", w$languages),
               archive_doi = w$software_archive)
}))
dim(whedon)
## [1] 2574   11
dim(whedon %>% distinct())
## [1] 2574   11
whedon$repo_url[duplicated(whedon$repo_url)]
##  [1] "https://github.com/idaholab/moose"        
##  [2] "https://gitlab.com/libreumg/dataquier.git"
##  [3] "https://github.com/idaholab/moose"        
##  [4] "https://github.com/dynamicslab/pysindy"   
##  [5] "https://github.com/landlab/landlab"       
##  [6] "https://github.com/landlab/landlab"       
##  [7] "https://github.com/symmy596/SurfinPy"     
##  [8] "https://github.com/landlab/landlab"       
##  [9] "https://github.com/pvlib/pvlib-python"    
## [10] "https://github.com/mlpack/mlpack"         
## [11] "https://github.com/julia-wrobel/registr"  
## [12] "https://github.com/barbagroup/pygbe"
papers <- papers %>% dplyr::left_join(whedon, by = c("alternative.id" = "doi"))
dim(papers)
## [1] 2574   42
dim(papers %>% distinct())
## [1] 2574   42
papers$repo_url[duplicated(papers$repo_url)]
##  [1] "https://github.com/landlab/landlab"       
##  [2] "https://github.com/landlab/landlab"       
##  [3] "https://github.com/idaholab/moose"        
##  [4] "https://github.com/idaholab/moose"        
##  [5] "https://github.com/dynamicslab/pysindy"   
##  [6] "https://github.com/barbagroup/pygbe"      
##  [7] "https://github.com/julia-wrobel/registr"  
##  [8] "https://github.com/symmy596/SurfinPy"     
##  [9] "https://github.com/pvlib/pvlib-python"    
## [10] "https://github.com/landlab/landlab"       
## [11] "https://github.com/mlpack/mlpack"         
## [12] "https://gitlab.com/libreumg/dataquier.git"
source_track <- c(source_track, 
                  structure(rep("whedon", length(setdiff(colnames(papers),
                                                         names(source_track)))), 
                            names = setdiff(colnames(papers), names(source_track))))

Combine with info from GitHub issues

From each pre-review and review issue, we extract information about review times and assigned labels.

## Pull down info on all issues in the joss-reviews repository
issues <- gh("/repos/openjournals/joss-reviews/issues", 
             .limit = 15000, state = "all")
## From each issue, extract required information
iss <- do.call(dplyr::bind_rows, lapply(issues, function(i) {
    data.frame(title = i$title, 
               number = i$number,
               state = i$state,
               opened = i$created_at,
               closed = ifelse(!is.null(i$closed_at),
                               i$closed_at, NA_character_),
               ncomments = i$comments,
               labels = paste(setdiff(
                   vapply(i$labels, getElement, 
                          name = "name", character(1L)),
                   c("review", "pre-review", "query-scope", "paused")),
                   collapse = ","))
}))

## Split into REVIEW, PRE-REVIEW, and other issues (the latter category 
## is discarded)
issother <- iss %>% dplyr::filter(!grepl("\\[PRE REVIEW\\]", title) & 
                                      !grepl("\\[REVIEW\\]", title))
dim(issother)
## [1] 155   7
head(issother)
##                                                                                    title
## 1                                                      [JOSS Review] Small typo in paper
## 2                                                                                  hakan
## 3                                                                Add a synthetic dataset
## 4                                         # Post-Review Checklist for Editor and Authors
## 5                                     Nanonis version incompatibility - Deprecated Slots
## 6 Questions about "statement of need" and the relative contribution of the three authors
##   number  state               opened               closed ncomments labels
## 1   6982 closed 2024-07-13T23:30:13Z 2024-07-13T23:30:15Z         1       
## 2   6975 closed 2024-07-12T16:46:52Z 2024-07-12T16:46:54Z         1       
## 3   6952 closed 2024-07-02T20:56:20Z 2024-07-02T20:56:22Z         1       
## 4   6924 closed 2024-06-24T10:12:54Z 2024-06-24T10:12:57Z         1       
## 5   6709 closed 2024-05-01T06:48:44Z 2024-05-01T06:48:46Z         1       
## 6   6360 closed 2024-02-16T09:50:43Z 2024-02-16T09:50:45Z         1
## For REVIEW issues, generate the DOI of the paper from the issue number
getnbrzeros <- function(s) {
    paste(rep(0, 5 - nchar(s)), collapse = "")
}
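## Equivalently, the zero-padding can be done with formatC; a quick 
## sanity check of the two approaches:
stopifnot(identical(paste0(getnbrzeros("55"), "55"),
                    formatC(55, width = 5, flag = "0")))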
issrev <- iss %>% dplyr::filter(grepl("\\[REVIEW\\]", title)) %>%
    dplyr::mutate(nbrzeros = purrr::map_chr(number, getnbrzeros)) %>%
    dplyr::mutate(alternative.id = paste0("10.21105/joss.", 
                                          nbrzeros,
                                          number)) %>%
    dplyr::select(-nbrzeros) %>% 
    dplyr::mutate(title = gsub("\\[REVIEW\\]: ", "", title)) %>%
    dplyr::rename_at(vars(-alternative.id), ~ paste0("review_", .))
## For pre-review and review issues, respectively, get the number of 
## issues closed each month, and the number of those that have the 
## 'rejected' label
review_rejected <- iss %>% 
    dplyr::filter(grepl("\\[REVIEW\\]", title)) %>% 
    dplyr::filter(!is.na(closed)) %>%
    dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
    dplyr::group_by(closedmonth) %>%
    dplyr::summarize(nbr_issues_closed = length(labels),
                     nbr_rejections = sum(grepl("rejected", labels))) %>%
    dplyr::mutate(itype = "review")

prereview_rejected <- iss %>% 
    dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>% 
    dplyr::filter(!is.na(closed)) %>%
    dplyr::mutate(closedmonth = lubridate::floor_date(as.Date(closed), "month")) %>%
    dplyr::group_by(closedmonth) %>%
    dplyr::summarize(nbr_issues_closed = length(labels),
                     nbr_rejections = sum(grepl("rejected", labels))) %>%
    dplyr::mutate(itype = "pre-review")

all_rejected <- dplyr::bind_rows(review_rejected, prereview_rejected)
## For PRE-REVIEW issues, add information about the corresponding REVIEW 
## issue number
isspre <- iss %>% dplyr::filter(grepl("\\[PRE REVIEW\\]", title)) %>%
    dplyr::filter(!grepl("withdrawn", labels)) %>%
    dplyr::filter(!grepl("rejected", labels))
## Some titles have multiple pre-review issues. In these cases, keep the latest
isspre <- isspre %>% dplyr::arrange(desc(number)) %>% 
    dplyr::filter(!duplicated(title)) %>% 
    dplyr::mutate(title = gsub("\\[PRE REVIEW\\]: ", "", title)) %>%
    dplyr::rename_all(~ paste0("prerev_", .))

papers <- papers %>% dplyr::left_join(issrev, by = "alternative.id") %>% 
    dplyr::left_join(isspre, by = c("prereview_issue_id" = "prerev_number")) %>%
    dplyr::mutate(prerev_opened = as.Date(prerev_opened),
                  prerev_closed = as.Date(prerev_closed),
                  review_opened = as.Date(review_opened),
                  review_closed = as.Date(review_closed)) %>% 
    dplyr::mutate(days_in_pre = prerev_closed - prerev_opened,
                  days_in_rev = review_closed - review_opened,
                  to_review = !is.na(review_opened))
dim(papers)
## [1] 2574   58
dim(papers %>% distinct())
## [1] 2574   58
source_track <- c(source_track, 
                  structure(rep("joss-github", length(setdiff(colnames(papers),
                                                              names(source_track)))), 
                            names = setdiff(colnames(papers), names(source_track))))

Add information from software repositories
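
Querying thousands of repositories can quickly exhaust the GitHub API rate limit; this is why repositories whose information was obtained longest ago are refreshed first, and the remainder is filled in from the archived data frame (see below). The remaining quota can be inspected with the gh package; a minimal sketch (not run here), assuming an authenticated GitHub token is configured for gh:

## Check the remaining GitHub API quota before launching the queries
# rl <- gh::gh_rate_limit()
# rl$remaining  ## number of requests left in the current rate-limit window
# rl$reset      ## time at which the quota resets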

## Reorder so that software repositories that were interrogated longest 
## ago are checked first
tmporder <- order(match(papers$alternative.id, papers_archive$alternative.id),
                  na.last = FALSE)
software_urls <- papers$repo_url[tmporder]
software_urls[duplicated(software_urls)]
##  [1] "https://gitlab.com/libreumg/dataquier.git"
##  [2] "https://github.com/landlab/landlab"       
##  [3] "https://github.com/landlab/landlab"       
##  [4] "https://github.com/idaholab/moose"        
##  [5] "https://github.com/idaholab/moose"        
##  [6] "https://github.com/dynamicslab/pysindy"   
##  [7] "https://github.com/barbagroup/pygbe"      
##  [8] "https://github.com/julia-wrobel/registr"  
##  [9] "https://github.com/symmy596/SurfinPy"     
## [10] "https://github.com/pvlib/pvlib-python"    
## [11] "https://github.com/landlab/landlab"       
## [12] "https://github.com/mlpack/mlpack"
is_github <- grepl("github", software_urls)
length(is_github)
## [1] 2574
sum(is_github)
## [1] 2428
software_urls[!is_github]
##   [1] "https://gitlab.com/open-darts/open-darts"                                        
##   [2] "https://gitlab.com/utopia-project/utopia"                                        
##   [3] "https://gitlab.inria.fr/bramas/tbfmm"                                            
##   [4] "https://bitbucket.org/orionmhdteam/orion2_release1/src/master/"                  
##   [5] "https://gitlab.com/fduchate/predihood"                                           
##   [6] "https://git.ligo.org/asimov/asimov"                                              
##   [7] "https://jugit.fz-juelich.de/compflu/swalbe.jl/"                                  
##   [8] "https://gitlab.com/moerman1/fhi-cc4s"                                            
##   [9] "https://gitlab.com/ENKI-portal/ThermoCodegen"                                    
##  [10] "https://gitlab.com/wpettersson/kep_solver"                                       
##  [11] "https://gitlab.com/jtagusari/hrisk-noisemodelling"                               
##  [12] "https://gitlab.com/mmartin-lagarde/exonoodle-exoplanets/-/tree/master/"          
##  [13] "https://bitbucket.org/meg/cbcbeat"                                               
##  [14] "https://gitlab.pasteur.fr/vlegrand/ROCK"                                         
##  [15] "https://gitlab.mpikg.mpg.de/curcuraci/bmiptools"                                 
##  [16] "https://gitlab.com/pyFBS/pyFBS"                                                  
##  [17] "https://gitlab.dune-project.org/dorie/dorie"                                     
##  [18] "https://gitlab.kuleuven.be/ITSCreaLab/public-toolboxes/dyntapy"                  
##  [19] "https://gitlab.com/dmt-development/dmt-core"                                     
##  [20] "https://gitlab.com/dlr-ve/esy/remix/framework"                                   
##  [21] "https://gitlab.com/myqueue/myqueue"                                              
##  [22] "https://savannah.nongnu.org/projects/complot/"                                   
##  [23] "https://gitlab.inria.fr/miet/miet"                                               
##  [24] "https://gitlab.com/jason-rumengan/pyarma"                                        
##  [25] "https://bitbucket.org/cardosan/brightway2-temporalis"                            
##  [26] "http://mutabit.com/repos.fossil/grafoscopio/"                                    
##  [27] "https://gitlab.com/bonsamurais/bonsai/util/ipcc"                                 
##  [28] "https://gitlab.com/cerfacs/batman"                                               
##  [29] "https://gitlab.com/libreumg/dataquier.git"                                       
##  [30] "https://gitlab.com/ffaucher/hawen"                                               
##  [31] "https://bitbucket.org/manuela_s/hcp/"                                            
##  [32] "https://bitbucket.org/hammurabicode/hamx"                                        
##  [33] "https://gitlab.com/cosmograil/starred"                                           
##  [34] "https://gite.lirmm.fr/doccy/RedOak"                                              
##  [35] "https://gitlab.com/petsc/petsc"                                                  
##  [36] "https://gitlab.inria.fr/bcoye/game-engine-scheduling-simulation"                 
##  [37] "https://gitlab.com/fibreglass/pivc"                                              
##  [38] "https://gitlab.com/culturalcartography/text2map"                                 
##  [39] "https://codebase.helmholtz.cloud/mussel/netlogo-northsea-species.git"            
##  [40] "https://gitlab.com/gdetor/genetic_alg"                                           
##  [41] "https://bitbucket.org/berkeleylab/hardware-control/src/main/"                    
##  [42] "https://gitlab.com/utopia-project/dantro"                                        
##  [43] "https://gitlab.com/akantu/akantu"                                                
##  [44] "https://gitlab.com/manchester_qbi/manchester_qbi_public/madym_cxx/"              
##  [45] "https://gitlab.com/ProjectRHEA/flowsolverrhea"                                   
##  [46] "https://gitlab.com/emd-dev/emd"                                                  
##  [47] "https://gricad-gitlab.univ-grenoble-alpes.fr/ttk/spam/"                          
##  [48] "https://gitlab.gwdg.de/mpievolbio-it/crbhits"                                    
##  [49] "https://gitlab.ethz.ch/holukas/dyco-dynamic-lag-compensation"                    
##  [50] "https://bitbucket.org/rram/dvrlib/src/joss/"                                     
##  [51] "https://bitbucket.org/clhaley/Multitaper.jl"                                     
##  [52] "https://gitlab.com/sails-dev/sails"                                              
##  [53] "https://gitlab.com/cosapp/cosapp"                                                
##  [54] "https://gitlab.com/dlr-dw/ontocode"                                              
##  [55] "https://gitlab.com/project-dare/dare-platform"                                   
##  [56] "https://earth.bsc.es/gitlab/wuruchi/autosubmitreact"                             
##  [57] "https://gitlab.com/tum-ciip/elsa"                                                
##  [58] "https://plmlab.math.cnrs.fr/lmrs/statistique/smmR"                               
##  [59] "https://gitlab.com/sissopp_developers/sissopp"                                   
##  [60] "https://framagit.org/GustaveCoste/off-product-environmental-impact/"             
##  [61] "https://bitbucket.org/bmskinner/nuclear_morphology"                              
##  [62] "https://bitbucket.org/sbarbot/motorcycle/src/master/"                            
##  [63] "https://gitlab.com/binary_c/binary_c-python/"                                    
##  [64] "https://gitlab.com/InspectorCell/inspectorcell"                                  
##  [65] "https://gitlab.inria.fr/melissa/melissa"                                         
##  [66] "https://gitlab.com/mantik-ai/mantik"                                             
##  [67] "https://gitlab.uliege.be/smart_grids/public/gboml"                               
##  [68] "https://gitlab.com/jesseds/apav"                                                 
##  [69] "https://gitlab.com/marinvaders/marinvaders"                                      
##  [70] "https://gitlab.com/vibes-developers/vibes"                                       
##  [71] "https://gitlab.com/picos-api/picos"                                              
##  [72] "https://gitlab.com/remram44/taguette"                                            
##  [73] "https://gitlab.com/dlr-ve/esy/amiris/amiris"                                     
##  [74] "https://git.rwth-aachen.de/ants/sensorlab/imea"                                  
##  [75] "https://gitlab.com/pvst/asi"                                                     
##  [76] "https://bitbucket.org/mpi4py/mpi4py-fft"                                         
##  [77] "https://gitlab.kitware.com/LBM/lattice-boltzmann-solver"                         
##  [78] "https://gitlab.com/eidheim/Simple-Web-Server"                                    
##  [79] "https://bitbucket.org/glotzer/rowan"                                             
##  [80] "https://gitlab.com/cracklet/cracklet.git"                                        
##  [81] "https://gitlab.com/toposens/public/ros-packages"                                 
##  [82] "https://bitbucket.org/cdegroot/wediff"                                           
##  [83] "https://bitbucket.org/basicsums/basicsums"                                       
##  [84] "https://gitlab.inria.fr/azais/treex"                                             
##  [85] "https://gitlab.com/bioeconomy/forobs/biotrade/"                                  
##  [86] "https://gitlab.com/soleil-data-treatment/soleil-software-projects/remote-desktop"
##  [87] "https://git.geomar.de/digital-earth/dasf/dasf-messaging-python"                  
##  [88] "https://gitlab.com/sigcorr/sigcorr"                                              
##  [89] "https://gitlab.com/dsbowen/conditional-inference"                                
##  [90] "https://gitlab.com/thartwig/asloth"                                              
##  [91] "https://code.usgs.gov/umesc/quant-ecology/fishstan/"                             
##  [92] "https://gitlab.com/QComms/cqptoolkit"                                            
##  [93] "https://bitbucket.org/sciencecapsule/sciencecapsule"                             
##  [94] "https://framagit.org/GustaveCoste/eldam"                                         
##  [95] "https://www.idpoisson.fr/fullswof/"                                              
##  [96] "https://gitlab.com/fame-framework/fame-core"                                     
##  [97] "https://gitlab.com/fame-framework/fame-io"                                       
##  [98] "https://gitlab.ifremer.fr/resourcecode/resourcecode"                             
##  [99] "https://gitlab.com/chaver/choco-mining"                                          
## [100] "https://gitlab.com/drti/basic-tools"                                             
## [101] "https://gitlab.com/ags-data-format-wg/ags-python-library"                        
## [102] "https://gitlab.com/LMSAL_HUB/aia_hub/aiapy"                                      
## [103] "https://bitbucket.org/miketuri/perl-spice-sim-seus/"                             
## [104] "https://bitbucket.org/ocellarisproject/ocellaris"                                
## [105] "https://gitlab.inria.fr/mosaic/bvpy"                                             
## [106] "https://gitlab.com/cosmograil/PyCS3"                                             
## [107] "https://git.iws.uni-stuttgart.de/tools/frackit"                                  
## [108] "https://bitbucket.org/berkeleylab/esdr-pygdh/"                                   
## [109] "https://gitlab.com/habermann_lab/phasik"                                         
## [110] "https://gitlab.com/dlr-ve/autumn/"                                               
## [111] "https://gitlab.com/moorepants/skijumpdesign"                                     
## [112] "https://bitbucket.org/dolfin-adjoint/pyadjoint"                                  
## [113] "https://sourceforge.net/p/mcapl/mcapl_code/ci/master/tree/"                      
## [114] "https://gitlab.com/davidtourigny/dynamic-fba"                                    
## [115] "https://gitlab.com/cmbm-ethz/pourbaix-diagrams"                                  
## [116] "https://bitbucket.org/likask/mofem-cephas"                                       
## [117] "https://gitlab.com/materials-modeling/wulffpack"                                 
## [118] "https://bitbucket.org/cmutel/brightway2"                                         
## [119] "https://gitlab.com/energyincities/besos/"                                        
## [120] "https://gitlab.ruhr-uni-bochum.de/reichp2y/proppy"                               
## [121] "https://c4science.ch/source/tamaas/"                                             
## [122] "https://gitlab.com/tue-umphy/software/parmesan"                                  
## [123] "https://gitlab.com/mauricemolli/petitRADTRANS"                                   
## [124] "https://gitlab.com/tesch1/cppduals"                                              
## [125] "https://gitlab.com/geekysquirrel/bigx"                                           
## [126] "https://gitlab.com/pythia-uq/pythia"                                             
## [127] "https://bitbucket.org/cloopsy/android/"                                          
## [128] "https://bitbucket.org/dghoshal/frieda"                                           
## [129] "https://gitlab.com/ampere2/metalwalls"                                           
## [130] "https://doi.org/10.17605/OSF.IO/3DS6A"                                           
## [131] "https://gitlab.com/gims-developers/gims"                                         
## [132] "https://gitlab.com/celliern/scikit-fdiff/"                                       
## [133] "https://gitlab.com/dglaeser/fieldcompare"                                        
## [134] "https://git.mpib-berlin.mpg.de/castellum/castellum"                              
## [135] "https://gitlab.com/robizzard/libcdict"                                           
## [136] "https://gitlab.com/dlr-ve/esy/sfctools/framework/"                               
## [137] "https://gitlab.awi.de/sicopolis/sicopolis"                                       
## [138] "https://gitlab.com/programgreg/tagginglatencyestimator"                          
## [139] "https://bitbucket.org/mituq/muq2.git"                                            
## [140] "https://gitlab.eudat.eu/coccon-kit/proffastpylot"                                
## [141] "https://gitlab.com/permafrostnet/teaspoon"                                       
## [142] "https://bitbucket.org/robmoss/particle-filter-for-python/"                       
## [143] "https://gitlab.com/costrouc/pysrim"                                              
## [144] "https://gitlab.com/libreumg/dataquier.git"                                       
## [145] "https://gitlab.com/materials-modeling/calorine"                                  
## [146] "https://gitlab.com/datafold-dev/datafold/"
df <- do.call(dplyr::bind_rows, lapply(unique(software_urls[is_github]), function(u) {
    u0 <- gsub("^http://", "https://", gsub("\\.git$", "", gsub("/$", "", u)))
    if (grepl("/tree/", u0)) {
        u0 <- strsplit(u0, "/tree/")[[1]][1]
    }
    if (grepl("/blob/", u0)) {
        u0 <- strsplit(u0, "/blob/")[[1]][1]
    }
    info <- try({
        gh(gsub("(https://)?(www.)?github.com/", "/repos/", u0))
    })
    languages <- try({
        gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/languages"), 
           .limit = 500)
    })
    topics <- try({
        gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/topics"), 
           .accept = "application/vnd.github.mercy-preview+json", .limit = 500)
    })
    contribs <- try({
        gh(paste0(gsub("(https://)?(www.)?github.com/", "/repos/", u0), "/contributors"), 
           .limit = 500)
    })
    if (!is(info, "try-error") && length(info) > 1) {
        if (!is(contribs, "try-error")) {
            if (length(contribs) == 0) {
                repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
            } else {
                repo_nbr_contribs <- length(contribs)
                repo_nbr_contribs_2ormore <- sum(vapply(contribs, function(x) x$contributions >= 2, NA_integer_))
                if (is.na(repo_nbr_contribs_2ormore)) {
                    print(contribs)
                }
            }
        } else {
            repo_nbr_contribs <- repo_nbr_contribs_2ormore <- NA_integer_
        }
        
        if (!is(languages, "try-error")) {
            if (length(languages) == 0) {
                repolang <- ""
            } else {
                repolang <- paste(paste(names(unlist(languages)), 
                                        unlist(languages), sep = ":"), collapse = ",")
            }
        } else {
            repolang <- ""
        }
        
        if (!is(topics, "try-error")) {
            if (length(topics$names) == 0) {
                repotopics <- ""
            } else {
                repotopics <- paste(unlist(topics$names), collapse = ",")
            }
        } else {
            repotopics <- ""
        }
        
        data.frame(repo_url = u, 
                   repo_created = info$created_at,
                   repo_updated = info$updated_at,
                   repo_pushed = info$pushed_at,
                   repo_nbr_stars = info$stargazers_count,
                   repo_language = ifelse(!is.null(info$language),
                                          info$language, NA_character_),
                   repo_languages_bytes = repolang,
                   repo_topics = repotopics,
                   repo_license = ifelse(!is.null(info$license),
                                         info$license$key, NA_character_),
                   repo_nbr_contribs = repo_nbr_contribs,
                   repo_nbr_contribs_2ormore = repo_nbr_contribs_2ormore
        )
    } else {
        NULL
    }
})) %>%
    dplyr::mutate(repo_created = as.Date(repo_created),
                  repo_updated = as.Date(repo_updated),
                  repo_pushed = as.Date(repo_pushed)) %>%
    dplyr::distinct() %>%
    dplyr::mutate(repo_info_obtained = lubridate::today())
if (length(unique(df$repo_url)) != length(df$repo_url)) {
    print(length(unique(df$repo_url)))
    print(length(df$repo_url))
    print(df$repo_url[duplicated(df$repo_url)])
}
stopifnot(length(unique(df$repo_url)) == length(df$repo_url))
dim(df)
## [1] 1662   12
## For papers not in df (i.e., for which we didn't get a valid response
## from the GitHub API query), use information from the archived data frame
dfarchive <- papers_archive %>% 
    dplyr::select(colnames(df)[colnames(df) %in% colnames(papers_archive)]) %>%
    dplyr::filter(!(repo_url %in% df$repo_url)) %>%
    dplyr::arrange(desc(repo_info_obtained)) %>%
    dplyr::filter(!duplicated(repo_url))
head(dfarchive)
## # A tibble: 6 × 12
##   repo_url    repo_created repo_updated repo_pushed repo_nbr_stars repo_language
##   <chr>       <date>       <date>       <date>               <int> <chr>        
## 1 https://gi… 2019-10-22   2024-07-06   2023-07-06             123 Python       
## 2 https://gi… 2017-12-24   2024-07-03   2024-06-18              16 Python       
## 3 https://gi… 2020-05-01   2024-07-16   2024-05-26              75 Python       
## 4 https://gi… 2023-04-03   2024-07-16   2024-07-16               9 Python       
## 5 https://gi… 2019-07-24   2024-06-24   2024-07-18              24 R            
## 6 https://gi… 2017-04-25   2024-06-28   2022-09-26              33 C++          
## # ℹ 6 more variables: repo_languages_bytes <chr>, repo_topics <chr>,
## #   repo_license <chr>, repo_nbr_contribs <int>,
## #   repo_nbr_contribs_2ormore <int>, repo_info_obtained <date>
dim(dfarchive)
## [1] 899  12
df <- dplyr::bind_rows(df, dfarchive)
stopifnot(length(unique(df$repo_url)) == length(df$repo_url))
dim(df)
## [1] 2561   12
papers <- papers %>% dplyr::left_join(df, by = "repo_url")
dim(papers)
## [1] 2574   69
source_track <- c(source_track, 
                  structure(rep("sw-github", length(setdiff(colnames(papers),
                                                            names(source_track)))), 
                            names = setdiff(colnames(papers), names(source_track))))

Clean up a bit

## Convert publication date to Date format
## Add information about the half year (H1, H2) of publication
## Count number of authors
papers <- papers %>% dplyr::select(-reference, -license, -link) %>%
    dplyr::mutate(published.date = as.Date(published.print)) %>% 
    dplyr::mutate(
        halfyear = paste0(year(published.date), 
                          ifelse(month(published.date) <= 6, "H1", "H2"))
    ) %>% dplyr::mutate(
        halfyear = factor(halfyear, 
                          levels = paste0(rep(sort(unique(year(published.date))), 
                                              each = 2), c("H1", "H2")))
    ) %>% dplyr::mutate(nbr_authors = vapply(author, function(a) nrow(a), NA_integer_))
dim(papers)
## [1] 2574   69
dupidx <- which(papers$alternative.id %in% papers$alternative.id[duplicated(papers)])
papers[dupidx, ] %>% arrange(alternative.id) %>% head(n = 10)
## # A tibble: 0 × 69
## # ℹ 69 variables: alternative.id <chr>, container.title <chr>, created <chr>,
## #   deposited <chr>, published.print <chr>, doi <chr>, indexed <chr>,
## #   issn <chr>, issue <chr>, issued <chr>, member <chr>, page <chr>,
## #   prefix <chr>, publisher <chr>, score <chr>, source <chr>,
## #   reference.count <chr>, references.count <chr>,
## #   is.referenced.by.count <chr>, title <chr>, type <chr>, url <chr>,
## #   volume <chr>, short.container.title <chr>, author <list>, …
papers <- papers %>% dplyr::distinct()
dim(papers)
## [1] 2574   69
source_track <- c(source_track, 
                  structure(rep("cleanup", length(setdiff(colnames(papers),
                                                          names(source_track)))), 
                            names = setdiff(colnames(papers), names(source_track))))

Tabulate number of missing values

In some cases, fetching information from (e.g.) the GitHub API fails for a subset of the publications. There are also other reasons for missing values (for example, the earliest submissions do not have an associated pre-review issue). The table below lists the number of missing values for each of the variables in the data frame.

DT::datatable(
    data.frame(variable = colnames(papers),
               nbr_missing = colSums(is.na(papers))) %>%
        dplyr::mutate(source = source_track[variable]),
    escape = FALSE, rownames = FALSE, 
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)

Number of published papers per month

monthly_pubs <- papers %>% 
    dplyr::mutate(pubmonth = lubridate::floor_date(published.date, "month")) %>%
    dplyr::group_by(pubmonth) %>%
    dplyr::summarize(npub = n())
ggplot(monthly_pubs, 
       aes(x = factor(pubmonth), y = npub)) + 
    geom_bar(stat = "identity") + theme_minimal() + 
    labs(x = "", y = "Number of published papers per month", caption = dcap) + 
    theme(axis.title = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

DT::datatable(
    monthly_pubs %>% 
        dplyr::rename("Number of papers" = "npub",
                      "Month of publications" = "pubmonth"),
    escape = FALSE, rownames = FALSE, 
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)

Number of published papers per year

yearly_pubs <- papers %>% 
    dplyr::mutate(pubyear = lubridate::year(published.date)) %>%
    dplyr::group_by(pubyear) %>%
    dplyr::summarize(npub = n())
ggplot(yearly_pubs, 
       aes(x = factor(pubyear), y = npub)) + 
    geom_bar(stat = "identity") + theme_minimal() + 
    labs(x = "", y = "Number of published papers per year", caption = dcap) + 
    theme(axis.title = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

DT::datatable(
    yearly_pubs %>% 
        dplyr::rename("Number of papers" = "npub",
                      "Year of publications" = "pubyear"),
    escape = FALSE, rownames = FALSE, 
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)

Fraction of rejected papers

The plots below illustrate the fraction of pre-review and review issues closed during each month that have the ‘rejected’ label attached.

ggplot(all_rejected, 
       aes(x = factor(closedmonth), y = nbr_rejections/nbr_issues_closed)) + 
    geom_bar(stat = "identity") + 
    theme_minimal() + 
    facet_wrap(~ itype, ncol = 1) + 
    labs(x = "Month of issue closing", y = "Fraction of issues rejected",
         caption = dcap) + 
    theme(axis.title = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5))

Citation distribution

Papers with 20 or more citations are grouped in the “>=20” category.

ggplot(papers %>% 
           dplyr::mutate(citation_count = replace(citation_count,
                                                  citation_count >= 20, ">=20")) %>%
           dplyr::mutate(citation_count = factor(citation_count, 
                                                 levels = c(0:20, ">=20"))) %>%
           dplyr::group_by(citation_count) %>%
           dplyr::tally(),
       aes(x = citation_count, y = n)) + 
    geom_bar(stat = "identity") + 
    theme_minimal() + 
    labs(x = "OpenAlex citation count", y = "Number of publications", caption = dcap)

Most cited papers

The table below sorts the JOSS papers in decreasing order by the number of citations in OpenAlex.

DT::datatable(
    papers %>% 
        dplyr::mutate(url = paste0("<a href='", url, "' target='_blank'>", 
                                   url,"</a>")) %>% 
        dplyr::arrange(desc(citation_count)) %>% 
        dplyr::select(title, url, published.date, citation_count),
    escape = FALSE,
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)

Citation count vs time since publication

plotly::ggplotly(
    ggplot(papers, aes(x = published.date, y = citation_count, label = title)) + 
        geom_point(alpha = 0.5) + theme_bw() + scale_y_sqrt() + 
        geom_smooth() + 
        labs(x = "Date of publication", y = "OpenAlex citation count", caption = dcap) + 
        theme(axis.title = element_text(size = 15)),
    tooltip = c("label", "x", "y")
)
## Warning: Removed 11 rows containing non-finite outside the scale range
## (`stat_smooth()`).
## Warning: The following aesthetics were dropped during statistical transformation: label.
## ℹ This can happen when ggplot fails to infer the correct grouping structure in
##   the data.
## ℹ Did you forget to specify a `group` aesthetic or to convert a numerical
##   variable into a factor?

Power law of citation count within each half year

Here, we plot the citation count for all papers published within each half year, sorted in decreasing order.

ggplot(papers %>% dplyr::group_by(halfyear) %>% 
           dplyr::arrange(desc(citation_count)) %>%
           dplyr::mutate(idx = seq_along(citation_count)), 
       aes(x = idx, y = citation_count)) + 
    geom_point(alpha = 0.5) + 
    facet_wrap(~ halfyear, scales = "free") + 
    theme_bw() + 
    labs(x = "Index", y = "OpenAlex citation count", caption = dcap)
## Warning: Removed 11 rows containing missing values or values outside the scale range
## (`geom_point()`).

Pre-review/review time over time

In these plots, we investigate whether the time a submission spends in the pre-review or review stage (or their sum) has changed over time. The blue curve corresponds to a rolling median, computed over a 120-day window of submissions.

## Helper functions (modified from https://stackoverflow.com/questions/65147186/geom-smooth-with-median-instead-of-mean)
rolling_median <- function(formula, data, xwindow = 120, ...) {
    ## Get order of x-values and sort x/y
    ordr <- order(data$x)
    x <- data$x[ordr]
    y <- data$y[ordr]
    
    ## Initialize vector for smoothed y-values
    ys <- rep(NA, length(x))
    ## Calculate median y-value for each unique x-value
    for (xs in setdiff(unique(x), NA)) {
        ## Get x-values in the window, and calculate median of corresponding y
        j <- ((xs - xwindow/2) < x) & (x < (xs + xwindow/2))
        ys[x == xs] <- median(y[j], na.rm = TRUE)
    }
    y <- ys
    structure(list(x = x, y = y, f = approxfun(x, y)), class = "rollmed")
}

predict.rollmed <- function(mod, newdata, ...) {
    setNames(mod$f(newdata$x), newdata$x)
}
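
## Quick illustration of the helpers on synthetic data (toy example, not 
## part of the report's outputs):
set.seed(1)
toy <- data.frame(x = 1:200, y = rnorm(200))
fit <- rolling_median(y ~ x, data = toy, xwindow = 30)
predict(fit, newdata = data.frame(x = c(50, 150)))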
ggplot(papers, aes(x = prerev_opened, y = as.numeric(days_in_pre))) + 
    geom_point() + 
    geom_smooth(formula = y ~ x, method = "rolling_median", 
                se = FALSE, method.args = list(xwindow = 120)) + 
    theme_bw() + 
    labs(x = "Date of pre-review opening", y = "Number of days in pre-review", 
         caption = dcap) + 
    theme(axis.title = element_text(size = 15))

ggplot(papers, aes(x = review_opened, y = as.numeric(days_in_rev))) + 
    geom_point() +
    geom_smooth(formula = y ~ x, method = "rolling_median", 
                se = FALSE, method.args = list(xwindow = 120)) +
    theme_bw() + 
    labs(x = "Date of review opening", y = "Number of days in review", 
         caption = dcap) + 
    theme(axis.title = element_text(size = 15))

ggplot(papers, aes(x = prerev_opened, 
                   y = as.numeric(days_in_pre) + as.numeric(days_in_rev))) + 
    geom_point() +
    geom_smooth(formula = y ~ x, method = "rolling_median", 
                se = FALSE, method.args = list(xwindow = 120)) +
    theme_bw() + 
    labs(x = "Date of pre-review opening", y = "Number of days in pre-review + review", 
         caption = dcap) + 
    theme(axis.title = element_text(size = 15))

Languages

Next, we consider the languages used by the submissions, both as reported by Whedon and based on the information encoded in available GitHub repositories (for the latter, we also record the number of bytes of code written in each language). Note that a given submission can use multiple languages.

## Language information from Whedon
sspl <- strsplit(papers$languages, ",")
all_languages <- unique(unlist(sspl))
langs <- do.call(dplyr::bind_rows, lapply(all_languages, function(l) {
    data.frame(language = l,
               nbr_submissions_Whedon = sum(vapply(sspl, function(v) l %in% v, 0)))
}))

## Language information from GitHub software repos
a <- lapply(strsplit(papers$repo_languages_bytes, ","), function(w) strsplit(w, ":"))
a <- a[sapply(a, length) > 0]
langbytes <- as.data.frame(t(as.data.frame(a))) %>% 
    setNames(c("language", "bytes")) %>%
    dplyr::mutate(bytes = as.numeric(bytes)) %>%
    dplyr::filter(!is.na(language)) %>%
    dplyr::group_by(language) %>%
    dplyr::summarize(nbr_bytes_GitHub = sum(bytes),
                     nbr_repos_GitHub = length(bytes)) %>%
    dplyr::arrange(desc(nbr_bytes_GitHub))
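
## An equivalent, arguably more transparent way to parse the 
## "Language:bytes" pairs uses tidyr; this sketch should reproduce 
## `langbytes` up to row order:
langbytes_alt <- papers %>%
    dplyr::select(repo_languages_bytes) %>%
    dplyr::filter(!is.na(repo_languages_bytes) & repo_languages_bytes != "") %>%
    tidyr::separate_rows(repo_languages_bytes, sep = ",") %>%
    tidyr::separate(repo_languages_bytes, into = c("language", "bytes"), sep = ":") %>%
    dplyr::mutate(bytes = as.numeric(bytes)) %>%
    dplyr::group_by(language) %>%
    dplyr::summarize(nbr_bytes_GitHub = sum(bytes),
                     nbr_repos_GitHub = dplyr::n())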

langs <- dplyr::full_join(langs, langbytes, by = "language")
ggplot(langs %>% dplyr::arrange(desc(nbr_submissions_Whedon)) %>%
           dplyr::filter(nbr_submissions_Whedon > 10) %>%
           dplyr::mutate(language = factor(language, levels = language)),
       aes(x = language, y = nbr_submissions_Whedon)) + 
    geom_bar(stat = "identity") + 
    theme_bw() + 
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + 
    labs(x = "", y = "Number of submissions", caption = dcap) + 
    theme(axis.title = element_text(size = 15))

DT::datatable(
    langs %>% dplyr::arrange(desc(nbr_bytes_GitHub)),
    escape = FALSE,
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)
ggplot(langs, aes(x = nbr_repos_GitHub, y = nbr_bytes_GitHub)) + 
    geom_point() + scale_x_log10() + scale_y_log10() + geom_smooth() + 
    theme_bw() + 
    labs(x = "Number of repos using the language",
         y = "Total number of bytes of code\nwritten in the language", 
         caption = dcap) + 
    theme(axis.title = element_text(size = 15))

Association between number of citations and number of stars of the GitHub repo

ggplotly(
    ggplot(papers, aes(x = citation_count, y = repo_nbr_stars,
                       label = title)) + 
        geom_point(alpha = 0.5) + scale_x_sqrt() + scale_y_sqrt() + 
        theme_bw() + 
        labs(x = "OpenAlex citation count", y = "Number of stars, GitHub repo", 
             caption = dcap) + 
        theme(axis.title = element_text(size = 15)),
    tooltip = c("label", "x", "y")
)

Distribution of time between GitHub repo creation and JOSS submission

ggplot(papers, aes(x = as.numeric(prerev_opened - repo_created))) +
    geom_histogram(bins = 50) + 
    theme_bw() + 
    labs(x = "Time (days) from repo creation to JOSS pre-review start", 
         caption = dcap) + 
    theme(axis.title = element_text(size = 15))

Distribution of time between JOSS acceptance and last commit

ggplot(papers, aes(x = as.numeric(repo_pushed - review_closed))) +
    geom_histogram(bins = 50) + 
    theme_bw() + 
    labs(x = "Time (days) from closure of JOSS review to most recent commit in repo",
         caption = dcap) + 
    theme(axis.title = element_text(size = 15)) + 
    facet_wrap(~ year(published.date), scales = "free_y")

Number of authors per paper

List the papers with the largest number of authors, and display the distribution of the number of authors per paper, for papers with at most 20 authors.

## Papers with largest number of authors
papers %>% dplyr::arrange(desc(nbr_authors)) %>% 
    dplyr::select(title, published.date, url, nbr_authors) %>%
    as.data.frame() %>% head(10)
##                                                                                                                          title
## 1                                                                                    SunPy: A Python package for Solar Physics
## 2                                                        ENZO: An Adaptive Mesh Refinement Code for Astrophysics (Version 2.6)
## 3  The Pencil Code, a modular MPI code for partial differential equations and particles: multipurpose and multiuser-maintained
## 4                                                     GRChombo: An adaptable numerical relativity code for fundamental physics
## 5                                                                                       PyBIDS: Python tools for BIDS datasets
## 6                                       DataLad: distributed system for joint management of code, data, and their relationship
## 7                                                                            Chaste: Cancer, Heart and Soft Tissue Environment
## 8                          sourmash v4: A multitool to quickly search, compare,\nand analyze genomic and metagenomic data sets
## 9                                        NOMAD: A distributed web-based platform for managing\nmaterials science research data
## 10                                                    HeuDiConv — flexible DICOM conversion into structured\ndirectory layouts
##    published.date                                   url nbr_authors
## 1      2020-02-14 http://dx.doi.org/10.21105/joss.01832         124
## 2      2019-10-03 http://dx.doi.org/10.21105/joss.01636          55
## 3      2021-02-21 http://dx.doi.org/10.21105/joss.02807          38
## 4      2021-12-10 http://dx.doi.org/10.21105/joss.03703          32
## 5      2019-08-12 http://dx.doi.org/10.21105/joss.01294          31
## 6      2021-07-01 http://dx.doi.org/10.21105/joss.03262          31
## 7      2020-03-13 http://dx.doi.org/10.21105/joss.01848          29
## 8      2024-06-28 http://dx.doi.org/10.21105/joss.06830          29
## 9      2023-10-15 http://dx.doi.org/10.21105/joss.05388          29
## 10     2024-07-03 http://dx.doi.org/10.21105/joss.05839          27
nbins <- max(papers$nbr_authors[papers$nbr_authors <= 20])
ggplot(papers %>% dplyr::filter(nbr_authors <= 20),
       aes(x = nbr_authors)) + 
    geom_histogram(bins = nbins, fill = "lightgrey", color = "grey50") + 
    theme_bw() + 
    facet_wrap(~ year(published.date), scales = "free_y") + 
    theme(axis.title = element_text(size = 15)) + 
    labs(x = "Number of authors",
         y = "Number of publications with\na given number of authors", 
         caption = dcap)

ggplot(papers %>% 
           dplyr::mutate(nbr_authors = replace(nbr_authors, nbr_authors > 5, ">5")) %>%
           dplyr::mutate(nbr_authors = factor(nbr_authors, levels = c("1", "2", "3", 
                                                                      "4", "5", ">5"))) %>%
           dplyr::mutate(year = year(published.date)) %>%
           dplyr::mutate(year = factor(year)) %>%
           dplyr::group_by(year, nbr_authors, .drop = FALSE) %>%
           dplyr::summarize(n = n()) %>%
           dplyr::mutate(freq = n/sum(n)) %>%
           dplyr::mutate(year = as.integer(as.character(year))), 
       aes(x = year, y = freq, fill = nbr_authors)) + geom_area() + 
    theme_minimal() + 
    scale_fill_brewer(palette = "Set1", name = "Number of\nauthors", 
                      na.value = "grey") + 
    theme(axis.title = element_text(size = 15)) + 
    labs(x = "Year", y = "Fraction of submissions", caption = dcap)

Number of authors vs number of contributors to the GitHub repo

Note that points are slightly jittered to reduce the overlap.

plotly::ggplotly(
    ggplot(papers, aes(x = nbr_authors, y = repo_nbr_contribs_2ormore, label = title)) + 
        geom_abline(slope = 1, intercept = 0) + 
        geom_jitter(width = 0.05, height = 0.05, alpha = 0.5) + 
        # geom_point(alpha = 0.5) + 
        theme_bw() + 
        scale_x_sqrt() + scale_y_sqrt() + 
        labs(x = "Number of authors", 
             y = "Number of contributors\nwith at least 2 commits", 
             caption = dcap) + 
        theme(axis.title = element_text(size = 15)),
    tooltip = c("label", "x", "y")
)

Number of reviewers per paper

Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.

ggplot(papers %>%
           dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
           dplyr::mutate(year = year(published.date)),
       aes(x = nbr_reviewers)) + geom_bar() + 
    facet_wrap(~ year) + theme_bw() + 
    labs(x = "Number of reviewers", y = "Number of submissions", caption = dcap)

Most active reviewers

Submissions associated with rOpenSci and pyOpenSci are not considered here, since they are not explicitly reviewed at JOSS.

reviewers <- papers %>% 
    dplyr::filter(!grepl("rOpenSci|pyOpenSci", prerev_labels)) %>%
    dplyr::mutate(year = year(published.date)) %>%
    dplyr::select(reviewers, year) %>%
    tidyr::separate_rows(reviewers, sep = ",")

## Most active reviewers
DT::datatable(
    reviewers %>% dplyr::group_by(reviewers) %>%
        dplyr::summarize(nbr_reviews = length(year),
                         timespan = paste(unique(c(min(year), max(year))), 
                                          collapse = " - ")) %>%
        dplyr::arrange(desc(nbr_reviews)),
    escape = FALSE, rownames = FALSE, 
    filter = list(position = 'top', clear = FALSE),
    options = list(scrollX = TRUE)
)

Number of papers per editor and year

ggplot(papers %>% 
           dplyr::mutate(year = year(published.date),
                         `r/pyOpenSci` = factor(
                             grepl("rOpenSci|pyOpenSci", prerev_labels),
                             levels = c("TRUE", "FALSE"))), 
       aes(x = editor)) + geom_bar(aes(fill = `r/pyOpenSci`)) + 
    theme_bw() + facet_wrap(~ year, ncol = 1) + 
    scale_fill_manual(values = c(`TRUE` = "grey65", `FALSE` = "grey35")) + 
    theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + 
    labs(x = "Editor", y = "Number of submissions", caption = dcap)

Distribution of software repo licenses

all_licenses <- sort(unique(papers$repo_license))
license_levels <- c(grep("apache", all_licenses, value = TRUE),
                    grep("bsd", all_licenses, value = TRUE),
                    grep("mit", all_licenses, value = TRUE),
                    grep("gpl", all_licenses, value = TRUE),
                    grep("mpl", all_licenses, value = TRUE))
license_levels <- c(license_levels, setdiff(all_licenses, license_levels))
ggplot(papers %>% 
           dplyr::mutate(repo_license = factor(repo_license, 
                                               levels = license_levels)),
       aes(x = repo_license)) +
    geom_bar() + 
    theme_bw() + 
    labs(x = "Software license", y = "Number of submissions", caption = dcap) + 
    theme(axis.title = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) + 
    facet_wrap(~ year(published.date), scales = "free_y")

## For the plots below, replace licenses present in at most 
## 2.5% of the submissions with 'other'
tbl <- table(papers$repo_license)
to_replace <- names(tbl[tbl <= 0.025 * nrow(papers)])
ggplot(papers %>% 
           dplyr::mutate(year = year(published.date)) %>%
           dplyr::mutate(repo_license = replace(repo_license, 
                                                repo_license %in% to_replace,
                                                "other")) %>%
           dplyr::mutate(year = factor(year), 
                         repo_license = factor(
                             repo_license, 
                             levels = license_levels[license_levels %in% repo_license]
                         )) %>%
           dplyr::group_by(year, repo_license, .drop = FALSE) %>%
           dplyr::count() %>%
           dplyr::mutate(year = as.integer(as.character(year))), 
       aes(x = year, y = n, fill = repo_license)) + geom_area() + 
    theme_minimal() + 
    scale_fill_brewer(palette = "Set1", name = "Software\nlicense", 
                      na.value = "grey") + 
    theme(axis.title = element_text(size = 15)) + 
    labs(x = "Year", y = "Number of submissions", caption = dcap)

ggplot(papers %>% 
           dplyr::mutate(year = year(published.date)) %>%
           dplyr::mutate(repo_license = replace(repo_license, 
                                                repo_license %in% to_replace,
                                                "other")) %>%
           dplyr::mutate(year = factor(year), 
                         repo_license = factor(
                             repo_license, 
                             levels = license_levels[license_levels %in% repo_license]
                         )) %>%
           dplyr::group_by(year, repo_license, .drop = FALSE) %>%
           dplyr::summarize(n = n()) %>%
           dplyr::mutate(freq = n/sum(n)) %>%
           dplyr::mutate(year = as.integer(as.character(year))), 
       aes(x = year, y = freq, fill = repo_license)) + geom_area() + 
    theme_minimal() + 
    scale_fill_brewer(palette = "Set1", name = "Software\nlicense", 
                      na.value = "grey") + 
    theme(axis.title = element_text(size = 15)) + 
    labs(x = "Year", y = "Fraction of submissions", caption = dcap)

Most common GitHub repo topics

## Collect all repo topics (stored as comma-separated strings in repo_topics)
topics <- unlist(strsplit(papers$repo_topics, ","))
topics <- topics[!is.na(topics)]
topicfreq <- table(topics)

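## Use square-root-transformed frequencies in the word cloud below, so that 
## the most common topics do not completely dominate the display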
colors <- viridis::viridis(100)
set.seed(1234)
wordcloud::wordcloud(
    names(topicfreq), sqrt(topicfreq), min.freq = 1, max.words = 300,
    random.order = FALSE, rot.per = 0.05, use.r.layout = FALSE, 
    colors = colors, scale = c(10, 0.1), random.color = TRUE,
    ordered.colors = FALSE, vfont = c("serif", "plain")
)

DT::datatable(as.data.frame(topicfreq) %>% 
                  dplyr::rename(topic = topics, nbr_repos = Freq) %>%
                  dplyr::arrange(desc(nbr_repos)),
              escape = FALSE, rownames = FALSE, 
              filter = list(position = 'top', clear = FALSE),
              options = list(scrollX = TRUE))

Citation analysis

Here, we take a more detailed look at the papers that cite JOSS papers, using data from the Open Citations Corpus.

Get citing papers for each submission
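
Before running the batched queries below, it can help to see what a single 
COCI query returns. A minimal sketch, using one published JOSS paper purely 
as an illustration:

## Citing papers for a single JOSS DOI; each row is one citation, with the 
## 'citing' and 'cited' DOIs and an OCI identifier
single <- citecorp::oc_coci_cites(doi = "10.21105/joss.01420")
head(single[, c("citing", "cited", "oci")])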

## Split the queries into batches of 50 DOIs each
## Randomize the assignment of papers to batches, since a whole batch may 
## fail if a single ID in it is not recognized; randomizing changes which 
## papers share a batch from one run to the next
papidx <- seq_len(nrow(papers))
idxL <- split(sample(papidx, length(papidx), replace = FALSE), ceiling(papidx / 50))
citationsL <- lapply(idxL, function(idx) {
    tryCatch({
        citecorp::oc_coci_cites(doi = papers$alternative.id[idx]) %>%
            dplyr::distinct() %>%
            dplyr::mutate(citation_info_obtained = as.character(lubridate::today()))
    }, error = function(e) {
        NULL
    })
})
citationsL <- citationsL[vapply(citationsL, function(df) !is.null(df) && nrow(df) > 0, FALSE)]
if (length(citationsL) > 0) {
    citations <- do.call(dplyr::bind_rows, citationsL)
} else {
    citations <- NULL
}
dim(citations)
## [1] 37124     8
if (!is.null(citations) && is.data.frame(citations) && "oci" %in% colnames(citations)) {
    citations <- citations %>% 
        dplyr::filter(!(oci %in% citations_archive$oci))
    
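    ## Retrieve metadata (journal, ISSN, type, publisher, DOI prefix) for 
    ## each citing DOI from Crossref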
    tmpj <- rcrossref::cr_works(dois = unique(citations$citing))$data %>%
        dplyr::select(contains("doi"), contains("container.title"), contains("issn"),
                      contains("type"), contains("publisher"), contains("prefix"))
    citations <- citations %>% dplyr::left_join(tmpj, by = c("citing" = "doi"))
    
    ## bioRxiv preprints don't have a 'container.title' or 'issn', but we 
    ## assume that they can be identified from the DOI prefix 10.1101 - set 
    ## the container.title for these records manually. Note that keeping 
    ## these records may count some citations twice, once from the preprint 
    ## and once from the final publication.
    ## (%in% is used so that records with a missing prefix are not matched)
    citations$container.title[citations$prefix %in% "10.1101"] <- "bioRxiv"
    
    ## JOSS is represented by both 'The Journal of Open Source Software' and 
    ## 'Journal of Open Source Software' - harmonize to the former
    citations$container.title[citations$container.title %in% 
                                  "Journal of Open Source Software"] <- 
        "The Journal of Open Source Software"
    
    ## Remove true self-citations (records where the citing DOI equals the 
    ## cited DOI)
    citations <- citations %>% dplyr::filter(cited != citing)
    
    ## Merge with the archive
    citations <- dplyr::bind_rows(citations, citations_archive)
} else {
    citations <- citations_archive
    if (is.null(citations[["citation_info_obtained"]])) {
        citations$citation_info_obtained <- NA_character_
    }
}

citations$citation_info_obtained[is.na(citations$citation_info_obtained)] <- 
    "2021-08-11"

write.table(citations, file = "joss_submission_citations.tsv",
            row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)

Summary statistics

## Latest successful update of new citation data
max(as.Date(citations$citation_info_obtained))
## [1] "2024-07-24"
## Number of JOSS papers with >0 citations included in this collection
length(unique(citations$cited))
## [1] 1583
## Number of JOSS papers with >0 citations according to OpenAlex
length(which(papers$citation_count > 0))
## [1] 1880
## Number of citations from Open Citations Corpus vs OpenAlex
df0 <- papers %>% dplyr::select(doi, citation_count) %>%
    dplyr::full_join(citations %>% dplyr::group_by(cited) %>%
                         dplyr::tally(),
                     by = c("doi" = "cited")) %>%
    ## Papers without any recorded citation get n = NA from the join - 
    ## replace these with 0
    dplyr::mutate(n = replace(n, is.na(n), 0))
## Total citation count OpenAlex
sum(df0$citation_count, na.rm = TRUE)
## [1] 67686
## Total citation count Open Citations Corpus
sum(df0$n, na.rm = TRUE)
## [1] 76046
## Ratio of total citation count Open Citations Corpus/OpenAlex
sum(df0$n, na.rm = TRUE)/sum(df0$citation_count, na.rm = TRUE)
## [1] 1.123512
ggplot(df0, aes(x = citation_count, y = n)) + 
    geom_abline(slope = 1, intercept = 0) + 
    geom_point(size = 3, alpha = 0.5) + 
    labs(x = "OpenAlex citation count", y = "Open Citations Corpus citation count",
         caption = dcap) + 
    theme_bw()

## Zoom in
ggplot(df0, aes(x = citation_count, y = n)) + 
    geom_abline(slope = 1, intercept = 0) + 
    geom_point(size = 3, alpha = 0.5) + 
    labs(x = "OpenAlex citation count", y = "Open Citations Corpus citation count",
         caption = dcap) + 
    theme_bw() + 
    coord_cartesian(xlim = c(0, 75), ylim = c(0, 75))

## Number of journals citing JOSS papers
length(unique(citations$container.title))
## [1] 8695
length(unique(citations$issn))
## [1] 6468

Most citing journals

topcit <- citations %>% dplyr::group_by(container.title) %>%
    dplyr::summarize(nbr_citations_of_joss_papers = length(cited),
                     nbr_cited_joss_papers = length(unique(cited)),
                     nbr_citing_papers = length(unique(citing)),
                     nbr_selfcitations_of_joss_papers = sum(author_sc == "yes"),
                     fraction_selfcitations = signif(nbr_selfcitations_of_joss_papers /
                                                         nbr_citations_of_joss_papers, digits = 3)) %>%
    dplyr::arrange(desc(nbr_cited_joss_papers))
DT::datatable(topcit,
              escape = FALSE, rownames = FALSE, 
              filter = list(position = 'top', clear = FALSE),
              options = list(scrollX = TRUE))
plotly::ggplotly(
    ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
                       label = container.title)) + 
        geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") + 
        geom_point(size = 3, alpha = 0.5) + 
        theme_bw() + 
        labs(caption = dcap, x = "Number of citations of JOSS papers",
             y = "Number of cited JOSS papers")
)
plotly::ggplotly(
    ggplot(topcit, aes(x = nbr_citations_of_joss_papers, y = nbr_cited_joss_papers,
                       label = container.title)) + 
        geom_abline(slope = 1, intercept = 0, linetype = "dashed", color = "grey") + 
        geom_point(size = 3, alpha = 0.5) + 
        theme_bw() + 
        coord_cartesian(xlim = c(0, 100), ylim = c(0, 50)) + 
        labs(caption = dcap, x = "Number of citations of JOSS papers",
             y = "Number of cited JOSS papers")
)
write.table(topcit, file = "joss_submission_citations_byjournal.tsv",
            row.names = FALSE, col.names = TRUE, sep = "\t", quote = FALSE)
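
For later reuse, the per-journal summary can be read back with readr (a 
minimal sketch, assuming the file is available in the working directory):

topcit <- readr::read_tsv("joss_submission_citations_byjournal.tsv",
                          show_col_types = FALSE)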

Save object

The tibble object with all data collected above is serialized to a file that can be downloaded and reused.

head(papers) %>% as.data.frame()
##        alternative.id                     container.title    created  deposited
## 1 10.21105/joss.02583     Journal of Open Source Software 2020-09-26 2020-09-26
## 2 10.21105/joss.02013     Journal of Open Source Software 2020-02-10 2020-02-10
## 3 10.21105/joss.03917     Journal of Open Source Software 2021-12-02 2021-12-02
## 4 10.21105/joss.00050 The Journal of Open Source Software 2016-12-07 2017-10-24
## 5 10.21105/joss.01420     Journal of Open Source Software 2019-07-11 2019-11-17
## 6 10.21105/joss.00029 The Journal of Open Source Software 2016-06-20 2019-09-09
##   published.print                 doi    indexed      issn issue     issued
## 1      2020-09-26 10.21105/joss.02583 2022-03-28 2475-9066    53 2020-09-26
## 2      2020-02-10 10.21105/joss.02013 2023-05-19 2475-9066    46 2020-02-10
## 3      2021-12-02 10.21105/joss.03917 2022-03-30 2475-9066    68 2021-12-02
## 4      2016-12-06 10.21105/joss.00050 2022-03-29 2475-9066     8 2016-12-06
## 5      2019-07-11 10.21105/joss.01420 2022-04-02 2475-9066    39 2019-07-11
## 6      2016-06-19 10.21105/joss.00029 2022-04-07 2475-9066     2 2016-06-19
##   member page   prefix        publisher score   source reference.count
## 1   8722 2583 10.21105 The Open Journal     0 Crossref              28
## 2   8722 2013 10.21105 The Open Journal     0 Crossref              12
## 3   8722 3917 10.21105 The Open Journal     0 Crossref              34
## 4   8722   50 10.21105 The Open Journal     0 Crossref               2
## 5   8722 1420 10.21105 The Open Journal     0 Crossref               5
## 6   8722   29 10.21105 The Open Journal     0 Crossref               5
##   references.count is.referenced.by.count
## 1               28                      0
## 2               12                      6
## 3               34                      0
## 4                2                      2
## 5                5                      2
## 6                5                      1
##                                                                                                                                                     title
## 1                                                                 emba: R package for analysis and visualization of biomarkers in boolean model ensembles
## 2 thresholdmodeling: A Python package for modeling excesses over a threshold using the Peak-Over-Threshold Method and the Generalized Pareto Distribution
## 3                                                  CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 4                                                                                                                           MassMine: Your Access To Data
## 5                                                                    ECabc: A feature tuning program focused on Artificial Neural Network hyperparameters
## 6                                                                           Git-RDM: A research data management plugin for the Git version control system
##              type                                   url volume
## 1 journal-article http://dx.doi.org/10.21105/joss.02583      5
## 2 journal-article http://dx.doi.org/10.21105/joss.02013      5
## 3 journal-article http://dx.doi.org/10.21105/joss.03917      6
## 4 journal-article http://dx.doi.org/10.21105/joss.00050      1
## 5 journal-article http://dx.doi.org/10.21105/joss.01420      4
## 6 journal-article http://dx.doi.org/10.21105/joss.00029      1
##   short.container.title
## 1                  JOSS
## 2                  JOSS
## 3                  JOSS
## 4                  JOSS
## 5                  JOSS
## 6                  JOSS
##                                                                                                                                                                                                                                                                                               author
## 1                                                                                http://orcid.org/0000-0002-3609-8674, http://orcid.org/0000-0002-1171-9876, http://orcid.org/0000-0002-3357-425X, FALSE, FALSE, FALSE, John, Martin, Åsmund, Zobolas, Kuiper, Flobak, first, additional, additional
## 2                                                                                   http://orcid.org/0000-0002-5829-7711, http://orcid.org/0000-0003-0170-6083, http://orcid.org/0000-0002-8166-5666, FALSE, FALSE, FALSE, Iago, Antônio, Marcus, Lemos, Lima, Duarte, first, additional, additional
## 3                                                                                                                                                                                                                                http://orcid.org/0000-0003-2217-4768, FALSE, Shailesh, Kumar, first
## 4                                                                                                                                                http://orcid.org/0000-0002-0695-7765, http://orcid.org/0000-0002-5156-3044, FALSE, FALSE, Nicholas, Aaron, M Van Horn, Beveridge, first, additional
## 5 http://orcid.org/0000-0002-9884-7351, http://orcid.org/0000-0003-0690-576X, http://orcid.org/0000-0002-7363-4050, http://orcid.org/0000-0002-5455-8611, FALSE, FALSE, FALSE, FALSE, Sanskriti, Hernan, Travis, John, Sharma, Gelaf-Romer, Kessler, Mack, first, additional, additional, additional
## 6                                                                                                                                               http://orcid.org/0000-0002-0034-4650, http://orcid.org/0000-0002-2695-3358, FALSE, FALSE, Christian, Alexandros, T. Jacobs, Avdis, first, additional
##   citation_count                      openalex_id affil_countries_all
## 1              0 https://openalex.org/W3088617247                  NO
## 2              8 https://openalex.org/W3005450240                  BR
## 3              2 https://openalex.org/W4200596790                  IN
## 4              2 https://openalex.org/W2560158402                  US
## 5              2 https://openalex.org/W2961021894                  US
## 6              3 https://openalex.org/W2423960820                  GB
##   affil_countries_first
## 1                    NO
## 2                    BR
## 3                    IN
## 4                    US
## 5                    US
## 6                    GB
##                                                                                                                                                 api_title
## 1                                                                 emba: R package for analysis and visualization of biomarkers in boolean model ensembles
## 2 thresholdmodeling: A Python package for modeling excesses over a threshold using the Peak-Over-Threshold Method and the Generalized Pareto Distribution
## 3                                                  CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 4                                                                                                                           MassMine: Your Access To Data
## 5                                                                    ECabc: A feature tuning program focused on Artificial Neural Network hyperparameters
## 6                                                                           Git-RDM: A research data management plugin for the Git version control system
##   api_state         editor                   reviewers nbr_reviewers
## 1  accepted        @mikldk @neerajdhanraj,@edifice1989             2
## 2  accepted @drvinceknight         @bahung,@kellieotto             2
## 3  accepted       @pdebuyl           @Saran-nns,@mirca             2
## 4  accepted       @mgymrek              @julianmcauley             1
## 5  accepted        @arokem                   @annoviko             1
## 6  accepted         @arfon                       @jsta             1
##                                          repo_url review_issue_id
## 1                https://github.com/bblodfon/emba            2583
## 2 https://github.com/iagolemos1/thresholdmodeling            2013
## 3     https://github.com/carnotresearch/cr-sparse            3917
## 4                https://github.com/n3mo/massmine              50
## 5                   https://github.com/ECRL/ecabc            1420
## 6             https://github.com/ctjacobs/git-rdm              29
##   prereview_issue_id languages                                    archive_doi
## 1               2534         R         https://doi.org/10.5281/zenodo.4043085
## 2               1999    Python         https://doi.org/10.5281/zenodo.3661338
## 3               3913    Python         https://doi.org/10.5281/zenodo.5749792
## 4                 NA    Scheme          https://doi.org/10.5281/zenodo.193078
## 5               1383    Python         https://doi.org/10.5281/zenodo.3256403
## 6                 NA    Python https://doi.org/10.6084/m9.figshare.3443750.v1
##                                                                                                                                              review_title
## 1                                                                 emba: R package for analysis and visualization of biomarkers in boolean model ensembles
## 2 thresholdmodeling: A Python package for modeling excesses over a threshold using the Peak-Over-Threshold Method and the Generalized Pareto Distribution
## 3                                                  CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 4                                                                                                                           MassMine: Your Access To Data
## 5                                                                    ECabc: A feature tuning program focused on Artificial Neural Network hyperparameters
## 6                                                                           Git-RDM: A research data management plugin for the Git version control system
##   review_number review_state review_opened review_closed review_ncomments
## 1          2583       closed    2020-08-19    2020-09-26               76
## 2          2013       closed    2020-01-13    2020-02-10               69
## 3          3917       closed    2021-11-16    2021-12-02               59
## 4            50       closed    2016-08-16    2016-12-07               42
## 5          1420       closed    2019-04-30    2019-07-11               66
## 6            29       closed    2016-06-17    2016-06-20               17
##                                          review_labels
## 1            accepted,TeX,R,recommend-accept,published
## 2                  accepted,recommend-accept,published
## 3 accepted,TeX,Shell,Python,recommend-accept,published
## 4                  accepted,recommend-accept,published
## 5                  accepted,recommend-accept,published
## 6                  accepted,recommend-accept,published
##                                                                                                                                              prerev_title
## 1                                                                 emba: R package for analysis and visualization of biomarkers in boolean model ensembles
## 2 thresholdmodeling: A Python package for modeling excesses over a threshold using the Peak-Over-Threshold Method and the Generalized Pareto Distribution
## 3                                                  CR-Sparse: Hardware accelerated functional algorithms for sparse signal processing in Python using JAX
## 4                                                                                                                                                    <NA>
## 5                                                                    ECabc: A feature tuning program focused on Artificial Neural Network hyperparameters
## 6                                                                                                                                                    <NA>
##   prerev_state prerev_opened prerev_closed prerev_ncomments    prerev_labels
## 1       closed    2020-07-28    2020-08-19               32            TeX,R
## 2       closed    2020-01-07    2020-01-13               37       TeX,Python
## 3       closed    2021-11-12    2021-11-16               29 TeX,Shell,Python
## 4         <NA>          <NA>          <NA>               NA             <NA>
## 5       closed    2019-04-14    2019-04-30               31       TeX,Python
## 6         <NA>          <NA>          <NA>               NA             <NA>
##   days_in_pre days_in_rev to_review repo_created repo_updated repo_pushed
## 1     22 days     38 days      TRUE   2019-06-03   2023-04-26  2023-04-26
## 2      6 days     28 days      TRUE   2019-12-27   2024-03-20  2020-12-24
## 3      4 days     16 days      TRUE   2020-12-22   2024-06-17  2023-10-17
## 4     NA days    113 days      TRUE   2014-04-04   2023-09-28  2022-07-22
## 5     16 days     72 days      TRUE   2018-02-28   2024-02-23  2023-08-02
## 6     NA days      3 days      TRUE   2016-06-16   2023-09-05  2017-10-24
##   repo_nbr_stars    repo_language
## 1              0                R
## 2             30           Python
## 3             86 Jupyter Notebook
## 4             74           Scheme
## 5             12           Python
## 6             34           Python
##                                         repo_languages_bytes
## 1                                         R:211165,TeX:45603
## 2                                      Python:37812,TeX:3472
## 3 Jupyter Notebook:1232323,Python:632797,TeX:18209,Shell:187
## 4                           Scheme:154502,Shell:2438,TeX:478
## 5                                      Python:26233,TeX:1944
## 6                                      Python:23978,TeX:1513
##                                                                                                                                                                                                repo_topics
## 1                                                                                                                                                                   r,r-package,biomarkers,ensemble-models
## 2                                                                                                                                                                                                         
## 3 sparse-representations,jax,wavelets,convex-optimization,linear-operators,compressive-sensing,functional-programming,l1-regularization,sparse-linear-systems,lasso,sparse-bayesian-learning,basis-pursuit
## 4                                                                                                                                                                                                         
## 5                                                                                                                          neural-network,feature-tuning,hyperparameter-optimization,artificial-bee-colony
## 6                                                                                                    research-data-management,git,data,open-data,open-science,version-control,curation,publishing,datasets
##   repo_license repo_nbr_contribs repo_nbr_contribs_2ormore repo_info_obtained
## 1        other                 2                         1         2024-07-24
## 2     lgpl-3.0                 4                         2         2024-07-24
## 3   apache-2.0                 2                         1         2024-07-24
## 4      gpl-3.0                 1                         0         2024-07-24
## 5          mit                 9                         5         2024-07-24
## 6          mit                 2                         1         2024-07-24
##   published.date halfyear nbr_authors
## 1     2020-09-26   2020H2           3
## 2     2020-02-10   2020H1           3
## 3     2021-12-02   2021H2           1
## 4     2016-12-06   2016H2           2
## 5     2019-07-11   2019H2           4
## 6     2016-06-19   2016H1           2
saveRDS(papers, file = "joss_submission_analytics.rds")

To read the current version of this file directly from GitHub, use the following code:

papers <- readRDS(gzcon(url("https://github.com/openjournals/joss-analytics/blob/gh-pages/joss_submission_analytics.rds?raw=true")))
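
Once loaded, this is an ordinary tibble; for example, to tabulate the number 
of papers per publication year (a quick sketch using the published.date 
column shown above):

papers %>% 
    dplyr::count(year = lubridate::year(published.date))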

Session info

sessionInfo()
## R version 4.4.1 (2024-06-14)
## Platform: aarch64-apple-darwin20
## Running under: macOS Sonoma 14.5
## 
## Matrix products: default
## BLAS:   /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRblas.0.dylib 
## LAPACK: /Library/Frameworks/R.framework/Versions/4.4-arm64/Resources/lib/libRlapack.dylib;  LAPACK version 3.12.0
## 
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
## 
## time zone: UTC
## tzcode source: internal
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] openalexR_1.4.0   stringr_1.5.1     gt_0.11.0         rworldmap_1.3-8  
##  [5] sp_2.1-4          readr_2.1.5       citecorp_0.3.0    plotly_4.10.4    
##  [9] DT_0.33           jsonlite_1.8.8    purrr_1.0.2       gh_1.4.1         
## [13] lubridate_1.9.3   ggplot2_3.5.1     tidyr_1.3.1       dplyr_1.1.4      
## [17] rcrossref_1.2.009 tibble_3.2.1     
## 
## loaded via a namespace (and not attached):
##  [1] tidyselect_1.2.1   viridisLite_0.4.2  farver_2.1.2       viridis_0.6.5     
##  [5] urltools_1.7.3     fields_16.2        fastmap_1.2.0      lazyeval_0.2.2    
##  [9] promises_1.3.0     digest_0.6.36      dotCall64_1.1-1    timechange_0.3.0  
## [13] mime_0.12          lifecycle_1.0.4    terra_1.7-78       magrittr_2.0.3    
## [17] compiler_4.4.1     rlang_1.1.4        sass_0.4.9         tools_4.4.1       
## [21] wordcloud_2.6      utf8_1.2.4         yaml_2.3.9         data.table_1.15.4 
## [25] knitr_1.48         labeling_0.4.3     fauxpas_0.5.2      htmlwidgets_1.6.4 
## [29] bit_4.0.5          curl_5.2.1         RColorBrewer_1.1-3 plyr_1.8.9        
## [33] xml2_1.3.6         httpcode_0.3.0     miniUI_0.1.1.1     withr_3.0.0       
## [37] triebeard_0.4.1    grid_4.4.1         fansi_1.0.6        xtable_1.8-4      
## [41] colorspace_2.1-0   gitcreds_0.1.2     scales_1.3.0       crul_1.5.0        
## [45] cli_3.6.3          rmarkdown_2.27     crayon_1.5.3       generics_0.1.3    
## [49] httr_1.4.7         tzdb_0.4.0         cachem_1.1.0       splines_4.4.1     
## [53] maps_3.4.2         parallel_4.4.1     vctrs_0.6.5        Matrix_1.7-0      
## [57] hms_1.1.3          bit64_4.0.5        crosstalk_1.2.1    jquerylib_0.1.4   
## [61] glue_1.7.0         spam_2.10-0        codetools_0.2-20   stringi_1.8.4     
## [65] gtable_0.3.5       later_1.3.2        raster_3.6-26      munsell_0.5.1     
## [69] pillar_1.9.0       rappdirs_0.3.3     htmltools_0.5.8.1  R6_2.5.1          
## [73] httr2_1.0.2        vroom_1.6.5        evaluate_0.24.0    shiny_1.8.1.1     
## [77] lattice_0.22-6     highr_0.11         httpuv_1.6.15      bslib_0.7.0       
## [81] Rcpp_1.0.13        gridExtra_2.3      nlme_3.1-164       mgcv_1.9-1        
## [85] whisker_0.4.1      xfun_0.46          pkgconfig_2.0.3