diff --git a/R/age.R b/R/age.R index 47e1e502..26dbaa2a 100755 --- a/R/age.R +++ b/R/age.R @@ -50,8 +50,8 @@ age <- function(x, reference = Sys.Date()) { as.integer(years_gap)) if (any(ages < 0, na.rm = TRUE)) { - warning("NAs introduced for ages below 0.") ages[ages < 0] <- NA_integer_ + warning("NAs introduced for ages below 0.") } if (any(ages > 120, na.rm = TRUE)) { warning("Some ages are > 120.") @@ -71,7 +71,7 @@ age <- function(x, reference = Sys.Date()) { #' The default is to split on young children (0-11), youth (12-24), young adults (25-54), middle-aged adults (55-74) and elderly (75+).} #' \item{A character:} #' \itemize{ -#' \item{\code{"children"}, equivalent of: \code{c(0, 1, 2, 4, 6, 13, 18)}. This will split on 0, 1, 2-3, 4-5, 6-12, 13-17 and 18+.} +#' \item{\code{"children"} or \code{"kids"}, equivalent of: \code{c(0, 1, 2, 4, 6, 13, 18)}. This will split on 0, 1, 2-3, 4-5, 6-12, 13-17 and 18+.} #' \item{\code{"elderly"} or \code{"seniors"}, equivalent of: \code{c(65, 75, 85)}. This will split on 0-64, 65-74, 75-84, 85+.} #' \item{\code{"fives"}, equivalent of: \code{1:20 * 5}. This will split on 0-4, 5-9, 10-14, ..., 90-94, 95-99, 100+.} #' \item{\code{"tens"}, equivalent of: \code{1:10 * 10}. This will split on 0-9, 10-19, 20-29, ... 80-89, 90-99, 100+.} @@ -107,17 +107,15 @@ age <- function(x, reference = Sys.Date()) { #' # resistance of ciprofloxacine per age group #' library(dplyr) #' septic_patients %>% -#' mutate(first_isolate = first_isolate(.)) %>% -#' filter(first_isolate == TRUE, -#' mo == as.mo("E. coli")) %>% +#' filter_first_isolate) %>% +#' filter(mo == as.mo("E. 
coli")) %>% #' group_by(age_group = age_groups(age)) %>% -#' select(age_group, -#' CIP) %>% +#' select(age_group, CIP) %>% #' ggplot_rsi(x = "age_group") age_groups <- function(x, split_at = c(12, 25, 55, 75)) { if (is.character(split_at)) { split_at <- split_at[1L] - if (split_at %like% "^(child|kid)") { + if (split_at %like% "^(child|kid|junior)") { split_at <- c(0, 1, 2, 4, 6, 13, 18) } else if (split_at %like% "^(elder|senior)") { split_at <- c(65, 75, 85) @@ -133,10 +131,12 @@ age_groups <- function(x, split_at = c(12, 25, 55, 75)) { } split_at <- sort(unique(split_at)) if (!split_at[1] == 0) { + # add base number 0 split_at <- c(0, split_at) } + split_at <- split_at[!is.na(split_at)] if (length(split_at) == 1) { - # only 0 available + # only 0 is available stop("invalid value for `split_at`.") } diff --git a/docs/articles/AMR.html b/docs/articles/AMR.html index 2519e790..b66f2e0c 100644 --- a/docs/articles/AMR.html +++ b/docs/articles/AMR.html @@ -40,7 +40,7 @@
@@ -334,69 +334,69 @@So, we can draw at least two conclusions immediately. From a data scientist perspective, the data looks clean: only values M
and F
. From a researcher perspective: there are slightly more men. Nothing we didn’t already know.
The data is already quite clean, but we still need to transform some variables. The bacteria
column now consists of text, and we want to add more variables based on microbial IDs later on. So, we will transform this column to valid IDs. The mutate()
function of the dplyr
package makes this really easy:
data <- data %>%
@@ -449,14 +449,14 @@
# Pasteurella multocida (no new changes)
# Staphylococcus (no new changes)
# Streptococcus groups A, B, C, G (no new changes)
-# Streptococcus pneumoniae (1390 new changes)
+# Streptococcus pneumoniae (1464 new changes)
# Viridans group streptococci (no new changes)
#
# EUCAST Expert Rules, Intrinsic Resistance and Exceptional Phenotypes (v3.1, 2016)
-# Table 01: Intrinsic resistance in Enterobacteriaceae (1264 new changes)
+# Table 01: Intrinsic resistance in Enterobacteriaceae (1265 new changes)
# Table 02: Intrinsic resistance in non-fermentative Gram-negative bacteria (no new changes)
# Table 03: Intrinsic resistance in other Gram-negative bacteria (no new changes)
-# Table 04: Intrinsic resistance in Gram-positive bacteria (2725 new changes)
+# Table 04: Intrinsic resistance in Gram-positive bacteria (2753 new changes)
# Table 08: Interpretive rules for B-lactam agents and Gram-positive cocci (no new changes)
# Table 09: Interpretive rules for B-lactam agents and Gram-negative rods (no new changes)
# Table 11: Interpretive rules for macrolides, lincosamides, and streptogramins (no new changes)
@@ -464,24 +464,24 @@
# Table 13: Interpretive rules for quinolones (no new changes)
#
# Other rules
-# Non-EUCAST: amoxicillin/clav acid = S where ampicillin = S (2301 new changes)
-# Non-EUCAST: ampicillin = R where amoxicillin/clav acid = R (114 new changes)
+# Non-EUCAST: amoxicillin/clav acid = S where ampicillin = S (2272 new changes)
+# Non-EUCAST: ampicillin = R where amoxicillin/clav acid = R (108 new changes)
# Non-EUCAST: piperacillin = R where piperacillin/tazobactam = R (no new changes)
# Non-EUCAST: piperacillin/tazobactam = S where piperacillin = S (no new changes)
# Non-EUCAST: trimethoprim = R where trimethoprim/sulfa = R (no new changes)
# Non-EUCAST: trimethoprim/sulfa = S where trimethoprim = S (no new changes)
#
# --------------------------------------------------------------------------
-# EUCAST rules affected 6,521 out of 20,000 rows, making a total of 7,794 edits
+# EUCAST rules affected 6,521 out of 20,000 rows, making a total of 7,862 edits
# => added 0 test results
#
-# => changed 7,794 test results
-# - 125 test results changed from S to I
-# - 4,678 test results changed from S to R
-# - 1,070 test results changed from I to S
-# - 286 test results changed from I to R
-# - 1,620 test results changed from R to S
-# - 15 test results changed from R to I
+# => changed 7,862 test results
+# - 112 test results changed from S to I
+# - 4,682 test results changed from S to R
+# - 1,085 test results changed from I to S
+# - 303 test results changed from I to R
+# - 1,657 test results changed from R to S
+# - 23 test results changed from R to I
# --------------------------------------------------------------------------
#
# Use verbose = TRUE to get a data.frame with all specified edits instead.
So only 28.1% is suitable for resistance analysis! We can now filter on it with the filter()
function, also from the dplyr
package:
So only 28.3% is suitable for resistance analysis! We can now filter on it with the filter()
function, also from the dplyr
package:
For future use, the above two syntaxes can be shortened with the filter_first_isolate()
function:
isolate | @@ -674,8 +674,8 @@||||||||||||||
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1 | -2010-03-09 | -J4 | +2010-01-13 | +L3 | B_ESCHR_COL | S | S | @@ -686,32 +686,32 @@|||||||
2 | -2010-03-19 | -J4 | +2010-02-27 | +L3 | B_ESCHR_COL | +S | +S | +R | R | -S | -S | -S | FALSE | TRUE |
3 | -2010-03-31 | -J4 | +2010-04-07 | +L3 | B_ESCHR_COL | S | S | -R | +S | S | FALSE | TRUE | ||
4 | -2010-05-07 | -J4 | +2010-08-07 | +L3 | B_ESCHR_COL | R | S | @@ -722,10 +722,10 @@|||||||
5 | -2010-06-21 | -J4 | +2010-08-09 | +L3 | B_ESCHR_COL | -I | +R | S | S | S | @@ -734,44 +734,44 @@||||
6 | -2010-07-10 | -J4 | +2010-09-28 | +L3 | B_ESCHR_COL | S | S | -R | +S | S | FALSE | TRUE | ||
7 | -2010-11-26 | -J4 | +2010-10-07 | +L3 | B_ESCHR_COL | -S | +R | S | R | S | FALSE | -FALSE | +TRUE | |
8 | -2010-12-04 | -J4 | +2010-12-15 | +L3 | B_ESCHR_COL | +S | +S | R | S | -S | -S | FALSE | TRUE | |
9 | -2011-03-19 | -J4 | +2011-10-03 | +L3 | B_ESCHR_COL | S | S | @@ -782,8 +782,8 @@|||||||
10 | -2011-05-16 | -J4 | +2011-12-06 | +L3 | B_ESCHR_COL | S | S | @@ -794,11 +794,11 @@
Instead of 2, now 7 isolates are flagged. In total, 75.4% of all isolates are marked ‘first weighted’ - 47.2% more than when using the CLSI guideline. In real life, this novel algorithm will yield 5-10% more isolates than the classic CLSI guideline.
+Instead of 2, now 8 isolates are flagged. In total, 75.6% of all isolates are marked ‘first weighted’ - 47.3% more than when using the CLSI guideline. In real life, this novel algorithm will yield 5-10% more isolates than the classic CLSI guideline.
As with filter_first_isolate()
, there’s a shortcut for this new algorithm too:
So we end up with 15,074 isolates for analysis.
+So we end up with 15,116 isolates for analysis.
We can remove unneeded columns:
@@ -822,93 +822,93 @@Or can be used like the dplyr
way, which is easier to read:
Frequency table of genus
and species
from a data.frame
(15,074 x 13)
Frequency table of genus
and species
from a data.frame
(15,116 x 13)
Columns: 2
-Length: 15,074 (of which NA: 0 = 0.00%)
+Length: 15,116 (of which NA: 0 = 0.00%)
Unique: 4
Shortest: 16
Longest: 24
The functions portion_S()
, portion_SI()
, portion_I()
, portion_IR()
and portion_R()
can be used to determine the portion of a specific antimicrobial outcome. As per the EUCAST guideline of 2019, we calculate resistance as the portion of R (portion_R()
) and susceptibility as the portion of S and I (portion_SI()
). These functions can be used on their own:
Or can be used in conjunction with group_by()
and summarise()
, both from the dplyr
package:
data_1st %>%
group_by(hospital) %>%
@@ -997,19 +997,19 @@ Longest: 24
Hospital A
-0.4603103
+0.4690460
Hospital B
-0.4641985
+0.4633959
Hospital C
-0.4670185
+0.4724376
Hospital D
-0.4706464
+0.4590056
@@ -1027,23 +1027,23 @@ Longest: 24
Hospital A
-0.4603103
-4447
+0.4690460
+4539
Hospital B
-0.4641985
-5321
+0.4633959
+5218
Hospital C
-0.4670185
-2274
+0.4724376
+2322
Hospital D
-0.4706464
-3032
+0.4590056
+3037
@@ -1063,27 +1063,27 @@ Longest: 24
Escherichia
-0.9235075
-0.8956557
-0.9941365
+0.9243844
+0.8899893
+0.9945128
Klebsiella
-0.8135048
-0.8926045
-0.9858521
+0.8101587
+0.8996825
+0.9828571
Staphylococcus
-0.9240951
-0.9289573
-0.9972988
+0.9207527
+0.9114763
+0.9912536
Streptococcus
-0.6182447
+0.6276132
0.0000000
-0.6182447
+0.6276132
diff --git a/docs/articles/AMR_files/figure-html/plot 1-1.png b/docs/articles/AMR_files/figure-html/plot 1-1.png
index b7382182..adb4667c 100644
Binary files a/docs/articles/AMR_files/figure-html/plot 1-1.png and b/docs/articles/AMR_files/figure-html/plot 1-1.png differ
diff --git a/docs/articles/AMR_files/figure-html/plot 3-1.png b/docs/articles/AMR_files/figure-html/plot 3-1.png
index 48275f63..915bacde 100644
Binary files a/docs/articles/AMR_files/figure-html/plot 3-1.png and b/docs/articles/AMR_files/figure-html/plot 3-1.png differ
diff --git a/docs/articles/AMR_files/figure-html/plot 4-1.png b/docs/articles/AMR_files/figure-html/plot 4-1.png
index 5f4f4bfd..65aae47a 100644
Binary files a/docs/articles/AMR_files/figure-html/plot 4-1.png and b/docs/articles/AMR_files/figure-html/plot 4-1.png differ
diff --git a/docs/articles/AMR_files/figure-html/plot 5-1.png b/docs/articles/AMR_files/figure-html/plot 5-1.png
index 713cb2ad..98f1c4be 100644
Binary files a/docs/articles/AMR_files/figure-html/plot 5-1.png and b/docs/articles/AMR_files/figure-html/plot 5-1.png differ
diff --git a/docs/articles/EUCAST.html b/docs/articles/EUCAST.html
index e3ed41b0..81461b30 100644
--- a/docs/articles/EUCAST.html
+++ b/docs/articles/EUCAST.html
@@ -40,7 +40,7 @@
The data set looks like this now:
head(my_TB_data)
# rifampicin isoniazid gatifloxacin ethambutol pyrazinamide moxifloxacin
-# 1 S S R S R S
-# 2 R R R S S R
-# 3 S S S S S S
-# 4 S S S S S R
-# 5 R R S S R R
-# 6 R S S R S S
+# 1 S S R R S R
+# 2 R S S S S R
+# 3 S S S S I I
+# 4 S I R I I S
+# 5 S R S R R R
+# 6 R R R R S S
# kanamycin
# 1 S
-# 2 S
-# 3 S
-# 4 R
+# 2 R
+# 3 R
+# 4 S
# 5 R
# 6 I
We can now add the interpretation of MDR-TB to our data set:
@@ -285,40 +285,40 @@ Unique: 5In the table above, all measurements are shown in milliseconds (thousandths of a second). A value of 5 milliseconds means it can determine 200 input values per second. In case of 100 milliseconds, this is only 10 input values per second. The second input is the only one that has to be looked up thoroughly. All the others are known codes (the first one is a WHONET code) or common laboratory codes, or common full organism names like the last one. Full organism names are always preferred.
To achieve this speed, the as.mo
function also takes into account the prevalence of human pathogenic microorganisms. The downside is of course that less prevalent microorganisms will be determined less fast. See this example for the ID of Thermus islandicus (B_THERMS_ISL
), a bug probably never found before in humans:
T.islandicus <- microbenchmark(as.mo("theisl"),
@@ -243,12 +243,12 @@
print(T.islandicus, unit = "ms", signif = 2)
# Unit: milliseconds
# expr min lq mean median uq max neval
-# as.mo("theisl") 470 470 500 500 520 530 10
-# as.mo("THEISL") 470 470 500 510 520 520 10
-# as.mo("T. islandicus") 75 75 82 76 76 130 10
-# as.mo("T. islandicus") 74 75 90 75 91 150 10
-# as.mo("Thermus islandicus") 73 74 90 74 120 130 10
That takes 8.3 times as much time on average. A value of 100 milliseconds means it can only determine ~10 different input values per second. We can conclude that looking up arbitrary codes of less prevalent microorganisms is the worst way to go, in terms of calculation performance. Full names (like Thermus islandicus) are almost as fast - these are the most probable input from most data sets.
+# as.mo("theisl") 470 470 500 520 520 520 10 +# as.mo("THEISL") 470 470 490 470 520 520 10 +# as.mo("T. islandicus") 74 74 79 74 75 120 10 +# as.mo("T. islandicus") 74 74 92 74 120 120 10 +# as.mo("Thermus islandicus") 73 73 80 73 73 140 10 +That takes 8.1 times as much time on average. A value of 100 milliseconds means it can only determine ~10 different input values per second. We can conclude that looking up arbitrary codes of less prevalent microorganisms is the worst way to go, in terms of calculation performance. Full names (like Thermus islandicus) are almost fast - these are the most probable input from most data sets.
In the figure below, we compare Escherichia coli (which is very common) with Prevotella brevis (which is moderately common) and with Thermus islandicus (which is very uncommon):
par(mar = c(5, 16, 4, 2)) # set more space for left margin text (16)
@@ -294,8 +294,8 @@
print(run_it, unit = "ms", signif = 3)
# Unit: milliseconds
# expr min lq mean median uq max neval
-# mo_fullname(x) 630 675 709 693 765 858 10
So transforming 500,000 values (!!) of 50 unique values only takes 0.69 seconds (692 ms). You only lose time on your unique input values.
+# mo_fullname(x) 637 674 745 720 764 936 10 +So transforming 500,000 values (!!) of 50 unique values only takes 0.72 seconds (720 ms). You only lose time on your unique input values.
So going from mo_fullname("Staphylococcus aureus")
to "Staphylococcus aureus"
takes 0.0017 seconds - it doesn’t even start calculating if the result would be the same as the expected resulting value. That goes for all helper functions:
So going from mo_fullname("Staphylococcus aureus")
to "Staphylococcus aureus"
takes 0.0018 seconds - it doesn’t even start calculating if the result would be the same as the expected resulting value. That goes for all helper functions:
run_it <- microbenchmark(A = mo_species("aureus"),
B = mo_genus("Staphylococcus"),
C = mo_fullname("Staphylococcus aureus"),
@@ -324,14 +324,14 @@
print(run_it, unit = "ms", signif = 3)
# Unit: milliseconds
# expr min lq mean median uq max neval
-# A 0.422 0.441 0.483 0.452 0.483 0.675 10
-# B 0.434 0.512 0.549 0.562 0.593 0.638 10
-# C 1.340 1.530 1.670 1.680 1.860 2.030 10
-# D 0.443 0.471 0.552 0.580 0.611 0.645 10
-# E 0.358 0.398 0.465 0.473 0.516 0.554 10
-# F 0.366 0.397 0.441 0.436 0.485 0.512 10
-# G 0.348 0.462 0.485 0.484 0.517 0.615 10
-# H 0.194 0.262 0.295 0.288 0.327 0.403 10
Of course, when running mo_phylum("Firmicutes")
the function has zero knowledge about the actual microorganism, namely S. aureus. But since the result would be "Firmicutes"
too, there is no point in calculating the result. And because this package ‘knows’ all phyla of all known bacteria (according to the Catalogue of Life), it can just return the initial value immediately.
Currently supported are German, Dutch, Spanish, Italian, French and Portuguese.
diff --git a/docs/articles/benchmarks_files/figure-html/unnamed-chunk-5-1.png b/docs/articles/benchmarks_files/figure-html/unnamed-chunk-5-1.png index 8c759f38..1644365b 100644 Binary files a/docs/articles/benchmarks_files/figure-html/unnamed-chunk-5-1.png and b/docs/articles/benchmarks_files/figure-html/unnamed-chunk-5-1.png differ diff --git a/docs/articles/freq.html b/docs/articles/freq.html index e235c3d2..fe9e6433 100644 --- a/docs/articles/freq.html +++ b/docs/articles/freq.html @@ -40,7 +40,7 @@ @@ -199,7 +199,7 @@freq.Rmd
Frequency tables (or frequency distributions) are summaries of the distribution of values in a sample. With the freq
function, you can create univariate frequency tables. Multiple variables will be pasted into one variable, so it forces a univariate distribution. We take the septic_patients
dataset (included in this AMR package) as example.
Frequency tables (or frequency distributions) are summaries of the distribution of values in a sample. With the freq()
function, you can create univariate frequency tables. Multiple variables will be pasted into one variable, so it forces a univariate distribution. We take the septic_patients
dataset (included in this AMR package) as example.
To only show and quickly review the content of one variable, you can just select this variable in various ways. Let’s say we want to get the frequencies of the gender
variable of the septic_patients
dataset:
# Any of these will work:
+# freq(septic_patients$gender)
+# freq(septic_patients[, "gender"])
+
+# Using tidyverse:
+# septic_patients$gender %>% freq()
+# septic_patients[, "gender"] %>% freq()
+# septic_patients %>% freq("gender")
+
+# Probably the fastest and easiest:
+septic_patients %>% freq(gender)
Frequency table of gender
from a data.frame
(2,000 x 49)
Class: character
Length: 2,000 (of which NA: 0 = 0.00%)
@@ -489,8 +499,10 @@ Outliers: 15 (unique count: 12)
Mean
Standard deviation
Coefficient of variation (CV), the standard deviation divided by the mean
Five numbers of Tukey (min, Q1, median, Q3, max)
Coefficient of quartile variation (CQV, sometimes called coefficient of dispersion), calculated as (Q3 - Q1) / (Q3 + Q1) using quantile with type = 6
as quantile algorithm to comply with SPSS standards
Mean absolute deviation (MAD), the median of the absolute deviations from the median - a more robust statistic than the standard deviation
Five numbers of Tukey, namely: the minimum, Q1, median, Q3 and maximum
Interquartile range (IQR), the distance between Q1 and Q3
Coefficient of quartile variation (CQV, sometimes called coefficient of dispersion), calculated as (Q3 - Q1) / (Q3 + Q1) using quantile()
with type = 6
as quantile algorithm to comply with SPSS standards
Outliers (total count and unique count)
So for example, the above frequency table quickly shows the median age of patients being 74.
@@ -498,7 +510,7 @@ Outliers: 15 (unique count: 12)To sort frequencies of factors on factor level instead of item count, use the sort.count
parameter.
To sort frequencies of factors on their levels instead of item count, use the sort.count
parameter.
sort.count
is TRUE
by default. Compare this default behaviour…
… with this, where items are now sorted on count:
+… to this, where items are now sorted on factor levels:
Frequency table of hospital_id
from a data.frame
(2,000 x 49)
All classes will be printed into the header (default is FALSE
when using markdown like this document). Variables with the new rsi
class of this AMR package are actually ordered factors and have three classes (look at Class
in the header):
All classes will be printed into the header. Variables with the new rsi
class of this AMR package are actually ordered factors and have three classes (look at Class
in the header):
Frequency table of AMX
from a data.frame
(2,000 x 49)
Frequency table of AMX
from a data.frame
(2,000 x 49)
Class: factor > ordered > rsi (numeric)
-Length: 2,000 (of which NA: 771 = 38.55%)
-Levels: 3: S < I < R
-Unique: 4
Drug: Amoxicillin (AMX, J01CA04)
-Group: Beta-lactams/penicillins
-%SI: 44.43%
- | Item | -Count | -Percent | -Cum. Count | -Cum. Percent | -
---|---|---|---|---|---|
1 | -(NA) | -771 | -38.6% | -771 | -38.6% | -
2 | -R | -683 | -34.2% | -1,454 | -72.7% | -
3 | -S | -543 | -27.2% | -1,997 | -99.8% | -
4 | -I | -3 | -0.2% | -2,000 | -100.0% | -
row.names
A frequency table shows row indices. To remove them, use row.names = FALSE
:
Frequency table of hospital_id
from a data.frame
(2,000 x 49)
Class: factor (numeric)
Length: 2,000 (of which NA: 0 = 0.00%)
@@ -902,58 +860,22 @@ Unique: 4
markdown
The markdown
parameter is TRUE
at default in non-interactive sessions, like in reports created with R Markdown. This will always print all rows, unless nmax
is set.
Frequency table of hospital_id
from a data.frame
(2,000 x 49)
Class: factor (numeric)
-Length: 2,000 (of which NA: 0 = 0.00%)
-Levels: 4: A, B, C, D
-Unique: 4
- | Item | -Count | -Percent | -Cum. Count | -Cum. Percent | -
---|---|---|---|---|---|
1 | -D | -762 | -38.1% | -762 | -38.1% | -
2 | -B | -663 | -33.2% | -1,425 | -71.2% | -
3 | -A | -321 | -16.0% | -1,746 | -87.3% | -
4 | -C | -254 | -12.7% | -2,000 | -100.0% | -
The markdown
parameter is TRUE
at default in non-interactive sessions, like in reports created with R Markdown. This will always print all rows, unless nmax
is set. Without markdown (like in regular R), a frequency table would print like:
septic_patients %>%
+ freq(hospital_id, markdown = FALSE)
+# Frequency table of `hospital_id` from a data.frame (2,000 x 49)
+#
+# Class: factor (numeric)
+# Length: 2,000 (of which NA: 0 = 0.00%)
+# Levels: 4: A, B, C, D
+# Unique: 4
+#
+# Item Count Percent Cum. Count Cum. Percent
+# --- ----- ------ -------- ----------- -------------
+# 1 D 762 38.1% 762 38.1%
+# 2 B 663 33.2% 1,425 71.2%
+# 3 A 321 16.0% 1,746 87.3%
+# 4 C 254 12.7% 2,000 100.0%