Mirror of https://github.com/msberends/AMR.git, synced 2026-01-11 19:14:39 +01:00
(v3.0.1.9012) fix translations
DESCRIPTION
@@ -1,6 +1,6 @@
 Package: AMR
-Version: 3.0.1.9008
-Date: 2026-01-06
+Version: 3.0.1.9012
+Date: 2026-01-07
 Title: Antimicrobial Resistance Data Analysis
 Description: Functions to simplify and standardise antimicrobial resistance (AMR)
     data analysis and to work with microbial and antimicrobial properties by
NEWS.md (2 changed lines)
@@ -1,4 +1,4 @@
-# AMR 3.0.1.9008
+# AMR 3.0.1.9012
 
 ### New
 * Integration with the **tidymodels** framework to allow seamless use of SIR, MIC and disk data in modelling pipelines via `recipes`
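As an illustrative sketch of what such a pipeline can look like (not code from this commit; the toy data, column names and the `step_mutate()` conversions are assumptions, not the package's dedicated recipe steps):

```r
library(AMR)
library(recipes)

# Toy data; the column names and outcome are made up for this sketch
df <- data.frame(
  mic_cipro = as.mic(c("0.25", "2", ">=32", "0.5")),
  sir_amox  = as.sir(c("S", "I", "R", "S")),
  outcome   = factor(c("no", "no", "yes", "no"))
)

# A minimal recipe that turns both columns into numeric predictors;
# the dedicated recipe steps shipped with AMR may differ from this.
rec <- recipe(outcome ~ ., data = df) |>
  step_mutate(
    mic_cipro = log2(as.numeric(mic_cipro)),
    sir_amox  = as.numeric(sir_amox)
  )

bake(prep(rec), new_data = NULL)
```

The point of the integration is that columns created with `as.mic()` and `as.sir()` keep enough structure to be converted into model-ready predictors inside a `recipes` pipeline.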
@@ -301,7 +301,7 @@ ab_info <- function(x, language = get_AMR_locale(), ...) {
     ab = as.character(x),
     cid = ab_cid(x),
     name = ab_name(x, language = language),
-    group = ab_group(x, language = language),
+    group = ab_group(x, language = language, all_groups = TRUE),
     atc = ab_atc(x),
     atc_group1 = ab_atc_group1(x, language = language),
     atc_group2 = ab_atc_group2(x, language = language),
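Illustrative effect of this change, assuming `ab_group()` accepts the `all_groups` argument exactly as used in the diff (inputs are examples only, output not shown):

```r
library(AMR)

# With all_groups = TRUE (as used in the diff above), the 'group' element of
# ab_info() can list every group an agent belongs to instead of a single one.
ab_info("AMX")$group
ab_group("AMX", all_groups = TRUE)
```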
@@ -279,29 +279,23 @@ translate_into_language <- function(from,
     }
   )
 
+  # non-regex part
+  translate_tokens <- function(tokens) {
+    patterns <- df_trans$pattern[df_trans$regular_expr == FALSE]
+    replacements <- df_trans[[lang]][df_trans$regular_expr == FALSE]
+    matches <- match(tokens, patterns)
+    tokens[!is.na(matches)] <- replacements[matches[!is.na(matches)]]
+    tokens
+  }
   from_unique_translated <- vapply(
     FUN.VALUE = character(1),
     USE.NAMES = FALSE,
     from_unique_translated,
     function(x) {
-      words <- strsplit(x, " ", fixed = TRUE)[[1]]
-      # print(words)
-      for (i in seq_along(words)) {
-        word_trans <- df_trans[[lang]][df_trans$regular_expr == FALSE][match(words[i], df_trans$pattern[df_trans$regular_expr == FALSE])]
-        if (!is.na(word_trans)) {
-          words[i] <- word_trans
-        }
-      }
-      words <- paste(words, collapse = " ")
-      words <- strsplit(x, "/", fixed = TRUE)[[1]]
-      # print(words)
-      for (i in seq_along(words)) {
-        word_trans <- df_trans[[lang]][df_trans$regular_expr == FALSE][match(words[i], df_trans$pattern[df_trans$regular_expr == FALSE])]
-        if (!is.na(word_trans)) {
-          words[i] <- word_trans
-        }
-      }
-      paste(words, collapse = " ")
+      delimiters <- "[ /()]"
+      split_regex <- paste0("(?<=", delimiters, ")|(?=", delimiters, ")")
+      tokens <- strsplit(x, split_regex, perl = TRUE)[[1]]
+      tokens <- translate_tokens(tokens)
+      paste(tokens, collapse = "")
     }
   )
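The removed loops translated space-separated and slash-separated words in two passes; the new code tokenises once, keeping the delimiters, and replaces translatable tokens with a single vectorised `match()`. A self-contained sketch of that technique, using a made-up lookup table instead of the package's internal `df_trans`:

```r
# Standalone illustration of the split-and-keep-delimiters approach;
# 'trans' is a made-up lookup table, not the package's internal df_trans.
trans <- data.frame(
  pattern     = c("resistance", "unknown"),
  replacement = c("resistentie", "onbekend")  # e.g. Dutch
)

translate_tokens <- function(tokens) {
  matches <- match(tokens, trans$pattern)
  tokens[!is.na(matches)] <- trans$replacement[matches[!is.na(matches)]]
  tokens
}

delimiters <- "[ /()]"
# Lookarounds split *around* each delimiter, so the delimiter survives as its
# own token and the string can be rebuilt with collapse = "".
split_regex <- paste0("(?<=", delimiters, ")|(?=", delimiters, ")")

x <- "unknown resistance (unknown/resistance)"
tokens <- strsplit(x, split_regex, perl = TRUE)[[1]]
paste(translate_tokens(tokens), collapse = "")
#> "onbekend resistentie (onbekend/resistentie)"
```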