Mirror of https://github.com/msberends/AMR.git, synced 2026-01-11 23:14:34 +01:00

(v3.0.1.9014) try-again fix
DESCRIPTION
@@ -1,5 +1,5 @@
 Package: AMR
-Version: 3.0.1.9013
+Version: 3.0.1.9014
 Date: 2026-01-07
 Title: Antimicrobial Resistance Data Analysis
 Description: Functions to simplify and standardise antimicrobial resistance (AMR)
NEWS.md
@@ -1,4 +1,4 @@
-# AMR 3.0.1.9013
+# AMR 3.0.1.9014
 
 ### New
 * Integration with the **tidymodels** framework to allow seamless use of SIR, MIC and disk data in modelling pipelines via `recipes`
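The NEWS entry above only names the feature. As a rough, hypothetical sketch of what such a pipeline can look like (the data frame, column names, outcome and preprocessing choices below are invented for illustration and are not the package's documented integration API), MIC and SIR values could be prepared for modelling with `recipes` like this:

library(AMR)
library(recipes)

# Toy data; column names and values are invented for illustration.
isolates <- data.frame(
  mic_cipro = as.mic(c(0.25, 0.5, 8, 16)),    # AMR 'mic' class
  sir_amox  = as.sir(c("S", "S", "R", "I")),  # AMR 'sir' class
  outcome   = factor(c("no", "no", "yes", "yes"))
)

# Convert the AMR classes to plain types before modelling; taking log2 of MIC
# values is a common convention here, not something this commit prescribes.
model_data <- data.frame(
  mic_cipro = log2(as.numeric(isolates$mic_cipro)),
  sir_amox  = factor(as.character(isolates$sir_amox), levels = c("S", "I", "R")),
  outcome   = isolates$outcome
)

rec <- recipe(outcome ~ ., data = model_data) |>
  step_dummy(all_nominal_predictors()) |>
  step_normalize(all_numeric_predictors())

bake(prep(rec), new_data = NULL)

With the integration the NEWS entry describes, the explicit conversion step should become unnecessary, since the point of the feature is to let these data types flow through `recipes` directly.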
@@ -263,6 +263,30 @@ translate_into_language <- function(from,
     df_trans$pattern[df_trans$regular_expr == TRUE] <- gsub("$$", "$", df_trans$pattern[df_trans$regular_expr == TRUE], fixed = TRUE)
   }
 
+  # non-regex part
+  translate_tokens <- function(tokens) {
+    patterns <- df_trans$pattern[df_trans$regular_expr == FALSE]
+    replacements <- df_trans[[lang]][df_trans$regular_expr == FALSE]
+    matches <- match(tokens, patterns)
+    tokens[!is.na(matches)] <- replacements[matches[!is.na(matches)]]
+    tokens
+  }
+  from_unique_translated[order(nchar(from_unique_translated), decreasing = TRUE)] <- vapply(
+    FUN.VALUE = character(1),
+    USE.NAMES = FALSE,
+    from_unique_translated[order(nchar(from_unique_translated), decreasing = TRUE)],
+    function(x) {
+      delimiters <- "[ /()]"
+      split_regex <- paste0("(?<=", delimiters, ")|(?=", delimiters, ")")
+      tokens <- strsplit(x, split_regex, perl = TRUE)[[1]]
+      tokens <- translate_tokens(tokens)
+      out <- paste(tokens, collapse = "")
+      # also try with those tokens
+      out <- translate_tokens(out)
+      out
+    }
+  )
+
   df_trans_regex <- df_trans[which(df_trans$regular_expr == TRUE), ]
   # regex part
   lapply(
@@ -279,26 +303,6 @@ translate_into_language <- function(from,
       )
     }
   )
-  # non-regex part
-  translate_tokens <- function(tokens) {
-    patterns <- df_trans$pattern[df_trans$regular_expr == FALSE]
-    replacements <- df_trans[[lang]][df_trans$regular_expr == FALSE]
-    matches <- match(tokens, patterns)
-    tokens[!is.na(matches)] <- replacements[matches[!is.na(matches)]]
-    tokens
-  }
-  from_unique_translated <- vapply(
-    FUN.VALUE = character(1),
-    USE.NAMES = FALSE,
-    from_unique_translated,
-    function(x) {
-      delimiters <- "[ /()]"
-      split_regex <- paste0("(?<=", delimiters, ")|(?=", delimiters, ")")
-      tokens <- strsplit(x, split_regex, perl = TRUE)[[1]]
-      tokens <- translate_tokens(tokens)
-      paste(tokens, collapse = "")
-    }
-  )
 
   # force UTF-8 for diacritics
   from_unique_translated <- enc2utf8(from_unique_translated)
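For orientation, a small standalone illustration of the token-splitting approach used in the new code (not part of the commit; the example string and the two dictionary entries are invented): the lookaround pattern splits on spaces, slashes and parentheses while keeping those delimiters as tokens, so exact-match replacements can be pasted back together without disturbing spacing or punctuation.

delimiters <- "[ /()]"
split_regex <- paste0("(?<=", delimiters, ")|(?=", delimiters, ")")

# Splitting keeps the delimiters themselves as separate tokens.
tokens <- strsplit("Gram-negative (anaerobic) rods", split_regex, perl = TRUE)[[1]]
# -> "Gram-negative" " " "(" "anaerobic" ")" " " "rods"

# Exact-match replacement of whole tokens, analogous to translate_tokens();
# the two Dutch terms are illustrative only.
dictionary <- c(anaerobic = "anaeroob", rods = "staven")
hits <- match(tokens, names(dictionary))
tokens[!is.na(hits)] <- dictionary[hits[!is.na(hits)]]
paste(tokens, collapse = "")
# -> "Gram-negative (anaeroob) staven"

Because match() only ever replaces complete tokens, the fixed (non-regex) translations cannot accidentally rewrite parts of longer words.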