
Commit 24c602f

flatten() -> unwrap()
1 parent d8bed50 commit 24c602f

33 files changed (+152, -120 lines)

DESCRIPTION

Lines changed: 1 addition & 1 deletion
@@ -1,6 +1,6 @@
 Package: batchtools
 Title: Tools for Computation on Batch Systems
-Version: 0.9.7-9002
+Version: 0.9.8
 Authors@R: c(
     person("Michel", "Lang", NULL, "michellang@gmail.com",
       role = c("cre", "aut"), comment = c(ORCID = "0000-0001-9754-0393")),

NAMESPACE

Lines changed: 1 addition & 0 deletions
@@ -118,6 +118,7 @@ export(sweepRegistry)
 export(syncRegistry)
 export(testJob)
 export(ujoin)
+export(unwrap)
 export(waitForJobs)
 import(checkmate)
 import(data.table)

NEWS.md

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,8 @@

 * Renamed column "memory" in the status table to "mem.used" to avoid name clashes with the resource specification.
 * Exported function `assertRegistry()`.
+* New function `unwrap()` as alias to `flatten()`.
+  The latter causes a name clash with package `purrr` and will be deprecated in a future version.
 * Registries now contain a unique hash which is updated each time the registry is altered.
   Can be utilized to invalidate caches, e.g. the cache of knitr.

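For context, a minimal sketch of the new alias in use, in the style of the package examples updated below. The throwaway registry and the squaring job are illustrative only, not part of this commit; `unwrap()` behaves exactly like `flatten()`, so existing code keeps working while it migrates to the new name.

library(batchtools)

# throwaway registry, as used throughout the package examples
tmp = makeRegistry(file.dir = NA, make.default = FALSE)
batchMap(function(x) list(sq = x^2), x = 1:3, reg = tmp)
submitJobs(reg = tmp)
waitForJobs(reg = tmp)

# unwrap() is a drop-in alias for flatten(): both unnest list columns
# (e.g. job.pars, result) of the returned data.table into regular columns
pars = unwrap(getJobPars(reg = tmp))
res  = unwrap(reduceResultsDataTable(reg = tmp))
ijoin(pars, res)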

R/ExperimentRegistry.R

Lines changed: 3 additions & 3 deletions
@@ -29,7 +29,7 @@
 #' summarizeExperiments(reg = tmp)
 #' summarizeExperiments(reg = tmp, by = c("problem", "algorithm", "n"))
 #' ids = findExperiments(prob.pars = (n == 50), reg = tmp)
-#' print(flatten(getJobPars(ids, reg = tmp)))
+#' print(unwrap(getJobPars(ids, reg = tmp)))
 #'
 #' # Submit jobs
 #' submitJobs(reg = tmp)
@@ -42,8 +42,8 @@
 #' # Join info table with all results and calculate mean of results
 #' # grouped by n and algorithm
 #' ids = findDone(reg = tmp)
-#' pars = flatten(getJobPars(ids, reg = tmp))
-#' results = flatten(reduceResultsDataTable(ids, fun = function(res) list(res = res), reg = tmp))
+#' pars = unwrap(getJobPars(ids, reg = tmp))
+#' results = unwrap(reduceResultsDataTable(ids, fun = function(res) list(res = res), reg = tmp))
 #' tab = ljoin(pars, results)
 #' tab[, list(mres = mean(res)), by = c("n", "algorithm")]
 makeExperimentRegistry = function(file.dir = "registry", work.dir = getwd(), conf.file = findConfFile(), packages = character(0L), namespaces = character(0L),

R/addExperiments.R

Lines changed: 1 addition & 1 deletion
@@ -66,7 +66,7 @@
 #'
 #' # check what has been created
 #' summarizeExperiments(reg = tmp)
-#' flatten(getJobPars(reg = tmp))
+#' unwrap(getJobPars(reg = tmp))
 addExperiments = function(prob.designs = NULL, algo.designs = NULL, repls = 1L, combine = "crossprod", reg = getDefaultRegistry()) {
   convertDesigns = function(type, designs, keywords) {
     check.factors = default.stringsAsFactors()

R/batchMapResults.R

Lines changed: 2 additions & 2 deletions
@@ -33,11 +33,11 @@
 #' waitForJobs(reg = target)
 #'
 #' # Map old to new ids. First, get a table with results and parameters
-#' results = flatten(rjoin(getJobPars(reg = target), reduceResultsDataTable(reg = target)))
+#' results = unwrap(rjoin(getJobPars(reg = target), reduceResultsDataTable(reg = target)))
 #' print(results)
 #'
 #' # Parameter '..id' points to job.id in 'source'. Use a inner join to combine:
-#' ijoin(results, flatten(reduceResultsDataTable(reg = tmp)), by = c("..id" = "job.id"))
+#' ijoin(results, unwrap(reduceResultsDataTable(reg = tmp)), by = c("..id" = "job.id"))
 batchMapResults = function(fun, ids = NULL, ..., more.args = list(), target, source = getDefaultRegistry()) {
   assertRegistry(source, sync = TRUE)
   assertRegistry(target, writeable = TRUE, sync = TRUE)

R/clusterFunctionsOpenLava.R

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 #' @title ClusterFunctions for OpenLava
 #'
 #' @description
-#' Cluster functions for OpenLava (\url{http://www.openlava.org/}).
+#' Cluster functions for OpenLava.
 #'
 #' Job files are created based on the brew template \code{template}. This
 #' file is processed with brew and then submitted to the queue using the

R/estimateRuntimes.R

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@
 #' addExperiments(algo.designs = list(ncol = CJ(x = 1:50, y = letters[1:5])), reg = tmp)
 #'
 #' # We use the job parameters to predict runtimes
-#' tab = flatten(getJobPars(reg = tmp))
+#' tab = unwrap(getJobPars(reg = tmp))
 #'
 #' # First we need to submit some jobs so that the forest can train on some data.
 #' # Thus, we just sample some jobs from the registry while grouping by factor variables.

R/reduceResults.R

Lines changed: 1 addition & 1 deletion
@@ -174,7 +174,7 @@ reduceResults = function(fun, ids = NULL, init, ..., reg = getDefaultRegistry())
 #'   getJobPars(reg = tmp),
 #'   reduceResultsDataTable(reg = tmp, fun = function(x) list(res = x))
 #' )
-#' flatten(res, sep = ".")
+#' unwrap(res, sep = ".")
 reduceResultsList = function(ids = NULL, fun = NULL, ..., missing.val, reg = getDefaultRegistry()) {
   assertRegistry(reg, sync = TRUE)
   assertFunction(fun, null.ok = TRUE)

R/submitJobs.R

Lines changed: 2 additions & 2 deletions
@@ -110,10 +110,10 @@
 #'
 #' # Retrive information about memory, combine with parameters
 #' info = ijoin(getJobStatus(reg = tmp)[, .(job.id, mem.used)], getJobPars(reg = tmp))
-#' print(flatten(info))
+#' print(unwrap(info))
 #'
 #' # Combine job info with results -> each job is aggregated using mean()
-#' flatten(ijoin(info, reduceResultsDataTable(fun = function(res) list(res = mean(res)), reg = tmp)))
+#' unwrap(ijoin(info, reduceResultsDataTable(fun = function(res) list(res = mean(res)), reg = tmp)))
 #'
 #' ### Example 2: Multicore execution on the slave
 #' tmp = makeRegistry(file.dir = NA, make.default = FALSE)
