I have a list structure representing a table that's handed to me like this:
> l = list(list(1, 4), list(2, 5), list(3, 6))
> str(l)
List of 3
 $ :List of 2
  ..$ : num 1
  ..$ : num 4
 $ :List of 2
  ..$ : num 2
  ..$ : num 5
 $ :List of 2
  ..$ : num 3
  ..$ : num 6
And I'd like to convert it to this:
> lt = list(x = c(1, 2, 3), y = c(4, 5, 6))
> str(lt)
List of 2
 $ x: num [1:3] 1 2 3
 $ y: num [1:3] 4 5 6
I've written a function that does it in a really simple manner using Reduce (it appears as f5 in the benchmarks below), but I feel like there must be a smarter way to do it.
Any help appreciated, thanks!
Benchmarks
Thanks all, much appreciated! I benchmarked the answers to pick the fastest for a larger test case:
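The exact larger input isn't shown here; anything with the same shape, just bigger, works. For instance (a sketch, not the actual test data):

l <- lapply(seq_len(1000), function(i) list(i, i + 1000))  # 1000 inner lists of length 2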
f1 = function(l) {
  # k = number of elements in each inner list
  k <- length(unlist(l)) / length(l)
  # the i-th output vector gathers the i-th element of every inner list
  lapply(seq_len(k), function(i) sapply(l, "[[", i))
}
f2 = function(l) {
  n <- length(l[[1]])
  # unlist() interleaves the inner lists, so splitting on a cycling
  # x1..xn index regroups the values by position
  # (levels sort alphabetically, so group order is only natural for n <= 9)
  split(unlist(l, use.names = FALSE), paste0("x", seq_len(n)))
}
f3 = function(l) {
  # cbind the unlisted inner lists into a k x n matrix, then split its
  # column-major values by row index 1..k
  split(do.call(cbind, lapply(l, unlist)), seq(unique(lengths(l))))
}
f4 = function(l) {
  # %>% comes from magrittr (also re-exported by purrr)
  l %>%
    purrr::transpose() %>%
    purrr::map(unlist)
}
f5 = function(l) {
  # bind the inner lists together into a matrix (of lists)
  temp <- Reduce(rbind, l)
  # split the unlisted values by column index
  split(unlist(temp), col(temp))
}
f6 = function(l) {
  # data.table::transpose() flips a list of equal-length vectors directly
  data.table::transpose(lapply(l, unlist))
}
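All six candidates return the same values and differ only in list names, so comparing after unname() makes a quick sanity check (a sketch using the question's small example):

l0 <- list(list(1, 4), list(2, 5), list(3, 6))
target <- list(c(1, 2, 3), c(4, 5, 6))  # lt without the x/y names
res <- lapply(list(f1, f2, f3, f4, f5, f6), function(f) unname(f(l0)))
stopifnot(all(sapply(res, identical, target)))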
microbenchmark::microbenchmark(
  lapply = f1(l),
  split_seq = f2(l),
  unique = f3(l),
  tidy = f4(l),
  Reduce = f5(l),
  dt = f6(l),
  times = 10000
)
Unit: microseconds
expr min lq mean median uq max neval
lapply 165.057 179.6160 199.9383 186.2460 195.0005 4983.883 10000
split_seq 85.655 94.6820 107.5544 98.5725 104.1175 4609.378 10000
unique 144.908 159.6365 182.2863 165.9625 174.7485 3905.093 10000
tidy 99.547 122.8340 141.9482 129.3565 138.3005 8545.215 10000
Reduce 172.039 190.2235 216.3554 196.8965 206.8545 3652.939 10000
dt 98.072 106.6200 120.0749 110.0985 116.0950 3353.926 10000
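So split_seq (f2) comes out fastest here, with the data.table transpose (f6) close behind.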