style: fix excess line lengths
elipousson committed Jan 4, 2024
1 parent f91e7ab commit e461958
Showing 10 changed files with 117 additions and 60 deletions.
15 changes: 10 additions & 5 deletions R/acs_survey.R
@@ -65,7 +65,11 @@ NULL
acs_survey_match <- function(survey = "acs5",
error_call = caller_env()) {
# See acs_surveys
arg_match0(survey, c("acs1", "acs3", "acs5", "acsse"), error_call = error_call)
arg_match0(
survey,
c("acs1", "acs3", "acs5", "acsse"),
error_call = error_call
)
}

#' @rdname acs_survey
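
A hedged aside on the wrapped call above: rlang's arg_match0() returns the
matched value when `survey` is one of the allowed strings and otherwise
signals an error attributed to the caller via error_call. Illustrative only,
not part of this commit:

  acs_survey_match("acs5")
  # returns "acs5"
  acs_survey_match("acs7")
  # errors, roughly: `survey` must be one of "acs1", "acs3", "acs5", or
  # "acsse", not "acs7".
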
@@ -119,10 +123,11 @@ acs_survey_ts <- function(survey = "acs5",
#' `year_start` variable which is the earliest year for a survey sample
#' specified by the survey parameter.
#' @export
acs_survey_label <- function(survey = "acs5",
year = 2022,
pattern = "{year_start}-{year} ACS {sample}-year Estimates",
prefix = "") {
acs_survey_label <- function(
survey = "acs5",
year = 2022,
pattern = "{year_start}-{year} ACS {sample}-year Estimates",
prefix = "") {
sample <- acs_survey_sample(survey)

year_start <- year - (as.integer(sample) - 1)
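
A worked example of the label arithmetic in the hunk above, assuming
acs_survey_sample("acs5") returns "5" (an assumption; that helper is not
shown in this diff):

  acs_survey_label(survey = "acs5", year = 2022)
  # sample     <- "5"
  # year_start <- 2022 - (5 - 1)  # 2018
  # glue() then fills the default pattern, giving:
  # "2018-2022 ACS 5-year Estimates"
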
50 changes: 40 additions & 10 deletions R/collapse_acs_variables.R
@@ -91,18 +91,34 @@ collapse_acs_variables <- function(data,
data <- dplyr::summarise(
data,
"{variable_col}" := list(unique(.data[[variable_col]])),
"{value_col}" := round(sum(.data[[value_col]], na.rm = na.rm), digits = digits),
"{value_col}" := round(
sum(.data[[value_col]], na.rm = na.rm),
digits = digits
),
"{moe_col}" := round(
tidycensus::moe_sum(.data[[moe_col]], estimate = .data[[value_col]], na.rm = na.rm),
tidycensus::moe_sum(
.data[[moe_col]],
estimate = .data[[value_col]],
na.rm = na.rm
),
digits = digits
),
"{perc_cols[[1]]}" := round(
sum(.data[[perc_cols[[1]]]], na.rm = na.rm),
digits = digits
),
"{perc_cols[[1]]}" := round(sum(.data[[perc_cols[[1]]]], na.rm = na.rm), digits = digits),
"{perc_cols[[2]]}" := round(
tidycensus::moe_sum(.data[[perc_cols[[2]]]], estimate = .data[[perc_cols[[1]]]], na.rm = na.rm),
tidycensus::moe_sum(
.data[[perc_cols[[2]]]],
estimate = .data[[perc_cols[[1]]]],
na.rm = na.rm
),
digits = digits
),
dplyr::across(
-any_of(c(name_col, label_col, variable_col, value_col, moe_col, perc_cols)),
-any_of(
c(name_col, label_col, variable_col, value_col, moe_col, perc_cols)
),
function(x) {
list(unique(x))
}
@@ -113,13 +129,22 @@ collapse_acs_variables <- function(data,
data <- dplyr::summarise(
data,
"{variable_col}" := list(unique(.data[[variable_col]])),
"{value_col}" := round(sum(.data[[value_col]], na.rm = na.rm), digits = digits),
"{value_col}" := round(
sum(.data[[value_col]], na.rm = na.rm),
digits = digits
),
"{moe_col}" := round(
tidycensus::moe_sum(.data[[moe_col]], estimate = .data[[value_col]], na.rm = na.rm),
tidycensus::moe_sum(
.data[[moe_col]],
estimate = .data[[value_col]],
na.rm = na.rm
),
digits = digits
),
dplyr::across(
-any_of(c(name_col, label_col, variable_col, value_col, moe_col)),
-any_of(
c(name_col, label_col, variable_col, value_col, moe_col)
),
function(x) {
list(unique(x))
}
@@ -133,9 +158,14 @@ collapse_acs_variables <- function(data,
# FIXME: This could be a weighted mean if some valid weight is included in
# the dataset. As is, these values may be invalid if the relative size of
# the collapsed groups is very different.
"{value_col}" := round(mean(.data[[value_col]], na.rm = na.rm), digits = digits),
"{value_col}" := round(
mean(.data[[value_col]], na.rm = na.rm),
digits = digits
),
dplyr::across(
-any_of(c(name_col, label_col, variable_col, value_col, moe_col)),
-any_of(
c(name_col, label_col, variable_col, value_col, moe_col)
),
function(x) {
list(unique(x))
}
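
On the FIXME above: a plain mean across collapsed groups of very different
sizes can be misleading, and the note suggests a weighted mean. A minimal
sketch of what the {value_col} summary inside dplyr::summarise() could look
like instead, assuming a hypothetical weight_col holding a valid weight
(not part of this commit; it mirrors the stats::weighted.mean() call used
in R/xwalk.R below):

  "{value_col}" := round(
    stats::weighted.mean(
      x = .data[[value_col]],
      w = .data[[weight_col]],  # hypothetical weight column
      na.rm = na.rm
    ),
    digits = digits
  )
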
6 changes: 3 additions & 3 deletions R/fmt_acs.R
@@ -30,9 +30,9 @@ fmt_acs_county <- function(data,
columns = all_of(name_col),
...) {
if (is.null(state) && any(data[["geography"]] == "state")) {
state <- unique(
data[data[["geography"]] == "state", name_col]
)
state <- unique(
data[data[["geography"]] == "state", name_col]
)
}

pattern <- glue(pattern)
10 changes: 8 additions & 2 deletions R/get_acs_metadata.R
@@ -125,9 +125,15 @@ label_acs_metadata <- function(data,
perc = TRUE,
geoid_col = "GEOID",
variable_col = "variable") {
data <- label_acs_table_metadata(data, survey, year, variable_col = variable_col)
data <- label_acs_table_metadata(
data, survey, year,
variable_col = variable_col
)

data <- label_acs_column_metadata(data, survey, year, variable_col = variable_col)
data <- label_acs_column_metadata(
data, survey, year,
variable_col = variable_col
)

if (perc && all(has_name(data, geoid_col))) {
data <- join_acs_percent(data, geoid_col = geoid_col)
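
For orientation, a hedged sketch of how the two labeling steps and the
optional percent join compose (the input acs_data and the argument values
are illustrative placeholders, not from this diff):

  labeled <- label_acs_metadata(
    acs_data,     # hypothetical tidycensus-style result with a GEOID column
    survey = "acs5",
    year = 2022,
    perc = TRUE   # with a GEOID column present, percent columns are joined
  )
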
5 changes: 3 additions & 2 deletions R/get_decennial_ts.R
@@ -54,9 +54,10 @@ get_decennial_ts <- function(geography,
)

if (has_length(year, 1)) {
if (!all(year %in% c(2000, 2010, 2020))) {
allow_years <- c(2000, 2010, 2020)
if (!all(year %in% allow_years)) {
cli::cli_abort(
c("{.arg year} can't include any values other than {c(2000, 2010, 2020)}.",
c("{.arg year} can't include any values other than {allow_years}.",
"i" = "Try using NHGIS for earlier decennial Census data:
{.url https://www.nhgis.org/}"
)
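
A side note on the {allow_years} interpolation above: cli collapses vectors
in inline text, so the abort message should read roughly as shown below
(illustrative, using the default "and" collapse):

  allow_years <- c(2000, 2010, 2020)
  cli::cli_text(
    "{.arg year} can't include any values other than {allow_years}."
  )
  # `year` can't include any values other than 2000, 2010, and 2020.
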
6 changes: 5 additions & 1 deletion R/join_tigris_geometry.R
@@ -77,7 +77,11 @@ join_tigris_geometry <- function(data = NULL,
\(geography, state, county) {
switch(geography,
"block" = tigris::blocks(state = state, county = county, ...),
"block group" = tigris::block_groups(state = state, county = county, ...),
"block group" = tigris::block_groups(
state = state,
county = county,
...
),
"tract" = tigris::tracts(state = state, county = county, ...),
"county" = tigris::counties(state = state, ...),
"state" = tigris::states(...)
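
The switch above maps each supported geography to its tigris download
function. A brief illustration of what the dispatch resolves to (the
anonymous function is internal to join_tigris_geometry(); the state and
county values are illustrative):

  # geography = "tract"
  tigris::tracts(state = "MD", county = "Baltimore city")
  # geography = "county" drops the county argument
  tigris::counties(state = "MD")
  # geography = "state" ignores both and calls tigris::states()
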
2 changes: 1 addition & 1 deletion R/select_acs.R
@@ -30,7 +30,7 @@
#' county = "Baltimore city"
#' )
#'
#' select_acs(edu_data)
#' select_acs(edu_data)
#' }
#' }
#' @importFrom dplyr select
21 changes: 16 additions & 5 deletions R/xwalk.R
@@ -195,8 +195,10 @@ make_area_xwalk <- function(area,
if (coverage && st_is_all_predicate(block_xwalk, area)) {
cli::cli_bullets(
c(
"!" = "All features in {.arg block_xwalk} already intersect with {.arg area}",
"*" = "Setting {.arg coverage} to {.code FALSE} to avoid inaccurate results."
"!" = "All features in {.arg block_xwalk} already
intersect with {.arg area}",
"*" = "Setting {.arg coverage} to {.code FALSE} to
avoid inaccurate results."
)
)

@@ -373,7 +375,8 @@ rbind_area_coverage <- function(area,
st_make_valid_coverage(block_xwalk, area),
error = function(cnd) {
cli_abort(
c("Valid spatial coverage for the area of {.arg block_xwalk} outside the {.arg area_xwalk} can't be created.",
c("Valid spatial coverage for the area of {.arg block_xwalk}
outside the {.arg area_xwalk} can't be created.",
"*" = "Set {.code coverage = FALSE} and try again."
),
parent = cnd,
@@ -524,7 +527,11 @@ summarise_weighted_sum <- function(data,
digits = digits
),
"{moe_col}" := round(
tidycensus::moe_sum(.data[[moe_col]], .data[[value_col]] * .data[[weight_col]]),
tidycensus::moe_sum(
moe = .data[[moe_col]],
estimate = .data[[value_col]] * .data[[weight_col]],
na.rm = na.rm
),
digits = digits
),
.by = all_of(c(name_col, variable_col))
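
A small worked example of the tidycensus::moe_sum() call above (numbers are
illustrative): when no estimate is zero, moe_sum() returns the square root
of the sum of the squared margins of error.

  tidycensus::moe_sum(
    moe = c(10, 12),
    estimate = c(100, 250) * c(0.5, 1),  # estimate * weight, as above
    na.rm = TRUE
  )
  # sqrt(10^2 + 12^2), roughly 15.6
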
@@ -544,7 +551,11 @@ summarise_weighted_mean <- function(data,
dplyr::summarise(
data,
"{value_col}" := round(
stats::weighted.mean(.data[[value_col]], w = .data[[weight_col]], na.rm = na.rm),
stats::weighted.mean(
x = .data[[value_col]],
w = .data[[weight_col]],
na.rm = na.rm
),
digits = digits
),
# FIXME: Explore options to calculate an interpolated MOE per this method:
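
A quick arithmetic check of the stats::weighted.mean() call wrapped above
(values are illustrative):

  # (10 * 1 + 20 * 3) / (1 + 3) = 17.5
  stats::weighted.mean(x = c(10, 20), w = c(1, 3), na.rm = TRUE)
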
60 changes: 30 additions & 30 deletions tests/testthat/test-filter_acs.R
@@ -1,38 +1,38 @@
test_that("filter_acs works", {
edu_data <- get_acs_geographies(
c("county", "state"),
table = "B15003",
state = "MD",
county = "Baltimore city"
)
edu_data <- get_acs_geographies(
c("county", "state"),
table = "B15003",
state = "MD",
county = "Baltimore city"
)

expect_s3_class(
filter_acs(edu_data, vars = "B15003_017"),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, vars = "B15003_017"),
"data.frame"
)

expect_s3_class(
filter_acs(edu_data, vars = 17),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, vars = 17),
"data.frame"
)

expect_s3_class(
filter_acs(edu_data, drop_vars = 1),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, drop_vars = 1),
"data.frame"
)

expect_s3_class(
filter_acs(edu_data, drop_vars = 1),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, drop_vars = 1),
"data.frame"
)

expect_s3_class(
filter_acs(edu_data, geography = "county"),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, geography = "county"),
"data.frame"
)

expect_s3_class(
filter_acs(edu_data, column = "Master's degree"),
"data.frame"
)
expect_s3_class(
filter_acs(edu_data, column = "Master's degree"),
"data.frame"
)
})
2 changes: 1 addition & 1 deletion tests/testthat/test-xwalk.R
@@ -9,7 +9,7 @@ test_that("xwalk functions work", {
expect_s3_class(block_xwalk, "sf")

area_xwalk <- make_area_xwalk(
area = sf::st_buffer(block_xwalk[1,], dist = 5000),
area = sf::st_buffer(block_xwalk[1, ], dist = 5000),
block_xwalk = block_xwalk
)

