Scrapes survey tables from wahlrecht.de and cleans them to output tidy data.

Usage

scrape_wahlrecht(
  address = "https://www.wahlrecht.de/umfragen/emnid.htm",
  parties = c("CDU", "SPD", "GRUENE", "FDP", "LINKE", "PIRATEN", "FW", "AFD",
    "SONSTIGE")
)

scrape_by(
  address = "https://www.wahlrecht.de/umfragen/landtage/bayern.htm",
  parties = c("CSU", "SPD", "GRUENE", "FDP", "LINKE", "PIRATEN", "FW", "AFD",
    "SONSTIGE")
)

scrape_rp(
  address = "https://www.wahlrecht.de/umfragen/landtage/rheinland-pfalz.htm",
  parties = c("CDU", "SPD", "GRUENE", "FDP", "LINKE", "AFD", "FW", "SONSTIGE"),
  ind_row_remove = -c(1:3)
)

scrape_ltw(
  address = "https://www.wahlrecht.de/umfragen/landtage/niedersachsen.htm",
  parties = c("CDU", "SPD", "GRUENE", "FDP", "LINKE", "PIRATEN", "FW", "AFD",
    "SONSTIGE"),
  ind_row_remove = -c(1:2)
)

Arguments

address

URL (http address) of the page from which the tables are scraped.

parties

A character vector containing the names of the parties (columns) to collapse into the tidy output.
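
For illustration, a hypothetical call that restricts the scrape to a smaller set of parties might look like this (a sketch; the names must match the party columns on the scraped page):

scrape_wahlrecht(
  address = "https://www.wahlrecht.de/umfragen/emnid.htm",
  parties = c("CDU", "SPD", "GRUENE", "AFD", "SONSTIGE")
)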

ind_row_remove

A vector of negative indices giving rows to skip at the beginning of the table.
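
The argument relies on R's negative indexing, i.e. the listed rows are dropped rather than kept. A minimal base-R sketch of the same idea (df is a hypothetical data frame standing in for the scraped table):

df <- data.frame(x = 1:5)
df[-c(1:3), , drop = FALSE]  # drops the first three rows, keeps rows 4 and 5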

Examples

if (FALSE) {
library(coalitions)
library(dplyr)
# select a polling agency from .pollster_df that should be scraped ...
coalitions:::.pollster_df
# ... here we choose Forsa
address <- coalitions:::.pollster_df %>%
  filter(pollster == "forsa") %>%
  pull(address)
scrape_wahlrecht(address = address) %>%
  slice(1:5)
}

if (FALSE) {
# Niedersachsen
scrape_ltw() %>%
  slice(1:5)
# Hessen
scrape_ltw("https://www.wahlrecht.de/umfragen/landtage/hessen.htm",
  ind_row_remove = -c(1)) %>%
  slice(1:5)
}
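
The remaining scrapers follow the same pattern with the defaults shown in the usage section; a sketch (not run, assuming coalitions and dplyr are loaded as in the first example):

if (FALSE) {
# Bayern (defaults as in the usage section)
scrape_by() %>%
  slice(1:5)
# Rheinland-Pfalz (defaults as in the usage section)
scrape_rp() %>%
  slice(1:5)
}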