AE 10: Scraping multiple pages of articles from the Cornell Review
Suggested answers
Packages
We will use the following packages in this application exercise.
- tidyverse: For data import, wrangling, and visualization.
- rvest: For scraping HTML files.
- robotstxt: For verifying if we can scrape a website.
Part 1 - Data scraping
See the code below, stored in iterate-cornell-review.R.
# load packages: tidyverse for wrangling/visualization, rvest for HTML
# scraping, robotstxt for checking a site's scraping permissions
library(tidyverse)
library(rvest)
library(robotstxt)
# verify that the site's robots.txt allows scraping this path
# (prints TRUE when scraping is permitted)
paths_allowed("https://www.thecornellreview.org/")
# fetch and parse the landing page of The Cornell Review
page <- read_html("https://www.thecornellreview.org/")

# pull each article component out of the page via CSS selectors
titles <- page |>
  html_elements(css = "#main .read-title a") |>
  html_text2()
authors <- page |>
  html_elements(css = "#main .byline a") |>
  html_text2()
article_dates <- page |>
  html_elements(css = "#main .posts-date") |>
  html_text2()
topics <- page |>
  html_elements(css = "#main .cat-links") |>
  html_text2()
abstracts <- page |>
  html_elements(css = ".post-description") |>
  html_text2()
post_urls <- page |>
  html_elements(css = ".aft-readmore") |>
  html_attr(name = "href")

# assemble the components into a data frame, one row per article
review_raw <- tibble(
  title = titles,
  author = authors,
  date = article_dates,
  topic = topics,
  description = abstracts,
  url = post_urls
)

# parse the date strings and strip the trailing "Read More" text
review <- review_raw |>
  mutate(
    date = mdy(date),
    description = str_remove(string = description, pattern = "\nRead More")
  )
######## write a for loop to scrape the first 10 pages

# preallocate a list to hold one data frame per scraped page
scrape_results <- vector(mode = "list", length = 10)

# seq_along() is safer than 1:length(): it iterates zero times (rather
# than over c(1, 0)) if the list is ever empty
for (page_num in seq_along(scrape_results)) {
  # print a message to keep track of where we are in the iteration
  message(str_glue("Scraping page {page_num}"))
  # pause for a couple of seconds to prevent rapid HTTP requests
  Sys.sleep(2)
  # build the URL for this page of the article archive
  url <- str_glue("https://www.thecornellreview.org/page/{page_num}/")
  # read the current page
  page <- read_html(url)
  # extract desired components
  titles <- html_elements(x = page, css = "#main .read-title a") |>
    html_text2()
  authors <- html_elements(x = page, css = "#main .byline a") |>
    html_text2()
  article_dates <- html_elements(x = page, css = "#main .posts-date") |>
    html_text2()
  topics <- html_elements(x = page, css = "#main .cat-links") |>
    html_text2()
  abstracts <- html_elements(x = page, css = ".post-description") |>
    html_text2()
  post_urls <- html_elements(x = page, css = ".aft-readmore") |>
    html_attr(name = "href")
  # create a tibble with this data
  review_raw <- tibble(
    title = titles,
    author = authors,
    date = article_dates,
    topic = topics,
    description = abstracts,
    url = post_urls
  )
  # clean up the data: parse dates, drop the trailing "Read More" text
  review <- review_raw |>
    mutate(
      date = mdy(date),
      description = str_remove(string = description, pattern = "\nRead More")
    )
  # store in list output
  scrape_results[[page_num]] <- review
}

# collapse list of data frames to a single data frame
scrape_df <- list_rbind(x = scrape_results)
######## write a function to scrape a single page and use a map() function
######## to iterate over the first ten pages

# Scrape one page of The Cornell Review article archive.
#
# url: a string giving the URL of one archive page,
#   e.g. "https://www.thecornellreview.org/page/2/"
#
# Returns a tibble with one row per article and columns:
#   title, author, date (Date), topic, description, url
scrape_review <- function(url) {
  # pause for a couple of seconds to prevent rapid HTTP requests
  Sys.sleep(2)
  # read the requested page
  page <- read_html(url)
  # extract desired components
  titles <- html_elements(x = page, css = "#main .read-title a") |>
    html_text2()
  authors <- html_elements(x = page, css = "#main .byline a") |>
    html_text2()
  article_dates <- html_elements(x = page, css = "#main .posts-date") |>
    html_text2()
  topics <- html_elements(x = page, css = "#main .cat-links") |>
    html_text2()
  abstracts <- html_elements(x = page, css = ".post-description") |>
    html_text2()
  post_urls <- html_elements(x = page, css = ".aft-readmore") |>
    html_attr(name = "href")
  # create a tibble with this data
  review_raw <- tibble(
    title = titles,
    author = authors,
    date = article_dates,
    topic = topics,
    description = abstracts,
    url = post_urls
  )
  # clean up the data; the last expression is the function's return value
  # (idiomatic R reserves explicit return() for early exits)
  review_raw |>
    mutate(
      date = mdy(date),
      description = str_remove(string = description, pattern = "\nRead More")
    )
}
# try the function out on a few individual archive pages
scrape_review(url = "https://www.thecornellreview.org/page/1/")
scrape_review(url = "https://www.thecornellreview.org/page/2/")
scrape_review(url = "https://www.thecornellreview.org/page/3/")

# build the vector of archive URLs for pages 1 through 10
page_nums <- seq_len(10)
cr_urls <- str_glue("https://www.thecornellreview.org/page/{page_nums}/")
cr_urls

# scrape every page, then stack the per-page tibbles into one data frame
cr_reviews <- cr_urls |>
  map(scrape_review, .progress = TRUE) |>
  list_rbind()

# save the combined data set for later analysis
write_csv(x = cr_reviews, file = "data/cornell-review-all.csv")
Part 2 - Data analysis
Demo: Import the scraped data set.
# import the scraped data set; read_csv parses the date column automatically
cr_reviews <- read_csv(file = "data/cornell-review-all.csv")
Rows: 100 Columns: 6
── Column specification ────────────────────────────────────────────────────────
Delimiter: ","
chr (5): title, author, topic, description, url
date (1): date
ℹ Use `spec()` to retrieve the full column specification for this data.
ℹ Specify the column types or set `show_col_types = FALSE` to quiet this message.
cr_reviews
# A tibble: 100 × 6
title author date topic description url
<chr> <chr> <date> <chr> <chr> <chr>
1 Playing the Race Card Revie… 2024-10-07 "Cam… CML and BS… http…
2 Should Joel Malina Be Fired? Revie… 2024-10-07 "Bey… Cornell’s … http…
3 Cornell Drops in 2025 FIRE Free Sp… Revie… 2024-10-03 "Cam… Each year,… http…
4 Interim Expressive Activity Policy… Revie… 2024-10-02 "Cor… On October… http…
5 Daryl Davis To Speak on Race Relat… Revie… 2024-10-01 "Cam… Daryl Davi… http…
6 Happy 100th Birthday, President Ca… Revie… 2024-10-01 "Bey… President … http…
7 Kavita Bala Named Cornell Provost Revie… 2024-09-25 "Cam… On Septemb… http…
8 Ithaca Labor News Revie… 2024-09-25 "Ith… Here are t… http…
9 CML Realizes It Overstepped Social… Revie… 2024-09-25 "Cam… On Wednesd… http…
10 Cornell Republicans to Host Ben Sh… Revie… 2024-09-24 "Ith… On Monday,… http…
# ℹ 90 more rows
Demo: Who are the most prolific authors?
cr_reviews |>
  # reorder author levels by frequency, then reverse so the most prolific
  # author appears at the top of the horizontal bar chart
  mutate(author = fct_rev(fct_infreq(f = author))) |>
  ggplot(mapping = aes(y = author)) +
  geom_bar()
Demo: What topics does The Cornell Review write about?
Not super helpful — each article can have multiple topics. How are multiple topics stored in this column?
# peek at the topic column to see how multiple topics are stored per article
cr_reviews |>
select(topic)
# A tibble: 100 × 1
topic
<chr>
1 "Campus"
2 "Beyond Cayuga's Waters"
3 "Campus"
4 "Cornell Politics"
5 "Campus"
6 "Beyond Cayuga's Waters\nUncategorized"
7 "Campus"
8 "Ithaca"
9 "Campus"
10 "Ithaca\nPolitics"
# ℹ 90 more rows
Each topic is separated by a "\n". Since the number of topics varies for each article, we should separate_longer_delim() this column so each article-topic pair gets its own row. (Alternatively, a stringr function such as str_split() could split them into distinct character strings.)
# split multi-topic rows into one row per article-topic pair
cr_reviews |>
  separate_longer_delim(topic, delim = "\n")
# A tibble: 133 × 6
title author date topic description url
<chr> <chr> <date> <chr> <chr> <chr>
1 Playing the Race Card Revie… 2024-10-07 Camp… CML and BS… http…
2 Should Joel Malina Be Fired? Revie… 2024-10-07 Beyo… Cornell’s … http…
3 Cornell Drops in 2025 FIRE Free Sp… Revie… 2024-10-03 Camp… Each year,… http…
4 Interim Expressive Activity Policy… Revie… 2024-10-02 Corn… On October… http…
5 Daryl Davis To Speak on Race Relat… Revie… 2024-10-01 Camp… Daryl Davi… http…
6 Happy 100th Birthday, President Ca… Revie… 2024-10-01 Beyo… President … http…
7 Happy 100th Birthday, President Ca… Revie… 2024-10-01 Unca… President … http…
8 Kavita Bala Named Cornell Provost Revie… 2024-09-25 Camp… On Septemb… http…
9 Ithaca Labor News Revie… 2024-09-25 Itha… Here are t… http…
10 CML Realizes It Overstepped Social… Revie… 2024-09-25 Camp… On Wednesd… http…
# ℹ 123 more rows
Notice the data frame now has additional rows. The unit of analysis is now an article-topic combination, rather than one-row-per-article. Not entirely a tidy structure, but necessary to construct a chart to visualize topic frequency.
# expand to one row per article-topic pair, then count topics in a bar chart
cr_reviews |>
  separate_longer_delim(topic, delim = "\n") |>
  ggplot(aes(y = topic)) +
  geom_bar()
Let’s clean this up like the previous chart.
# same chart, with topic levels ordered so the most frequent topic
# appears at the top of the horizontal bar chart
cr_reviews |>
  separate_longer_delim(topic, delim = "\n") |>
  mutate(topic = fct_rev(fct_infreq(topic))) |>
  ggplot(aes(y = topic)) +
  geom_bar()
# record package versions and platform details for reproducibility
sessioninfo::session_info()
─ Session info ───────────────────────────────────────────────────────────────
setting value
version R version 4.4.1 (2024-06-14)
os macOS Sonoma 14.6.1
system aarch64, darwin20
ui X11
language (EN)
collate en_US.UTF-8
ctype en_US.UTF-8
tz America/New_York
date 2024-10-09
pandoc 3.4 @ /usr/local/bin/ (via rmarkdown)
─ Packages ───────────────────────────────────────────────────────────────────
! package * version date (UTC) lib source
P bit 4.0.5 2022-11-15 [?] CRAN (R 4.3.0)
P bit64 4.0.5 2020-08-30 [?] CRAN (R 4.3.0)
cli 3.6.3 2024-06-21 [1] RSPM (R 4.4.0)
P colorspace 2.1-0 2023-01-23 [?] CRAN (R 4.3.0)
P crayon 1.5.3 2024-06-20 [?] CRAN (R 4.4.0)
P digest 0.6.35 2024-03-11 [?] CRAN (R 4.3.1)
P dplyr * 1.1.4 2023-11-17 [?] CRAN (R 4.3.1)
P evaluate 0.24.0 2024-06-10 [?] CRAN (R 4.4.0)
P fansi 1.0.6 2023-12-08 [?] CRAN (R 4.3.1)
P farver 2.1.2 2024-05-13 [?] CRAN (R 4.3.3)
P fastmap 1.2.0 2024-05-15 [?] CRAN (R 4.4.0)
P forcats * 1.0.0 2023-01-29 [?] CRAN (R 4.3.0)
P generics 0.1.3 2022-07-05 [?] CRAN (R 4.3.0)
P ggplot2 * 3.5.1 2024-04-23 [?] CRAN (R 4.3.1)
P glue 1.7.0 2024-01-09 [?] CRAN (R 4.3.1)
P gtable 0.3.5 2024-04-22 [?] CRAN (R 4.3.1)
P here 1.0.1 2020-12-13 [?] CRAN (R 4.3.0)
P hms 1.1.3 2023-03-21 [?] CRAN (R 4.3.0)
P htmltools 0.5.8.1 2024-04-04 [?] CRAN (R 4.3.1)
P htmlwidgets 1.6.4 2023-12-06 [?] CRAN (R 4.3.1)
P httr 1.4.7 2023-08-15 [?] CRAN (R 4.3.0)
P jsonlite 1.8.8 2023-12-04 [?] CRAN (R 4.3.1)
P knitr 1.47 2024-05-29 [?] CRAN (R 4.4.0)
P labeling 0.4.3 2023-08-29 [?] CRAN (R 4.3.0)
P lifecycle 1.0.4 2023-11-07 [?] CRAN (R 4.3.1)
P lubridate * 1.9.3 2023-09-27 [?] CRAN (R 4.3.1)
P magrittr 2.0.3 2022-03-30 [?] CRAN (R 4.3.0)
P munsell 0.5.1 2024-04-01 [?] CRAN (R 4.3.1)
P pillar 1.9.0 2023-03-22 [?] CRAN (R 4.3.0)
P pkgconfig 2.0.3 2019-09-22 [?] CRAN (R 4.3.0)
P purrr * 1.0.2 2023-08-10 [?] CRAN (R 4.3.0)
P R6 2.5.1 2021-08-19 [?] CRAN (R 4.3.0)
P readr * 2.1.5 2024-01-10 [?] CRAN (R 4.3.1)
renv 1.0.7 2024-04-11 [1] CRAN (R 4.4.0)
P rlang 1.1.4 2024-06-04 [?] CRAN (R 4.3.3)
P rmarkdown 2.27 2024-05-17 [?] CRAN (R 4.4.0)
P robotstxt * 0.7.13 2020-09-03 [?] RSPM
P rprojroot 2.0.4 2023-11-05 [?] CRAN (R 4.3.1)
P rstudioapi 0.16.0 2024-03-24 [?] CRAN (R 4.3.1)
P rvest * 1.0.4 2024-02-12 [?] CRAN (R 4.3.1)
P scales 1.3.0.9000 2024-05-07 [?] Github (r-lib/scales@c0f79d3)
P sessioninfo 1.2.2 2021-12-06 [?] CRAN (R 4.3.0)
P stringi 1.8.4 2024-05-06 [?] CRAN (R 4.3.1)
P stringr * 1.5.1 2023-11-14 [?] CRAN (R 4.3.1)
P tibble * 3.2.1 2023-03-20 [?] CRAN (R 4.3.0)
P tidyr * 1.3.1 2024-01-24 [?] CRAN (R 4.3.1)
P tidyselect 1.2.1 2024-03-11 [?] CRAN (R 4.3.1)
P tidyverse * 2.0.0 2023-02-22 [?] CRAN (R 4.3.0)
P timechange 0.3.0 2024-01-18 [?] CRAN (R 4.3.1)
P tzdb 0.4.0 2023-05-12 [?] CRAN (R 4.3.0)
P utf8 1.2.4 2023-10-22 [?] CRAN (R 4.3.1)
P vctrs 0.6.5 2023-12-01 [?] CRAN (R 4.3.1)
P vroom 1.6.5 2023-12-05 [?] CRAN (R 4.3.1)
withr 3.0.1 2024-07-31 [1] RSPM (R 4.4.0)
P xfun 0.45 2024-06-16 [?] CRAN (R 4.4.0)
P xml2 1.3.6 2023-12-04 [?] CRAN (R 4.3.1)
P yaml 2.3.8 2023-12-11 [?] CRAN (R 4.3.1)
[1] /Users/soltoffbc/Projects/info-5001/course-site/renv/library/macos/R-4.4/aarch64-apple-darwin20
[2] /Users/soltoffbc/Library/Caches/org.R-project.R/R/renv/sandbox/macos/R-4.4/aarch64-apple-darwin20/f7156815
P ── Loaded and on-disk path mismatch.
──────────────────────────────────────────────────────────────────────────────