Setup

clear global environment

rm(list = ls()); gc()

load libraries

library(tidyverse)
library(sf)
library(ggplot2)
library(gridExtra) # for arranging multiple ggplots (grid.arrange)
library(patchwork) # for composing ggplots (+, &)
library(flextable)
library(kableExtra)
library(INLA)
library(inlabru) # for gg function to plot mesh
library(rnaturalearth)
library(scales) # for log scaling ggplots
library(readxl)
#library(gifski) #depends on cargo being installed on machine

library(mgcv)
library(lattice); library(latticeExtra); library(grid); library(gridExtra);
library(mapdata); library(fields); library(ZIM)

library(viridis) # for spatial field ggplots

set model and paths

model_name <- "IM_ST10"
model_file <- "IM_ST10_no_priors_3RWsYear_9tmpknots_mesh_nonconvex_landboundary_outerbuffer_UTM_km_1407_1988_2023_.rds"

model_pred_name <- "IM_ST10_pred"
model_pred_file <- "IM_ST10_no_priors_pred_3RWsYear_9tmpknots_mesh_nonconvex_landboundary_outerbuffer_UTM_km_1407_1988_2023_.rds"

# get inla version
inlaV <- packageDescription("INLA")$Version

### relative paths
data_path <- "../data"
output_path <- "../output"
model_output_path <- file.path(output_path, "INLA_models", paste0( "INLAv" ,inlaV, "_nonconvex_landboundary_outbuffer_mesh"))
results_path <- file.path(output_path,"results",paste0("INLAv",inlaV,"_nonconvex_landboundary_outbuffer_mesh"))

#dir.create(results_path, recursive = TRUE)
tables_path <- file.path(results_path, "tables")
dir.create(tables_path, recursive = TRUE)
figures_path <- file.path(results_path, "figures")
dir.create(figures_path, recursive = TRUE)

Set download date for survey files

This date was used as the timestamp when saving the downloaded DATRAS exchange data.

dl_date <- "14_06_2024"

load data and get year range

Load the data required for fitting the INLA models (a quick check of the loaded objects is sketched below the list):

  • model_data: processed DATRAS haul and catch survey data by size class for Raja clavata
  • NS_EEC_poly: polygons (simple features) for the Greater North Sea study area
  • bathymetry_GNS: bathymetry data for the Greater North Sea study area, used for the prediction grid
  • gear_efficiency: gear efficiency data for Raja clavata obtained from Walker et al. 2017: https://doi.org/10.1093/icesjms/fsw250
  • exploit_maturity_length_class_stats: mean weight, mean length, gear and gear efficiency by exploitation-maturity class
  • exploit_length_class_stats: mean weight, mean length, gear and gear efficiency by exploitation class
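Optional sanity check (a sketch, not part of the original workflow): list the objects stored in the .RData file without attaching them to the global environment.

# tmp_env <- new.env()
# load(file.path(data_path, "data_RJC_INLA.RData"), envir = tmp_env)
# ls(tmp_env) # should list model_data, NS_EEC_poly, bathymetry_GNS, gear_efficiency, ...
# rm(tmp_env)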
# save(model_data, bathymetry_GNS, NS_EEC_poly, RFA_UTM_km, NS_EEC_simpl_EUNIS_2021, NS_EEC_poly_simpl_UTM_km, exploit_maturity_length_class_stats, exploit_length_class_stats, gear_efficiency,  file = file.path(data_path,"data_RJC_INLA.RData"))
# To load the data again
load(file.path(data_path, "data_RJC_INLA.RData"))
catches_maturity_RJC <- model_data
min_data_yr <- min(catches_maturity_RJC$Year)
(max_data_yr <- max(catches_maturity_RJC$Year))

Set themes for plotting

# plot spatio-temporal posterior mean
mytheme <- theme_classic() +
  theme(axis.text = element_text(size = 7),
        axis.title = element_text(size = 8),
        legend.position = "bottom", 
        legend.text = element_text(size = 8),
        strip.text.x = element_text(size = 8), # adjust facet label size
        legend.key.width = unit(1, 'cm'),
        legend.key.size = unit(0.1, 'cm')
  ) 

mytheme_ext <- mytheme + 
  theme(strip.text.x = element_text(size = 8), # adjust facet label size
        legend.key.width = unit(2, 'cm'), 
        legend.key.size = unit(0.3, 'cm')
        )

mytheme_ts <- mytheme + 
  theme(panel.grid.major = element_line(size = (0.2), colour = "grey"),
       panel.border = element_rect(colour = "black", fill = NA, linewidth = 0.5),
       axis.line = element_line(linewidth = 0.1),
       axis.ticks = element_line(colour = "black", linewidth = 0.2),
       title = element_text(size=6),
       legend.margin=margin(c(0,0,0,0)),
       legend.box.margin=margin(c(-10,0,-10,0)))

load ICES areas (statistical areas, roundfish areas (RFAs)) and ICES rectangles

# load simplified NS_EEC statistical rectangles
# NS EEC SubAreaDiv poly
NS_EEC_poly <- NS_EEC_poly %>% st_union() %>% st_transform(crs = "+proj=utm +zone=31 +units=km") # transform to UTM 31N with km as units
NS_EEC_poly_simpl_UTM_km <- NS_EEC_poly_simpl_UTM_km %>% st_set_crs(3857) %>% st_transform(crs = "+proj=utm +zone=31 +units=km")  # transform to UTM 31N with km as units

#################################
#worldmap
world_sf_4326 <- ne_countries(scale = "medium") %>% st_as_sf() %>% st_make_valid()
world_sf <- ne_countries(scale = "medium") %>% st_as_sf() %>% st_transform(crs = "+proj=utm +zone=31 +units=km")

#################################
# load GNS countries polygons
GNS_bbox_4326 <- c(xmin = -5, xmax = 10, ymin = 48, ymax = 61.5)
GNS_countries_4326 <- ne_countries(continent = "Europe", scale = "medium") %>% st_as_sf() %>% st_make_valid() %>% st_crop(GNS_bbox_4326)

GNS_bbox <- c(xmin = 50, xmax = 1000, ymin = 5400, ymax = 7000)
GNS_countries_UTM_km <- ne_countries(continent = "Europe", scale = "medium") %>% st_as_sf() %>% st_transform(crs = "+proj=utm +zone=31 +units=km") %>% st_crop(GNS_bbox) # transform to UTM with units as km

#################################
#- roundfish polygons RFA_UTM_km obtained via https://gis.ices.dk/sf/?widget=datras
#count of distinct hauls 
catches_maturity_RJC %>% distinct(HaulCode, .keep_all = TRUE) %>% count()

Summarize data

count of distinct hauls with R. clavata

#count of distinct hauls with R. clavata
catches_maturity_RJC %>%
  group_by(HaulCode) %>%
  summarize(totC = sum(NoPerHaul), .groups = "drop") %>%
  summarize(hauls_with_RJC = sum(totC > 0))

count of distinct hauls per survey

#count of distinct hauls per survey
catches_maturity_RJC %>% distinct(HaulCode, .keep_all = TRUE) %>% count(Survey)

count of distinct hauls with R. clavata by size class

#count of distinct hauls with R. clavata by size class
catches_maturity_RJC %>%
  group_by(Exploit_Maturity) %>%
  count(isC = NoPerHaul > 0) 

Table S1: Count of hauls per substrate type

#count of distinct hauls per substrate type
haul_count_per_substrate <- catches_maturity_RJC %>%
  distinct(HaulCode, .keep_all = TRUE) %>%
  count(Substrate, name = "Number of Hauls")
haul_count_per_substrate

# reorder levels of fSubstrate
substrate_levels <- c("Coarse substrate",
                      "Rock or other hard substrata",
                      "Mixed sediment",
                      "Sand",
                      "Muddy sand",
                      "Sandy mud or Muddy sand",
                      "Sandy mud",
                      "Fine mud",
                      "Worm reefs",
                      "Sabellaria spinulosa reefs")

haul_count_per_substrate <- haul_count_per_substrate %>%
  arrange(match(Substrate, substrate_levels))

# calculate total area per substrate
total_area_substrate <- NS_EEC_simpl_EUNIS_2021
total_area_substrate$area_Substrate <- st_area(NS_EEC_simpl_EUNIS_2021) # area of each polygon
total_area_substrate <- total_area_substrate %>% st_drop_geometry() %>% select(Substrate, area_Substrate) %>% group_by(Substrate) %>% summarize('Total Area' = round(sum(area_Substrate)))
total_area_substrate <- total_area_substrate %>% mutate(Substrate = str_remove_all(Substrate, "[[:punct:]]"))
  
# # save as table
(haul_count_total_area_per_substrate <- haul_count_per_substrate %>%
  left_join(total_area_substrate) %>%
  select(Substrate, 'Total Area', 'Number of Hauls') %>%
  mutate('Total Area (km2)' = units::drop_units(`Total Area`)) %>%
  select(-`Total Area`) %>% relocate('Number of Hauls', .after = last_col()))

haul_count_total_area_per_substrate %>%
  flextable() %>%
  theme_booktabs() %>%
  flextable::font(fontname = "Verdana", part = "all") %>%
  flextable::fontsize(size = 8, part = "all") %>%
  flextable::bold(i = 1, bold = TRUE, part = "header") %>%
  #flextable::hline(part = "body", border = officer::fp_border(color = "gray")) %>% # add horizontal borders to body
  #FitFlextableToPage(ft = ., pgwidth = 8.5) %>%
  save_as_docx(path = file.path(tables_path, paste0("haul_count_per_substrate_table.docx")))

Table S2: Count of hauls per habitat type

#count of distinct hauls per habitat type
(haul_count_per_habitat <- catches_maturity_RJC %>%
  distinct(HaulCode, .keep_all = TRUE) %>%
  count(EUNIS2019C, name = "Number of Hauls"))

haul_count_per_habitat <- haul_count_per_habitat %>%
  rename("Habitat" = EUNIS2019C)

# calculate total area per habitat
total_area_habitat <- NS_EEC_simpl_EUNIS_2021
total_area_habitat$area_EUNIS2019C <- st_area(NS_EEC_simpl_EUNIS_2021) # area of each polygon
total_area_habitat <- total_area_habitat %>% st_drop_geometry() %>% select(EUNIS2019C, area_EUNIS2019C) %>% group_by(EUNIS2019C) %>% summarize('Total Area' = round(sum(area_EUNIS2019C)))
  
## save as table
(haul_count_total_area_per_habitat <- haul_count_per_habitat %>% left_join(total_area_habitat %>% rename("Habitat" = EUNIS2019C)) %>% select(Habitat, 'Total Area', 'Number of Hauls') %>%
  mutate('Total Area (km2)' = units::drop_units(`Total Area`)) %>% select(-`Total Area`) %>% relocate('Number of Hauls', .after = last_col())) 

haul_count_total_area_per_habitat %>%
  flextable() %>%
  theme_booktabs() %>%
  flextable::font(fontname = "Verdana", part = "all") %>%
  flextable::fontsize(size = 8, part = "all") %>%
  flextable::bold(i = 1, bold = TRUE, part = "header") %>%
  #FitFlextableToPage(ft = ., pgwidth = 8.5) %>%
  save_as_docx(path = file.path(tables_path, "haul_count_per_habitat_table.docx"))

additional functions to summarise INLA model results

# function to autofit flextable to desired word page width in cm
FitFlextableToPage <- function(ft, pgwidth = 17){ # desired pgwidth in cm
  ft_out <- ft %>% autofit()
  ft_out <- width(ft_out, width = dim(ft_out)$widths * pgwidth * 0.3937 /(flextable_dim(ft_out)$widths))
  return(ft_out)
}
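# Usage sketch (hypothetical flextable `ft`): fit a table to a 17 cm Word page width
# ft %>% FitFlextableToPage(pgwidth = 17)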

# function to calculate LCPO
# calculate LCPO
slcpo <- function(m, na.rm = TRUE) {
  - sum(log(m$cpo$cpo), na.rm = na.rm)
}
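# Minimal sketch (toy data, not part of this workflow): slcpo() relies on CPO values, which
# INLA only returns when they are requested at fit time via control.compute.
# toy_fit <- INLA::inla(y ~ x, data = data.frame(y = rpois(50, 3), x = rnorm(50)),
#                       family = "poisson",
#                       control.compute = list(dic = TRUE, waic = TRUE, cpo = TRUE))
# slcpo(toy_fit) # lower values indicate better leave-one-out predictive performance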

Gear efficiency Figure S6 & Table S3

# create flextable
exploit_maturity_length_class_stats_mod <- exploit_maturity_length_class_stats %>%
  mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "Size class 1",
                                      Exploit_Maturity == "Exploited_Immature" ~ "Size class 2",
                                      Exploit_Maturity == "Exploited_Mature" ~ "Size class 3")) %>% # recode exploitation_maturity
  mutate(Exploit_Maturity = factor(Exploit_Maturity, levels = paste("Size class", 1:3))) %>%
  mutate(mean_weight = round(mean_weight/1e3, 3)) %>% # convert g to kg and round to 3 decimals
  relocate(Exploit_Maturity, mean_length, Gear, Efficiency, mean_weight) %>%
  dplyr::rename("Size class" = "Exploit_Maturity",
         "Average Length (cm)" = "mean_length",
         "Weight (kg)" = "mean_weight")

flextable gear efficiency by size class

exploit_maturity_length_class_stats_flex <- exploit_maturity_length_class_stats_mod %>%
  flextable::flextable() %>%
    flextable::theme_vanilla() %>%
    flextable::font(fontname = "Verdana", part = "all") %>%
    flextable::fontsize(size = 7, part = "all") %>%
    flextable::colformat_double(j = c(4), digits = 4) %>%
    flextable::colformat_double(j = c(5), digits = 3) %>%
    flextable::bold(i = 1, bold = TRUE, part = "header")
      
exploit_maturity_length_class_stats_flex %>% 
  FitFlextableToPage(ft = ., pgwidth = 17) %>%
  flextable::save_as_docx(path =  file.path(results_path, "exploit_maturity_length_class_stats_flex.docx"))

figures of gear efficiency by size class and survey

size_class_cpal <- c("Size class 1" = "#F8766D", "Size class 2" = "#00BA38" , "Size class 3" = "#619CFF")

# figure gear efficiency
gear_efficiency_plot <- gear_efficiency %>% 
  filter(Gear %in% c("BEAM", "GOV")) %>% 
  ggplot() + 
  geom_line(aes(x = Length, y = Efficiency)) + 
  geom_vline(data = exploit_maturity_length_class_stats_mod %>% distinct(`Size class`, `Average Length (cm)`) , aes(xintercept = `Average Length (cm)`, colour = `Size class`), lty = "dashed", linewidth = 1.5) +
  scale_colour_manual(values = size_class_cpal) + 
  facet_wrap(~Gear) +
  xlab("Average Length (cm)") +
  ylim(c(0, 1)) +
  theme_bw() +
  theme(axis.text = element_text(size = 16),
        axis.title = element_text(size = 16),
        strip.text = element_text(size = 16),
        legend.position = "bottom", 
        legend.title = element_text(size = 16),
        legend.key.size = unit(2, 'cm'),
        legend.text=element_text(size = 16)
  )

gear_efficiency_plot

ggsave(plot = gear_efficiency_plot, 
       filename = file.path(results_path, paste("gear_efficiency_size_class_plot.tiff", sep = "_")), 
       width = 17, 
       height = 8, 
       dpi = 500,
       compression ="lzw")

Model comparison

Load RJC Exploit_Maturity INLA models

# list files in model path
list.files(file.path(model_output_path))

# all INLA model files & model setup files
IM_models <- tibble(IM_file = list.files(file.path(model_output_path))) %>% 
  mutate(model_setup_file = paste0("model_setup_", IM_file)) %>%
  filter(str_starts(IM_file, "IM\\w+\\d+_no_priors")) %>% 
  mutate(model_name = case_when(str_detect(IM_file, "pred") == TRUE ~ paste0(str_extract(IM_file, "IM_[A-Z]+[0-9]+"), "_pred"),
                                TRUE ~ str_extract(IM_file, "IM_[A-Z]+[0-9]+"))) #str_subset("IM\\w+\\d+_no_priors")
IM_models

model_list <- list()
model_setup_list <- list()
for (model in IM_models$IM_file){
  model_name_i <- IM_models %>% filter(IM_file == model) %>% pull(model_name) # local name so the global model_name set above is not overwritten
  model_list[[model_name_i]] <- readRDS(file.path(model_output_path, model))
  model_setup_list[[model_name_i]] <- readRDS(file.path(model_output_path, filter(IM_models, IM_file == model) %>% pull(model_setup_file)))
}

model evaluation

models_tbl <- tibble("model_name" = NA, "DIC" = NA, "WAIC" = NA, "LCPO" = NA)
for(model_name_i in names(model_list)){ # local loop name so the global model_name is not overwritten
  n <- which(names(model_list) == model_name_i)
  IM <- model_list[[model_name_i]]
  dic <- IM$dic$dic
  waic <- IM$waic$waic
  lcpo <- slcpo(IM)
  tmp_tib <- tibble("model_name" = model_name_i, "DIC" = dic, "WAIC" = waic, "LCPO" = lcpo)
  if(n == 1){
    models_tbl <- tmp_tib
  } else {
    models_tbl <- models_tbl %>% add_row(tmp_tib)
  }
}

models_tbl

write_rds(models_tbl, file = file.path(results_path, "model_evaluation_tbl.rds"))
#models_tbl <- read_rds(file = file.path(results_path, "model_evaluation_tbl.rds"))

model descriptions

# add model description column
model_description <- tibble(Model = NULL,
                            ModelID = NULL,
                            Type = NULL,
                            Description = NULL,
                            Formula = NULL,
                            Variables = NULL, 
                            'Fixed Effects' = NULL,
                            'Random Effects' = NULL) %>%
  ##Non-Spatial models
  bind_rows(tibble(Model = 1, 
                   ModelID = "IM_NS1", 
                   Type = "Non-Spatial", 
                   Description = "Non-Spatial: 3RWs Year Size class", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = NULL)) %>%
  bind_rows(tibble(Model = 2, 
                   ModelID = "IM_NS2", 
                   Type = "Non-Spatial", 
                   Description = "Non-Spatial: 3RWs Year Size class, 3 RWs Depth-Size class", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 3, 
                   ModelID = "IM_NS3", 
                   Type = "Non-Spatial", 
                   Description = "Non-Spatial: 3RWs Year Size class, 3 RWs Depth-Size class, Habitat", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth", "Habitat"), collapse = " + "),
                   'Fixed Effects' = paste(c("Habitat"), collapse = " + "),
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 4,
                   ModelID = "IM_NS4",
                   Type = "Non-Spatial", 
                   Description = "Non-Spatial: 3RWs Year Size class, 3 RWs Depth-Size class, Substrate",
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth", "Substrate"), collapse = " + "), 
                   'Fixed Effects' = paste(c("Substrate"), collapse = " + "),
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  ## Spatial models
  bind_rows(tibble(Model = 5, 
                   ModelID = "IM_S5", 
                   Type = "Spatial", 
                   Description = "Spatial: 3RWs Year Size class", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = NULL)) %>%
  bind_rows(tibble(Model = 6, 
                   ModelID = "IM_S6", 
                   Type = "Spatial", 
                   Description = "Spatial: 3RWs Year Size class, 3 RWs Depth-Size class", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 7, 
                   ModelID = "IM_S7", 
                   Type = "Spatial", 
                   Description = "Spatial: 3RWs Year Size class, 3 RWs Depth-Size class, Habitat", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth", "Habitat"), collapse = " + "), 
                   'Fixed Effects' = paste(c("Habitat"), collapse = " + "),
                   'Random Effects' = paste(c("Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 8,
                   ModelID = "IM_S8",
                   Type = "Spatial", 
                   Description = "Spatial: 3RWs Year Size class, 3 RWs Depth-Size class, Substrate", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth", "Substrate"), collapse = " + "), 
                   'Fixed Effects' = paste(c("Substrate"), collapse = " + "),
                   'Random Effects' = paste(c("Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  ## Spatio-temporal models
  bind_rows(tibble(Model = 9, 
                   ModelID = "IM_ST9", 
                   Type = "Spatio-temporal", 
                   Description = "Spatio-temporal: 3RWs Year Size class", 
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = NULL)) %>%
  bind_rows(tibble(Model = 10,
                   ModelID = "IM_ST10", 
                   Type = "Spatio-temporal",
                   Description = "Spatio-temporal: 3RWs Year Size class, 3 RWs Depth-Size class",
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Depth"), collapse = " + "),
                   'Fixed Effects' = NULL,
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 11,
                   ModelID = "IM_ST11", 
                   Type = "Spatio-temporal", 
                   Description = "Spatio-temporal: 3RWs Year Size class, 3 RWs Depth-Size class, Habitat",
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year", "Depth", "Habitat"), collapse = " + "),
                   'Fixed Effects' = paste(c("Habitat"), collapse = " + "),
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  )) %>%
  bind_rows(tibble(Model = 12,
                   ModelID = "IM_ST12", 
                   Type = "Spatio-temporal", 
                   Description = "Spatio-temporal: 3RWs Year Size class, 3 RWs Depth-Size class, Substrate",
                   Formula = paste(deparse((model_setup_list[[ModelID]]$Formula)), collapse = ""), 
                   Variables = paste(c("Size class", "Survey", "Year","logSweptAreakm2", "Depth", "Substrate"), collapse = " + "), 
                   'Fixed Effects' = paste(c("Substrate"), collapse = " + "),
                   'Random Effects' = paste(c( "Depth: random walk for each size class"), collapse = ", ")
  ))


models_tbl2 <- models_tbl %>% rename(ModelID = model_name) %>% 
  right_join(model_description, by = "ModelID") %>% 
  relocate(Model, ModelID, Type, 'Fixed Effects', 'Random Effects', DIC, WAIC, LCPO, Variables, Description, Formula) %>% 
  rename('Depth RW' = 'Random Effects') %>% 
  mutate("Depth RW" = case_when(`Depth RW` == "Depth: random walk for each size class" ~ "X", TRUE ~ NA)) %>%
  arrange(Model)


write_rds(models_tbl2, file = file.path(results_path, "model_tbl2.rds"))

Table 1: Model evaluations table

# models_tbl2<- read_rds(file = file.path(results_path, "model_tbl2.rds"))

# here is an example using flextable to generate a configured word table
model_evaluation_flex <-  models_tbl2 %>% 
  select(Type, Model, `Fixed Effects`, `Depth RW`, WAIC, LCPO) %>%
  mutate(`Fixed Effects` = `Fixed Effects` %>% str_replace_all("\\ \\+\\ ", "\\,\\ "),
         `Depth RW` = `Depth RW` %>% str_replace_all("\\ \\+\\ ", "\\,\\ ")) %>% # replace "+" with ","in fixed and Depth RW
  as_grouped_data(groups = c("Type"), columns = NULL) %>% #
  flextable::as_flextable() %>%
  flextable::compose(
    i = ~ !is.na(Type), # when var_group not NA
    j = c('Model', 'Fixed Effects', 'Depth RW', 'WAIC', 'LCPO'), # on column "var"
    # create a paragraph containing a chunk containing value of `var_group`
    value = as_paragraph(as_chunk(Type))) %>%
  flextable::theme_vanilla() %>%
  flextable::font(fontname = "Verdana", part = "all") %>%
  flextable::fontsize(size = 8, part = "all") %>%
  flextable::colformat_double(j = c(4:5), digits = 0) %>% # print 0 digits after the decimal for the WAIC and LCPO columns
  flextable::bold(i = 13, bold = TRUE, part = "body") %>%
  flextable::bold(j = 1, i = ~ !is.na(Type), bold = TRUE, part = "body" ) %>%
  flextable::bg(i = c(1, 6, 11), bg = "lightgrey", part = "body") %>%  # set background of model-type rows
  flextable::align(i = c(1, 6, 11), align = "left", part = "body") %>%
  flextable::align(i = c(2:5, 7:10, 12:15), align = "center", part = "body") %>%
  flextable::align(i = 1, align = "center", part = "head")


#model_evaluation_flex

# save flextable table
model_evaluation_flex %>% 
  width(j = c(1), width = 3.87 , unit = "cm") %>%
  width(j = c(2), width = 3.86 , unit = "cm") %>%
  width(j = c(3), width = 3.09 , unit = "cm") %>%
  width(j = c(4, 5), width = 3.09 , unit = "cm") %>%
  #FitFlextableToPage(ft = ., pgwidth = 17) %>%
  flextable::save_as_docx(path = file.path(results_path, "tables", "Table1_RJC_INLA_model_evaluations.docx"))

Load best model data

IM <- read_rds(file.path(model_output_path, model_file))

summary(IM)
model_setup <- read_rds(file.path(model_output_path, paste("model_setup", model_file, sep = "_")))

Results best model IM_ST10, prediction & simulation

Table 2 & 3 summary of best model

Investigate the spatial effect: table of estimates of the spatial field parameters

spde <- model_setup$spde
SpatField.w <- inla.spde2.result(inla = IM, name = "w", spde = spde, do.transfer = TRUE)
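# Note (my reading of the call above, as a sketch): inla.spde2.result() returns posterior marginals
# of the SPDE parameters on the user scale; the elements used below are marginals.kappa,
# marginals.variance.nominal and marginals.range.nominal.
# names(SpatField.w)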

# table of kappa and sigma estimates
sum_kappa <- as_tibble(inla.zmarginal(SpatField.w$marginals.kappa[[1]])) %>%
  mutate("Estimated Parameter" = "$\\kappa$", "Species" = "Raja clavata", "Model" = model_name) %>%
  relocate("Estimated Parameter", Species, Model)
sum_sigma_u <- as_tibble(inla.zmarginal(SpatField.w$marginals.variance.nominal[[1]])) %>%
  mutate("Estimated Parameter" = "$\\sigma_u$", "Species" = "Raja clavata", "Model" = model_name) %>%
  relocate("Estimated Parameter", Species, Model)
sum_range <- as_tibble(inla.zmarginal(SpatField.w$marginals.range.nominal[[1]])) %>% mutate("Estimated Parameter" = "range", "Species" = "Raja clavata", "Model" = model_name) %>% relocate("Estimated Parameter", Species, Model)
sum_spatial_latent_effects <- bind_rows(sum_kappa, sum_sigma_u) %>%
  rename(Mean = mean, SD = sd, Q0.025 = `quant0.025`, Q0.5 = `quant0.5`, Q0.975 = `quant0.975`) %>%
  select(Species, Model, "Estimated Parameter", Mean, SD, Q0.025, Q0.5, Q0.975)

sum_spatial_latent_effects

plot posterior of fixed effects

marg_fixed <- IM$marginals.fixed
sum_fixed <- IM$summary.fixed

plots_marginal_fixed <- list()
for(i in 1:length(marg_fixed)){
  fixed_covariate <- names(marg_fixed)[i]
  if(fixed_covariate == "Intercept"){title <- "Intercept (BTS + Size class 1)"
  } else{
    title <- fixed_covariate
  }
  
  # summary of fixed covariate
  sum_fixed_covariate <- sum_fixed[fixed_covariate, ] %>% as_tibble()
  
  plots_marginal_fixed[[fixed_covariate]] <- marg_fixed[[fixed_covariate]] %>% as_tibble() %>%
    ggplot(aes(x = x, y = y)) +
    geom_line() +
    geom_vline(xintercept = c(sum_fixed_covariate$`0.025quant`, sum_fixed_covariate$`0.975quant`), lty = "dashed") +
    theme_bw() +
    ggtitle(title)
}

grid.arrange(grobs = plots_marginal_fixed)

Table 2: Summary table of fixed effects

summary_fixed <- IM$summary.fixed %>%
  cbind("Predictor" = rownames(.), "Species" = "Raja clavata", "Model" = model_name) %>%
  as_tibble() %>%
  rename(Mean = mean, SD = sd, Q0.025 = `0.025quant`, Q0.5 = `0.5quant`, Q0.975 = `0.975quant`) %>%
  select(Species, Model, Predictor, Mean, SD, Q0.025, Q0.5, Q0.975) %>%
  arrange(Predictor)

# rename Predictor variables
summary_fixed <- summary_fixed %>% 
  mutate(Predictor = replace(Predictor, Predictor == "Exploit_Maturityexploited_immature", "Size class 2")) %>%
  mutate(Predictor = replace(Predictor, Predictor == "Exploit_Maturityexploited_mature", "Size class 3")) %>%
  mutate(Predictor = replace(Predictor, Predictor == "fSurveyFRCGFS", "Survey: FR-CGFS")) %>%
  mutate(Predictor = replace(Predictor, Predictor == "fSurveyNSIBTS", "Survey: NS-IBTS")) %>% 
  mutate(Predictor = replace(Predictor, Predictor == "fSurveyFRCGFSExploit_Maturityexploited_immature", "Survey: FR-CGFS; Size class 2")) %>%
  mutate(Predictor = replace(Predictor, Predictor == "fSurveyFRCGFSExploit_Maturityexploited_mature", "Survey: FR-CGFS; Size class 3")) %>% 
  mutate(Predictor = replace(Predictor, Predictor == "fSurveyNSIBTSExploit_Maturityexploited_immature", "Survey: NS-IBTS; Size class 2")) %>%
mutate(Predictor = replace(Predictor, Predictor == "fSurveyNSIBTSExploit_Maturityexploited_mature", "Survey: NS-IBTS; Size class 3")) %>% 
  rename("Parameter" = Predictor)

# arrange by  Parameter of choice
summary_fixed <- summary_fixed %>% 
  mutate(Parameter = factor(Parameter, levels = c("Intercept",
                                                  "Size class 2",
                                                  "Size class 3",
                                                  "Survey: FR-CGFS",
                                                  "Survey: NS-IBTS",
                                                  "Survey: FR-CGFS; Size class 2",
                                                  "Survey: FR-CGFS; Size class 3",
                                                  "Survey: NS-IBTS; Size class 2",
                                                  "Survey: NS-IBTS; Size class 3"
  ))) %>% 
  arrange(Parameter)

summary_fixed_flex <- summary_fixed %>% select(-c(Species, Model, Q0.5)) %>%
  flextable() %>%
  theme_booktabs() %>%
  flextable::font(fontname = "Verdana", part = "all") %>%
  flextable::fontsize(size = 8, part = "all") %>%
  colformat_double(digits = 3)

# save flextable table (FitFlextableToPage() errors here, so that step is commented out)
summary_fixed_flex %>%
 # FitFlextableToPage(ft = ., pgwidth = 17) %>%
  save_as_docx(path = file.path(tables_path, paste0("Table2_", model_name, "summary_fixed_parameters_table.docx")))

Table 3: Summary table of hyperparameters

summary_hyper <- IM$summary.hyperpar %>%
  cbind("Description" = rownames(.), "Species" = "Raja clavata", "Model" = model_name) %>%
  as_tibble() %>%
  rename(Mean = mean, SD = sd, Q0.025 = `0.025quant`, Q0.5 = `0.5quant`, Q0.975 = `0.975quant`) %>%
  select(Species, Model, Description, Mean, SD, Q0.025, Q0.5, Q0.975)

# rename Description variables
summary_hyper <- summary_hyper %>% 
  mutate(Description = replace(Description, Description == "Precision for Year_Unexploited_Immature", "Precision for Year: Size class 1")) %>%
  mutate(Description = replace(Description, Description == "Precision for Year_Exploited_Immature", "Precision for Year: Size class 2")) %>%
  mutate(Description = replace(Description, Description == "Precision for Year_Exploited_Mature", "Precision for Year: Size class 3")) %>%
  mutate(Description = replace(Description, Description == "Precision for Depth_Unexploited_Immature", "Precision for Depth: Size class 1")) %>%
  mutate(Description = replace(Description, Description == "Precision for Depth_Exploited_Immature", "Precision for Depth: Size class 2")) %>%
  mutate(Description = replace(Description, Description == "Precision for Depth_Exploited_Mature", "Precision for Depth: Size class 3")) %>%
  mutate(Description = replace(Description, Description == "Theta1 for w", "Mean of $\\upsilon_{i,t}$")) %>%
  mutate(Description = replace(Description, Description == "Theta2 for w", "SD of $\\upsilon_{i,t}$")) %>%
  mutate(Description = replace(Description, Description == "GroupRho for w", "Temporal Correlation between consecutive time steps for $\\upsilon_{i,t}$"))  

summary_hyper <- summary_hyper %>%
  mutate(Hyperparameter = c("k",
                            "$\\rho_{\\eta_{t, m = 1}}$",
                            "$\\rho_{\\eta_{t, m = 2}}$",
                            "$\\rho_{\\eta_{t, m = 3}}$",
                            "$\\rho_{\\eta_{d, m = 1}}$",
                            "$\\rho_{\\eta_{d,, m = 2}}$",
                            "$\\rho_{\\eta_{d,, m = 3}}$",
                            "$\\theta_1$",
                            "$\\theta_2$",
                            "$\\rho_t$")) %>%
  relocate(Hyperparameter, .before = Mean)

summary_hyper_flex <- bind_rows(rename(summary_hyper, "Estimated Parameter" = Hyperparameter),
                                sum_spatial_latent_effects) %>%
  select(-c(Species, Model, Q0.5, Description)) %>%
  rename("Parameter" = "Estimated Parameter" ) %>% 
  flextable() %>%
  theme_booktabs() %>%
  flextable::font(fontname = "Verdana", part = "all") %>%
  flextable::fontsize(size = 8, part = "all") %>%
  colformat_double(digits = 3)

# save flextable table
summary_hyper_flex %>%
  # FitFlextableToPage(ft = ., pgwidth = 17) %>%
  save_as_docx(path = file.path(tables_path, paste0("Table3_", model_name, " summary_hyperparameters_table.docx")))

Figure 1: Random effects for size class

## plot Trends of Random effects
sumrnd <- IM$summary.random

quants <- function(x){range(x$"0.025quant", x$"0.975quant")}
# get shared y-axis limits so each set of random effects (Year, Depth, w) is plotted on a common scale across size classes

y_lim_year  <- range(sapply(sumrnd[c("Year_Unexploited_Immature", "Year_Exploited_Immature", "Year_Exploited_Mature")], FUN = quants))
y_lim_depth <- range(sapply(sumrnd[c("Depth_Unexploited_Immature", "Depth_Exploited_Immature", "Depth_Exploited_Mature")], FUN = quants))
y_lim_w     <- range(sapply(sumrnd["w"], FUN = quants))

lim_tib <- tibble(range = c("min", "max"), y_lim_year, y_lim_depth, y_lim_w)

plots_RandomEffects <- list()

for(i in 1:length(sumrnd)){
  covariate <- names(sumrnd)[i]
  
  titleplot <- ggtitle(case_when(str_detect(covariate, "Unexploited_Immature") ~ "Size class 1",
                        str_detect(covariate, "Exploited_Immature") ~ "Size class 2",
                        str_detect(covariate, "Exploited_Mature") ~ "Size class 3"))
  
  if (str_detect(covariate, pattern = "Year")){
    y_lim_rw <- y_lim_year
    
    plots_RandomEffects[[covariate]] <- sumrnd[[covariate]] %>%
      ggplot(aes(x = ID, y = mean, ymin = `0.025quant`, ymax = `0.975quant`)) +
      geom_line() +
      geom_ribbon(alpha = 0.2) +
      geom_hline(yintercept = 0, lty = "dashed") +
      scale_x_continuous(expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
      scale_y_continuous(expand = c(0, 0), limits = c(floor(y_lim_rw[1]), ceiling(y_lim_rw[2]))) +
      xlab("Year") +
      ylab("") + 
      theme_classic() +
      titleplot
  }
  
  if (str_detect(covariate, pattern = "Depth")){
    y_lim_rw <- y_lim_depth

    plots_RandomEffects[[covariate]] <- sumrnd[[covariate]] %>%
      ggplot(aes(x = ID, y = mean, ymin = `0.025quant`, ymax = `0.975quant`)) +
      geom_line() +
      geom_ribbon(alpha = 0.2) +
      geom_hline(yintercept = 0, lty = "dashed") +
      scale_x_continuous(expand = c(0, 0)) +
      scale_y_continuous(expand = c(0, 0), limits = c(floor(y_lim_rw[1]), ceiling(y_lim_rw[2]))) +
      xlab("Depth (m)") +
      ylab("") + 
      theme_classic() +
      titleplot
  }
  
  if (str_detect(covariate, pattern = "w")){
    y_lim_rw <- y_lim_w
    
    plots_RandomEffects[[covariate]] <- sumrnd[[covariate]] %>%
      ggplot(aes(x = ID, y = mean, ymin = `0.025quant`, ymax = `0.975quant`)) +
      geom_line() +
      geom_ribbon(alpha = 0.2) +
      ylim(c(floor(y_lim_rw[1]), ceiling(y_lim_rw[2]))) +
      titleplot
  }
}

## combine the Year random-walk plots (without the spatial field w) into one figure
year_random_effects_plot <- plots_RandomEffects[[1]] +  ylab("Marginal effect") + 
  plots_RandomEffects[[2]] + 
  plots_RandomEffects[[3]] & 
  mytheme_ts

year_random_effects_plot

Tables of trends by exploitation-maturity: detect which Years or Depths have a significant effect (95% CI not covering zero) and whether that effect is positive or negative.
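A worked illustration of this rule (made-up numbers; the actual implementation follows in the next chunk): an effect is treated as significant when its 95% CI excludes zero, and its sign is read off the interval bounds.

# tibble(`0.025quant` = c(-0.5, 0.2), `0.975quant` = c(-0.1, 0.8)) %>%
#   mutate(significant = !(`0.025quant` <= 0 & `0.975quant` >= 0),
#          sign = case_when(`0.975quant` < 0 ~ "negative",
#                           `0.025quant` > 0 ~ "positive",
#                           TRUE ~ NA))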

random_effects  <- setNames(names(sumrnd), names(sumrnd))
random_effects  <- random_effects[! random_effects == "w"]

random_effects_tibs <- purrr::map(.x = random_effects,
                                  .f = function(x){
                                    value <- 0
                                    tmp_tib <- sumrnd[[x]] %>% mutate(sign_effect_test = !(value >= `0.025quant` & value <= `0.975quant`),
                                                      sign_effect = case_when(sign_effect_test == TRUE & `0.975quant` < 0 ~ "negative",
                                                      sign_effect_test == TRUE & `0.025quant` > 0 ~ "positive", TRUE ~ NA),
                                                      lowest_sign_effect = case_when(sign_effect_test == TRUE & mean == min(mean) ~ TRUE, TRUE ~ NA),
                                                      highest_sign_effect = case_when(sign_effect_test == TRUE & mean == max(mean) ~ TRUE, TRUE ~ NA))
                                  })

random_effects_tibs[4:6]

# write out table of random effects
dir.create(file.path(results_path, "tables", "random_effects"), recursive = TRUE)
purrr::map(.x = random_effects,
           .f = function(x){
             print(x)
             write_csv(random_effects_tibs[[x]], file = file.path(results_path, "tables", "random_effects", paste0(x, "_table.csv")))
           }
)

Figure 2: Matérn correlation function & range estimate

Kappa   <- inla.emarginal(function(x) x, SpatField.w$marginals.kappa[[1]] )
Sigma_u <- inla.emarginal(function(x) sqrt(x), SpatField.w$marginals.variance.nominal[[1]] )
range   <- inla.emarginal(function(x) x, SpatField.w$marginals.range.nominal[[1]] )

print(paste0("Kappa: ", Kappa))
print(paste0("Sigma: ", Sigma_u))
print(paste0("Range: ", range))

# Show correlation structure
mesh <- model_setup$mesh
LocMesh <- mesh$loc[,1:2]

# And then we calculate the distance between each vertex.
D <- as.matrix(dist(LocMesh))

# Using the estimated parameters from the model (see above)
# we can calculate the imposed Matern correlation values.
d.vec <- seq(0, max(D), length = 100)      
Cor.M <- (Kappa * d.vec) * besselK(Kappa * d.vec, 1) 

mat_tib <- tibble(d.vec, Cor.M) # save into tibble for ggplotting

## ggplot matern correlation
mat_plot <- mat_tib %>% 
  ggplot(aes(x = d.vec, y = Cor.M)) +
  geom_line() +
  geom_hline(yintercept = 0.1, lty = 2) +
  geom_vline(xintercept = range, lty = 2) +
  scale_y_continuous(expand = c(0, 0), limits = c(0, 1)) +
  scale_x_continuous(expand = c(0, 0), limits = c(0, 252)) +
  xlab("Distance (km)") +
  ylab("Correlation") +
  mytheme_ts

mat_plot

ggsave(plot = mat_plot, filename = file.path(figures_path, "matern_corr_dist_plot.tiff"),
       units = "cm", width = 8.5, height = 6, dpi = 500, compression ="lzw") # width 8.5cm for 1-column & 17cm for 2-column Figure ICES JMS

Figures S1-S3: Plots of Survey hauls by year (vertical)

survey_col_pal <- c("#7CAE00", "#F8766D", "#C77CFF")
model_data_tmp <- model_setup$data[!is.na(model_setup$data$NoPerHaul),] %>%
                              distinct(HaulCode, .keep_all = TRUE) %>% 
                              mutate(Survey = case_when(Survey == "FRCGFS" ~ "FR-CGFS", 
                                                        Survey == "BTS" ~ "NS-BTS", 
                                                        Survey == "NSIBTS" ~ "NS-IBTS"))
year_split_list <- split(1988:2023, ceiling(seq_along(1988:2023)/12)) # split sequence into list elements of equal sizes
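# str(year_split_list) # sanity check: three blocks of 12 years (1988-1999, 2000-2011, 2012-2023),
#                      # presumably one block per Figure S1-S3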

list_plot_hauls_by_year <- purrr::map(.x = year_split_list,
                              .f = function(year_seq){
                                model_data_by_year_split <- subset(model_data_tmp, Year %in% year_seq)
                                
                                year_split_name <- paste(min(model_data_by_year_split$Year), max(model_data_by_year_split$Year), sep = "_")
                                
                                plot_hauls_by_year <- model_data_by_year_split %>%
                                  ggplot() +
                                  geom_point(aes(x = XkmRounded, y = YkmRounded , colour = Survey), size = 0.2, alpha = 0.9) +
                                  scale_colour_manual(values = survey_col_pal) + 
                                  geom_sf(data = GNS_countries_UTM_km, fill = "grey90", colour = "white") + #alpha = 0.3
                                  facet_wrap(~ Year, ncol = 4) +
                                  labs(x = "Longitude", y = "Latitude") +
                                  theme_classic() +
                                  guides(colour = guide_legend(override.aes = list(size=3))) + # adjust legend point size
                                  theme(axis.text = element_text(size = 8),
                                        axis.title = element_text(size = 10),
                                        legend.position = "bottom",
                                        legend.text = element_text(size = 8),
                                        legend.title = element_text(size = 8))

            ggsave(plot = plot_hauls_by_year, filename = file.path(figures_path, paste0("plot_hauls_by_year_", year_split_name, "_vertical.tiff")),
                          units = "cm", width = 15.8, height = 21.0, dpi = 500, compression ="lzw")
                                              
            return(plot_hauls_by_year)                  
                  })

number of mesh nodes

mesh$n

Figure S5: mesh plot with hauls

mesh_plot <- ggplot() +
  geom_point(data = model_data_tmp, aes(x = XkmRounded, y = YkmRounded , colour = Survey), size = 0.1, alpha = 0.9) +
  scale_colour_manual(values = survey_col_pal) + 
  geom_sf(data = GNS_countries_UTM_km, fill = "grey90", colour = "white") + #alpha = 0.3
  gg(data = mesh, edge.color = "steelblue1", int.color = "royalblue", ext.color = "royalblue", 
     edge.linewidth = 0.1,
     int.linewidth = 0.2,
     ext.linewidth = 0.2) +
  labs(x = "Longitude", y = "Latitude") +
  theme_classic() +
  guides(colour = guide_legend(override.aes = list(size=3))) + # adjust legend point size
  mytheme

mesh_plot

ggsave(plot = mesh_plot, filename = file.path(figures_path, "mesh_plot.tiff"),
       units = "cm", width = 15.8, height = 20, dpi = 500, compression ="lzw") 

Figures S7 & S8: plot posterior mean and sd of the spatial field

interior_poly <-  model_setup$interior_mesh_poly 

# select year nodes
y_mesh <- model_setup$y_mesh
years <- y_mesh$loc

# define spatial posterior mean and sd
w.pm <- IM$summary.random$w$mean
w.psd <- IM$summary.random$w$sd
w.pq05 <- IM$summary.random$w$`0.5quant`

Loc_new <- cbind(mesh$loc[,1], mesh$loc[,2])# based on mesh extensions
xl = round(range(mesh$loc[,1]))
yl = round(range(mesh$loc[,2]))
xres <- xl[2]-xl[1] + 1
yres <- yl[2]-yl[1] + 1

wproj_5km2 <- inla.mesh.projector(mesh,
                                  dims = c(length(seq(range(Loc_new[,1])[1], range(Loc_new[,1])[2], by = 5)),
                                           length(seq(range(Loc_new[,2])[1], range(Loc_new[,2])[2], by = 5))),
                                  xlim = range(Loc_new[,1]), ylim = range(Loc_new[,2])) # regular projection grid at 5 km resolution
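# Note (a sketch of what the projector does, as I understand it): inla.mesh.projector() builds a
# mapping from the mesh nodes to this regular lattice; inla.mesh.project() below interpolates the
# node-level posterior summaries onto it.
# dim(wproj_5km2$proj$A) # sparse projection matrix: (n lattice cells) x (n mesh nodes)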

grid <- expand.grid(year= years, X = wproj_5km2$x, Y = wproj_5km2$y, zm = NA, zsd = NA, zq05 = NA)

w.st <- model_setup$w.st
for (i in unique(w.st$w.group)){
  w.pm_5km2 <- inla.mesh.project(wproj_5km2, w.pm[w.st$w.group==i])
  grid[grid$year == years[i],]$zm <- as.vector(w.pm_5km2)  
  
  #assign posterior sd to grid cells
  w.psd_5km2 <- inla.mesh.project(wproj_5km2, w.psd[w.st$w.group == i])
  grid[grid$year == years[i],]$zsd <- as.vector(w.psd_5km2)
  
  #assign posterior median (0.5quant)
  w.pq05_5km2 <- inla.mesh.project(wproj_5km2, w.pq05[w.st$w.group == i])
  grid[grid$year == years[i],]$zq05 <- as.vector(w.pq05_5km2)
}

# identify grid points that are over land or outside the area of interest (interior mesh polygon)
# and set zm, zsd and zq05 to NA there, so they plot as transparent
GNS_countries_UTM_km_sp <- as_Spatial(GNS_countries_UTM_km)
grid_sf <- grid %>% st_as_sf(coords = c("X", "Y")) %>% st_set_crs("+proj=utm +zone=31 +units=km")
grid_sf$notover <- grid_sf %>% st_intersects(GNS_countries_UTM_km) %>% lengths() == 0 # TRUE if not over land
grid_sf$not_in_poly <- grid_sf %>% st_intersects(interior_poly) %>% lengths() == 0 # TRUE if outside the interior mesh polygon

grid_sf <- grid_sf %>%
  mutate(across(c(zm, zsd, zq05), ~ if_else(notover & !not_in_poly, .x, NA_real_))) # mask values over land or outside the study area

grid <- cbind(st_drop_geometry(grid_sf), st_coordinates(grid_sf))
plot_w_post_mean <- grid %>% 
  ggplot() +
  geom_raster(aes(x = X, y = Y, fill = zm)) +
  geom_sf(data = NS_EEC_poly_simpl_UTM_km, fill = NA, colour = NA, size = 0.1) + 
  geom_sf(data = GNS_countries_UTM_km, size = 0.1) + 
  scale_fill_gradientn(colors = tim.colors(31), na.value = "transparent",
                       guide = guide_colourbar(title.position= "top", title.theme= element_text(size=8))) +
  geom_sf(data = interior_poly, fill = NA, col = "white") +
  coord_sf(xlim = c(GNS_bbox["xmin"],GNS_bbox["xmax"]), ylim = c(GNS_bbox["ymin"], GNS_bbox["ymax"]), expand=F) + 
  facet_wrap(~ floor(year), ncol = round(sqrt(n_distinct(grid$year)), 0)) +
  labs(x = "Longitude", y = "Latitude", fill= "Posterior mean") +
  mytheme
  
plot_w_post_mean

# save spatio-temporal posterior mean
ggsave(plot = plot_w_post_mean,
       filename = file.path(figures_path, "spatio-temporal_posterior_mean.tiff"), 
       units = "cm", width = 15.8, height = 22.4, dpi = 500, compression="lzw")
# plot spatio-temporal posterior sd
plot_w_post_sd <- grid %>%
  ggplot() +
  geom_raster(aes(x = X, y = Y, fill = zsd)) +
  geom_sf(data = NS_EEC_poly_simpl_UTM_km, fill = NA, colour = NA, size = 0.1) + 
  geom_sf(data = GNS_countries_UTM_km, size = 0.1) + 
  scale_fill_gradientn(colors = tim.colors(31), na.value = "transparent",
                       guide = guide_colourbar(title.position= "top", title.theme= element_text(size=8))) + 
  geom_sf(data = interior_poly, fill = NA, col = "white") +
  coord_sf(xlim = c(GNS_bbox["xmin"],GNS_bbox["xmax"]), ylim = c(GNS_bbox["ymin"], GNS_bbox["ymax"]), expand=F) + 
  facet_wrap(~ floor(year), nrow = round(sqrt(n_distinct(grid$year)), 0)) +
  labs(x = "Longitude", y = "Latitude", fill= "Posterior SD") +
  mytheme

plot_w_post_sd

# save spatio-temporal posterior sd
ggsave(plot = plot_w_post_sd, filename = file.path(figures_path, "spatio-temporal_posterior_sd.tiff"), 
       units = "cm", width = 15.8, height = 22.4, dpi = 500, compression="lzw")

Figure 3: Abundance prediction maps: median and SD from the prediction model

pred_data <- readRDS(file.path(results_path, model_pred_name, paste0(model_pred_name, "_data.rds")))
# modify levels and coding of Exploit_Maturity for plotting
pred_data_mod <- pred_data %>% 
  mutate(Exploit_Maturity = factor(Exploit_Maturity, levels=c("Unexploited_Immature", "Exploited_Immature", "Exploited_Mature"))) %>% 
  mutate(Exploit_Maturity = recode_factor(Exploit_Maturity, "Unexploited_Immature" = "Size class 1", "Exploited_Immature" = "Size class 2", "Exploited_Mature" = "Size class 3"))
(years_to_plot <- floor(seq(min(pred_data_mod$Year), max(pred_data_mod$Year), length = 6)))

predicted catch median

n_colour_median <- pred_data_mod %>% 
  filter(Year %in% years_to_plot) %>% summarise(range = ceiling(diff(range(log10(predicted_catch_median))))) %>% pull(range)

log10_total_median_abundance_spatial_plots <- pred_data_mod  %>% 
  filter(Year %in% years_to_plot) %>% # only plot every 4th year
  ggplot() +
  geom_raster(aes(x = XkmRounded, y = YkmRounded, fill = (predicted_catch_median))) +
  geom_sf(data = GNS_countries_UTM_km) + 
  scale_fill_gradientn(colors = tim.colors(n_colour_median), trans = "log10",
                       breaks = trans_breaks("log10", function(x) 10^x, n = 10),
                       labels = trans_format("log10", math_format(10^.x))) +
  coord_sf(xlim = st_bbox(interior_poly)[c("xmin", "xmax")] + c(30, -50), ylim = st_bbox(interior_poly)[c("ymin", "ymax")] + c(50, -80)) +
  scale_x_continuous(breaks = c(0,5)) +
  facet_wrap(~Year) +
  labs(x = "Longitude", y = "Latitude", fill = bquote("log10 abundance per km"^2)) +
  mytheme_ext

log10_total_median_abundance_spatial_plots

Figure 3: predicted exploitation-maturity specific catches over the years (median and sd)

predicted catch median

pred_data_mod$predicted_catch_median_squashed <- pmax(pred_data_mod$predicted_catch_median, 1) # squash values below 1 to 1 so the log10 colour scale starts at 10^0

rng_squashed <- range(log10(pred_data_mod$predicted_catch_median_squashed))

n_colour_median <- pred_data_mod %>% 
  filter(Year %in% years_to_plot) %>% 
  summarise(range =   ceiling(diff(rng_squashed))) %>%
  pull(range)

# build expression labels 10^0, 10^1, ... and relabel the lowest bin as "<10^0" to flag the squashed values
gradlabs <- do.call(expression, sapply(rng_squashed[1]:ceiling(rng_squashed[2]), function(i) bquote(10^.(i))))
gradlabs[[1]] <- bquote("<"*10^0)

log10_total_median_abundance_spatial_plots <- pred_data_mod  %>% 
  filter(Year %in% years_to_plot) %>% # only plot every 4th year
  ggplot() +
  geom_raster(aes(x = XkmRounded, y = YkmRounded, fill = (predicted_catch_median_squashed))) +
  geom_sf(data = GNS_countries_UTM_km) + 
  scale_fill_gradientn(colors = tim.colors(n_colour_median), trans = "log10",
                       breaks = trans_breaks("log10", function(x) 10^x, n = 10),
                       #labels = trans_format("log10", math_format(10^.x)),
                       labels =   gradlabs,
                       name   = "Abundance",
                       guide = guide_colourbar(title.position= "top", title.theme= element_text(size=8))) +
  coord_sf(xlim = st_bbox(interior_poly)[c("xmin", "xmax")] + c(30, -50), ylim = st_bbox(interior_poly)[c("ymin", "ymax")] + c(50, -80)) +
  scale_x_continuous(breaks = c(0,5)) +
  facet_grid(Exploit_Maturity ~ Year) +
  labs(x = "Longitude", y = "Latitude") +
  mytheme_ext 

log10_total_median_abundance_spatial_plots

ggsave(plot = log10_total_median_abundance_spatial_plots, 
       filename = file.path(figures_path, paste("log10_exploit_maturity_median_predicted_abundance_spatial_plots.tiff", sep = "_")),
       units = "cm", width = 17, height = 18, dpi = 500, compression="lzw")

predicted catch sd

(n_colour_sd <- pred_data %>% 
  filter(Year %in% years_to_plot) %>%
  summarise(range = ceiling(diff(range(log10(predicted_catch_sd))))) %>%
  pull(range))

log10_total_sd_abundance_spatial_plots <- pred_data_mod  %>% 
  filter(Year %in% years_to_plot) %>% # only plot every 4th year
  ggplot() +
  geom_raster(aes(x = XkmRounded, y = YkmRounded, fill = (predicted_catch_sd))) +
  geom_sf(data = GNS_countries_UTM_km) + 
  scale_fill_gradientn(colors = tim.colors(n_colour_sd), trans = "log10",
                       breaks = trans_breaks("log10", function(x) 10^x, n = 10),
                       labels = trans_format("log10", math_format(10^.x))
  ) +
  coord_sf(xlim = c(80, 900), ylim = c(5460, 6800)) +
  facet_grid(Exploit_Maturity ~ Year) +
  labs(x = "Longitude", y = "Latitude", fill = bquote("log10 sd of abundance per km"^2)) +
  mytheme_ext

log10_total_sd_abundance_spatial_plots

Simulation Results of total predicted numbers

# load simulation of predictions
sim_data_raw <- read_rds(file = file.path(results_path, model_pred_name, paste0(model_pred_name, "_sim1000.rds"))) %>% ungroup()

head(sim_data_raw) %>% as.data.frame()
sim_col_names <- colnames(sim_data_raw)[str_detect(colnames(sim_data_raw), pattern = "sim")]

final_sim_long <- sim_data_raw %>% 
  group_by(Year_Quarter, Exploit_Maturity, fSurvey) %>% 
  summarise(across(.cols = all_of(sim_col_names), ~ sum(.x))) %>%
  group_by(Year_Quarter, Exploit_Maturity) %>%
  pivot_longer(cols = all_of(sim_col_names), names_to = "sim") %>%
  mutate(mean = mean(value), sd = sd(value), q025 = quantile(value, 0.025), q975 = quantile(value, 0.975))

final_sim_long <- final_sim_long %>% ungroup() %>%
  select(-c(mean, sd, q025, q975)) # remove mean, sd, and quantiles as this will be recalculated from values

Account for catchability by dividing the predicted counts per grid cell by the gear efficiency (catchability) of each size class, and estimate biomass per grid cell (in g) from the size-class mean weight
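For example (made-up numbers, same formula as applied in the chunk below): a predicted count of 10 with gear efficiency 0.4 and the additional 0.95 multiplier becomes 10 / 0.4 / 0.95 ≈ 26.3 individuals; multiplying by a mean weight of 1500 g gives roughly 39.5 kg for that grid cell.

# 10 * 1/0.4 * 1/0.95          # adjusted abundance, ~26.3 individuals
# 10 * 1/0.4 * 1/0.95 * 1500   # biomass in grams, ~39474 g (~39.5 kg)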

# uses data in exploit_maturity_length_class_stats object

# add gear efficiency to pred_data
final_sim_long <- final_sim_long %>% 
  mutate(Gear = case_when(fSurvey %in% c("FRCGFS", "NSIBTS") ~ "GOV",
                          fSurvey %in% c("BTS") ~ "BEAM")) %>%
  left_join(exploit_maturity_length_class_stats, by = c("Exploit_Maturity", "Gear"))

# adjust the simulated values by two efficiency multipliers:
# - gear efficiency by size class, and the group 3 age class 10 efficiency (0.95) from Walker et al. 2017
# - then estimate biomass by multiplying the adjusted abundance by the size-class mean weight (mean_weight)
walker_group_3_efficiency_largest <- 0.95

final_sim_long <- final_sim_long %>% 
  mutate(sim_abundance = value * 1/Efficiency * 1/walker_group_3_efficiency_largest, #
         sim_biomass = sim_abundance * mean_weight)

head(final_sim_long)

Figure 5 preparation: Exploit_Maturity abundance and biomass prediction with credible intervals from sim results

mean and quantiles for Exploit_Maturity abundance and biomass

n_sim <- n_distinct(final_sim_long$sim)

explt_mat_final_sim_long <- final_sim_long %>% 
  group_by(Year_Quarter, Exploit_Maturity) %>% 
  mutate(mean_abundance = mean(sim_abundance),
         q0.025_abundance = quantile(sim_abundance, 0.025),
         q0.5_abundance = quantile(sim_abundance, 0.50),
         q0.975_abundance = quantile(sim_abundance, 0.975),
         mean_biomass = mean(sim_biomass),
         q0.025_biomass = quantile(sim_biomass, 0.025),
         q0.5_biomass = quantile(sim_biomass, 0.50),
         q0.975_biomass = quantile(sim_biomass, 0.975),
  ) %>%
  distinct(Year_Quarter, Exploit_Maturity, .keep_all = TRUE)

head(explt_mat_final_sim_long)
explt_mat_final_sim_long <- explt_mat_final_sim_long %>% 
  ungroup() %>%
  mutate(Exploit_Maturity = fct_relevel(Exploit_Maturity, c("Unexploited_Immature", "Exploited_Immature", "Exploited_Mature")))

(pre_2000_max     <- explt_mat_final_sim_long %>% 
  group_by(Exploit_Maturity) %>%
  filter(Year_Quarter < 2000) %>%
  filter(q0.5_abundance == max(q0.5_abundance)) %>%
  arrange(Exploit_Maturity, Year_Quarter))

## recent estimates 2023
(explt_mat_final_sim_long %>% filter(Year_Quarter == 2023) %>% arrange(Exploit_Maturity, Year_Quarter))
# plot size-class abundance (median) with 95% CI on a log scale
exploit_mat_median_sim_abun_GNS <- explt_mat_final_sim_long   %>%
  mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                      Exploit_Maturity == "Exploited_Immature" ~ "2",
                                      Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
  mutate(Exploit_Maturity = factor(Exploit_Maturity, levels = 1:3)) %>%
  ungroup() %>%
  ggplot(aes(x = Year_Quarter, y = q0.5_abundance)) +
  geom_line(aes(colour = Exploit_Maturity)) +
  geom_ribbon(aes(ymin = q0.025_abundance, ymax = q0.975_abundance, fill = Exploit_Maturity), alpha = 0.3) +
  geom_hline(data = pre_2000_max %>%
               mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                                   Exploit_Maturity == "Exploited_Immature" ~ "2",
                                                   Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
               mutate(Exploit_Maturity = factor(Exploit_Maturity, levels =  1:3)), aes(yintercept = q0.5_abundance, colour = Exploit_Maturity), lty = "dashed") +
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_log10(labels = trans_format("log10", math_format(10^.x)), limits = c(1e5, 2e8), expand=expansion(mult=c(0,0.05))) + 
  annotation_logticks() +
  xlab("Year") +
  ylab("Abundance") +
  scale_color_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+
  scale_fill_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+ 
  mytheme_ts
 
exploit_mat_median_sim_abun_GNS

Calculate 95% credible intervals for total abundance in each year by summing the simulations over grid cells

The 0.025 and 0.975 quantiles of the posterior draws correspond to 95% credible intervals in Bayesian models.
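Illustration with toy draws (not model output): the credible interval is simply the empirical quantiles of the posterior draws.

# draws <- rlnorm(1000, meanlog = 12, sdlog = 0.4)
# quantile(draws, probs = c(0.025, 0.5, 0.975))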

total_final_sim_long <- final_sim_long %>% group_by(Year_Quarter, sim) %>% 
  mutate(total_sim_abundance = sum(sim_abundance), 
         total_sim_biomass = sum(sim_biomass)) %>%  # get total simulation for abundance and biomass by summing sim_abundance of Exploit_Maturity size classes for each simulation
  group_by(Year_Quarter) %>% 
  transmute(mean_total_abundance = mean(total_sim_abundance),
            q0.025_total_abundance = quantile(total_sim_abundance, 0.025), # mean and quantiles of total abundance for each year
            q0.5_total_abundance = quantile(total_sim_abundance, 0.5),
            q0.975_total_abundance = quantile(total_sim_abundance, 0.975), 
            mean_total_biomass = mean(total_sim_biomass),
            q0.025_total_biomass = quantile(total_sim_biomass, 0.025), # mean and quantiles of total biomass for each year
            q0.5_total_biomass = quantile(total_sim_biomass, 0.5),
            q0.975_total_biomass = quantile(total_sim_biomass, 0.975)
  ) %>%
  distinct(Year_Quarter, .keep_all = TRUE)

head(total_final_sim_long)

Give summaries of total abundance from posterior draws

# baselines
(total_pre_2000_max_q0.5_total_abundance <- total_final_sim_long %>%
  ungroup() %>%
  filter(Year_Quarter < 2000) %>%
  filter(q0.5_total_abundance  == max(q0.5_total_abundance))) 

(total_2023 <- total_final_sim_long %>% 
    ungroup() %>%
    filter(Year_Quarter == 2023))

Additional abundance estimates reported in the paper

Size class 1 (Unexploited_Immature)

explt_mat_final_sim_long %>% filter(Exploit_Maturity == "Unexploited_Immature") %>% select(Year_Quarter,Exploit_Maturity,q0.5_abundance, q0.025_abundance, q0.975_abundance )

Size class 2 (Exploited_Immature)

explt_mat_final_sim_long %>% filter(Exploit_Maturity == "Exploited_Immature") %>% select(Year_Quarter,Exploit_Maturity,q0.5_abundance, q0.025_abundance, q0.975_abundance )

Size class 3 (Exploited_Mature)

explt_mat_final_sim_long %>% filter(Exploit_Maturity == "Exploited_Mature") %>% select(Year_Quarter,Exploit_Maturity,q0.5_abundance, q0.025_abundance, q0.975_abundance )
total_final_sim_long %>% select(Year_Quarter,q0.5_total_abundance, q0.025_total_abundance, q0.975_total_abundance)

Plot the median total abundance

summary_statistic_to_plot <-  "q0.5_total_abundance"  # alternative: "mean_total_abundance" (not used in the plot below; the median is hard-coded)

log10_tot_median_sim_abun_GNS <- ggplot() +
  geom_line(data = total_final_sim_long, aes(x = Year_Quarter, y = q0.5_total_abundance)) + # median total predicted abundance
  geom_ribbon(data = total_final_sim_long, aes(x = Year_Quarter, y = q0.5_total_abundance, ymin = q0.025_total_abundance, ymax = q0.975_total_abundance), alpha = 0.3) +
  geom_hline(yintercept = total_pre_2000_max_q0.5_total_abundance$q0.5_total_abundance, lty = "dashed") +
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_log10(labels = trans_format("log10", math_format(10^.x)), limits = c(1e5,2e8), expand=expansion(mult=c(0,0.05)))+
  annotation_logticks(sides ="l") +
  xlab("Year") +
  ylab("Abundance") +
  mytheme_ts

log10_tot_median_sim_abun_GNS
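Note that summary_statistic_to_plot is not used in the plot above (the median is hard-coded). If the plotted statistic should follow that selector, a tidy-evaluation sketch using ggplot2's .data pronoun (same objects and column names as above) would be:

# sketch: drive the y aesthetic from the character selector via the .data pronoun
ggplot(total_final_sim_long,
       aes(x = Year_Quarter, y = .data[[summary_statistic_to_plot]])) +
  geom_line() +
  scale_y_log10() +
  mytheme_ts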

Figure 5: log10 total abundance predictions

combd_abundance_plot <- exploit_mat_median_sim_abun_GNS +  ylab("Abundance") +   ggtitle("Size class") +  labs(tag="(a)") +  log10_tot_median_sim_abun_GNS + ylab("") + ggtitle("Total") + labs(tag="(b)") & 
  mytheme_ts

combd_abundance_plot

# save log10 total abundance predictions for total study area
ggsave(plot = combd_abundance_plot, filename = file.path(figures_path, "combd_median_sim_abundance_GNS.tiff"),
       units = "cm", width = 17, height = 7, dpi = 500, compression="lzw")

Give summaries of total biomass from posterior draws

# baselines
(total_pre_2000_max_q0.5_total_biomass <- total_final_sim_long %>%
  ungroup() %>%
  filter(Year_Quarter < 2000) %>%
  filter(q0.5_total_biomass  == max(q0.5_total_biomass)) )

(total_2023                            <- total_final_sim_long %>% ungroup() %>% filter(Year_Quarter == 2023))
total_final_sim_long %>% ungroup()  %>% 
  as.data.frame() %>%
  select(q0.025_total_biomass, q0.5_total_biomass, q0.975_total_biomass) %>%
  mutate_if(is.numeric, .funs = function(x)x/1e6) %>% add_column(Year_Quarter = total_final_sim_long$Year_Quarter) %>% relocate(Year_Quarter)

Figure 6: Median predicted biomass

# unit conversion: grams to tonnes = /1e6, tonnes to thousands of tonnes = /1e3, so divide by 1e9 in total
ton_tot_median_sim_biomass_GNS <- ggplot() +
  geom_line(data = total_final_sim_long, aes(x = Year_Quarter, y = q0.5_total_biomass/1e9)) + # median total predicted biomass
  geom_ribbon(data = total_final_sim_long, aes(x = Year_Quarter, y = q0.5_total_biomass/1e9, ymin = q0.025_total_biomass/1e9, ymax = q0.975_total_biomass/1e9), alpha = 0.3) +
  geom_hline(yintercept = total_pre_2000_max_q0.5_total_biomass$q0.5_total_biomass/1e9, lty = "dashed") +
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_continuous(limits=c(0, NA), expand=expansion(mult=c(0,0.05))) +
  xlab("Year") +
  ylab("Biomass (Tonnes x 1000)") +
  mytheme_ts

ton_tot_median_sim_biomass_GNS

# save total median biomass predictions (tonnes x 1000) for the total study area
ggsave(plot = ton_tot_median_sim_biomass_GNS, filename = file.path(figures_path, "tonnes_total_median_sim_biomass_GNS.tiff"),
       units = "cm", width = 8.5, height = 6, dpi = 500, compression="lzw")

Biomass numbers for the manuscript

total_final_sim_long %>% select(q0.025_total_biomass, q0.5_total_biomass, q0.975_total_biomass)

Figures for Exploit Maturity biomass

(pre_2000_max_biomass <- explt_mat_final_sim_long %>% group_by(Exploit_Maturity) %>% filter(Year_Quarter < 2000) %>% filter(q0.5_biomass == max(q0.5_biomass)))

## recent estimates 2023
(explt_mat_final_sim_long %>% filter(Year_Quarter == 2023))
# recode Exploit_Maturity to size classes 1-3 for the biomass plots
tmp_biomass <- explt_mat_final_sim_long %>%
  mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                      Exploit_Maturity == "Exploited_Immature" ~ "2",
                                      Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
  mutate(Exploit_Maturity = factor(Exploit_Maturity, levels =  1:3)) %>%
  ungroup()
  
log10_total_maturity_sim_biomass_GNS <- tmp_biomass %>%
  ggplot(aes(x = Year_Quarter, y = q0.5_biomass/1e3)) +
  geom_line(aes(colour = Exploit_Maturity)) +
  geom_ribbon(aes(ymin = q0.025_biomass/1e3, ymax = q0.975_biomass/1e3, fill = Exploit_Maturity), alpha = 0.3) +
  geom_hline(data = pre_2000_max_biomass %>%
               mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                                   Exploit_Maturity == "Exploited_Immature" ~ "2",
                                                   Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% 
               mutate(Exploit_Maturity = factor(Exploit_Maturity, levels =  1:3)), aes(yintercept = q0.5_biomass/1e3, colour = Exploit_Maturity), lty = "dashed") +
  scale_y_log10(labels = trans_format("log10", math_format(10^.x)), limits = c(1e5, NA), expand=expansion(mult=c(0,0.05))) +
  annotation_logticks() +
   scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  ylab("Biomass (kg)") +
  xlab("Year") +
  scale_color_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+
  scale_fill_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+ 
  mytheme_ts +
  theme(legend.box.margin=margin(c(0,0,0,0)))

log10_total_maturity_sim_biomass_GNS
ton_total_mat_sim_biomass_GNS <- tmp_biomass %>% 
  ggplot(aes(x = Year_Quarter, y = q0.5_biomass/1e9)) +
  geom_line(aes(colour = Exploit_Maturity)) +
  geom_ribbon(aes(ymin = q0.025_biomass/1e9, ymax = q0.975_biomass/1e9, fill = Exploit_Maturity), alpha = 0.3) +
  geom_hline(data = pre_2000_max_biomass %>%
               mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                                   Exploit_Maturity == "Exploited_Immature" ~ "2",
                                                   Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
               mutate(Exploit_Maturity = factor(Exploit_Maturity, levels =  1:3)), aes(yintercept = q0.5_biomass/1e9, colour = Exploit_Maturity), lty = "dashed") +
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_continuous(expand = c(0, 0), limits = c(0, 33)) +
  ylab("Biomass (tonnes x 1000)") +
  xlab("Year") +
  theme_bw() +
  scale_color_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+
  scale_fill_discrete(name="Size class", guide = guide_legend(title.position= "top", title.theme= element_text(size=8)))+ 
  mytheme_ts +
  theme(  legend.box.margin=margin(c(0,0,0,0)))
  
ton_total_mat_sim_biomass_GNS

Figure S9: Median predicted biomass of size classes

combd_biomass_plot <- ton_total_mat_sim_biomass_GNS +   ggtitle("Linear scale") + labs(tag="(a)") + log10_total_maturity_sim_biomass_GNS  + ggtitle("Logarithmic scale") + labs(tag="(b)") & 
  mytheme_ts

combd_biomass_plot

# save combined size-class biomass plot for the total study area
ggsave(plot = combd_biomass_plot, filename = file.path(figures_path, "combd_exploit_maturity_median_sim_biomass_GNS.tiff"),
       units = "cm", width = 15.8, height = 7, dpi = 500, compression="lzw")  

Figure: abundance and biomass by exploitation class (Exploited vs Unexploited)

Summarise across exploitation classes, account for gear (GOV/BEAM) catchability per exploitation class, and estimate biomass (g) per km2

# uses data from object exploit_length_class_stats

sim_col_names <- str_subset(colnames(sim_data_raw), pattern = "sim")

# summarise simulation data across exploitation classes (Exploited vs Unexploited)
sim_exploit <- sim_data_raw %>% mutate(Exploit = case_when(str_detect(Exploit_Maturity, "Unexploited") ~ "Unexploited",
                                                       str_detect(Exploit_Maturity, "Exploited") ~ "Exploited")) %>% 
  group_by(Year_Quarter, Exploit, fSurvey) %>% 
  summarise(across(.cols = sim_col_names, ~ sum(.x))) %>% ungroup()

# add gear efficiency to sim_exploit
sim_exploit_long <- sim_exploit %>% 
  mutate(Gear = case_when(fSurvey %in% c("FRCGFS", "NSIBTS") ~ "GOV",
                                                   fSurvey %in% c("BTS") ~ "BEAM")) %>%
  group_by(Year_Quarter, Exploit, fSurvey) %>% 
  pivot_longer(cols = sim_col_names, names_to = "sim") %>% 
  left_join(exploit_length_class_stats, by = c("Exploit", "Gear"))

head(sim_exploit_long)
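A side note on the across() calls: recent tidyselect versions warn when a bare character vector such as sim_col_names is used as a selection; wrapping it in all_of() is the recommended, behaviour-preserving form. A sketch of the same summary step:

# sketch: identical summary, with all_of() to avoid the tidyselect external-vector warning
sim_exploit <- sim_data_raw %>%
  mutate(Exploit = case_when(str_detect(Exploit_Maturity, "Unexploited") ~ "Unexploited",
                             str_detect(Exploit_Maturity, "Exploited") ~ "Exploited")) %>%
  group_by(Year_Quarter, Exploit, fSurvey) %>%
  summarise(across(all_of(sim_col_names), sum), .groups = "drop")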

# correct simulated abundances for gear efficiency and Walker et al. (2017) length-group efficiency, then estimate biomass by multiplying abundance by mean_weight
sim_exploit_long <- sim_exploit_long %>% ungroup() %>%
  mutate(sim_abundance = value * 1/Efficiency * 1/walker_group_3_efficiency_largest,
         sim_biomass = sim_abundance * mean_weight) %>%
  distinct(Year_Quarter, Exploit, sim, .keep_all = TRUE)

head(sim_exploit_long)
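To make the catchability correction explicit, a toy calculation with made-up numbers (the real Efficiency, walker_group_3_efficiency_largest and mean_weight values come from the gear-efficiency and length-class tables loaded earlier):

# toy numbers only: catchability correction and biomass conversion for a single row
value       <- 100   # simulated (gear-specific) catch
efficiency  <- 0.4   # hypothetical gear efficiency for this class
walker_eff  <- 0.8   # hypothetical Walker et al. (2017) group-3 efficiency
mean_weight <- 2500  # hypothetical mean individual weight (g)
(abundance <- value * 1/efficiency * 1/walker_eff) # 312.5 individuals
(biomass   <- abundance * mean_weight)             # 781250 g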

Mean and quantiles of abundance and biomass by exploitation class

n_sim <- n_distinct(sim_exploit_long$sim)

exploit_final_sim_long <- sim_exploit_long %>% 
  group_by(Year_Quarter, Exploit) %>% 
  mutate(mean_abundance = mean(sim_abundance),
         q0.025_abundance = quantile(sim_abundance, 0.025),
         q0.5_abundance = quantile(sim_abundance, 0.50),
         q0.975_abundance = quantile(sim_abundance, 0.975),
         mean_biomass = mean(sim_biomass),
         q0.025_biomass = quantile(sim_biomass, 0.025),
         q0.5_biomass = quantile(sim_biomass, 0.50),
         q0.975_biomass = quantile(sim_biomass, 0.975),
  ) %>%
  distinct(Year_Quarter, Exploit, .keep_all = TRUE)

head(exploit_final_sim_long)
pre_2000_max <- exploit_final_sim_long %>% group_by(Exploit) %>% filter(Year_Quarter < 2000) %>% filter(q0.5_abundance == max(q0.5_abundance)); pre_2000_max

Plot abundance and biomass predictions by exploitation class with credible intervals

# plot median abundance by exploitation class with 95% CI, on a log10 scale
log10_total_exploit_median_sim_abundance_GNS <- exploit_final_sim_long %>%
  ungroup() %>%
  ggplot(aes(x = Year_Quarter, y = q0.5_abundance)) +
  geom_line(aes(colour = Exploit)) +
  geom_ribbon(aes(ymin = q0.025_abundance, ymax = q0.975_abundance, fill = Exploit), alpha = 0.3) +
  geom_hline(data = pre_2000_max, aes(yintercept = q0.5_abundance, colour = Exploit), lty = "dashed") +
  scale_y_log10(labels = trans_format("log10", math_format(10^.x)), limits = c(1e5, NA), expand=expansion(mult=c(0,0.05))) +  
  annotation_logticks() +
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  ylab("Abundance") +
  xlab("Year") +
  mytheme

log10_total_exploit_median_sim_abundance_GNS

Plot biomass by exploitation class

pre_2000_max_biomass <- exploit_final_sim_long %>% group_by(Exploit) %>% filter(Year_Quarter < 2000) %>% filter(q0.5_biomass == max(q0.5_biomass)); 
# plot log10 biomass by exploitation class for the entire North Sea
log10_total_exploit_median_sim_biomass_GNS <- exploit_final_sim_long %>%
  ungroup() %>% 
  ggplot(aes(x = Year_Quarter, y = q0.5_biomass/1e3)) +
  geom_line(aes(colour = Exploit)) +
  geom_ribbon(aes(ymin = q0.025_biomass/1e3, ymax = q0.975_biomass/1e3, fill = Exploit), alpha = 0.3) +
  geom_hline(data = pre_2000_max_biomass, aes(yintercept = q0.5_biomass/1e3, colour = Exploit), lty = "dashed") +
   scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_log10(labels = trans_format("log10", math_format(10^.x)), limits = c(1e5, NA), expand = expansion(mult=c(0,0.05))) + 
  annotation_logticks() +
  xlab("Year") +
  ylab("Biomass (kg)") +
  mytheme

log10_total_exploit_median_sim_biomass_GNS

Exploitation ratio (Exploited:Unexploited) of simulated predicted abundance

Calculate the ratio from the efficiency-corrected simulated abundances so that 95% credible intervals can be obtained, and compute an 8-year (generation time) moving average from the same simulated data so that 95% credible intervals are also available for the moving average.
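A minimal illustration of the centred rolling mean used for the 8-year window (toy vector; requires the zoo package):

# toy example: a centred 8-year moving average pads the ends of the series with NA
library(zoo)
x <- 1:12
zoo::rollmean(x, k = 8, fill = NA, align = "center")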

generation_time <- 8
exploit_ratio_GNS <- sim_exploit_long %>% #filter(sim  %in% c("sim1", "sim2")) %>%
  select(Year_Quarter, Exploit, sim, sim_abundance) %>%
  pivot_wider(names_from = Exploit, values_from = c(sim_abundance)) %>%
  mutate(exploit_ratio_abundance = Exploited/Unexploited) %>% # calculate ratios
  group_by(sim) %>%
  mutate(MA8_exploit_ratio_abundance = zoo::rollmean(exploit_ratio_abundance, k = generation_time, fill=NA, align='center')) %>% # moving average of the ratio over the generation time (8 years)
  group_by(Year_Quarter) %>%
  transmute(q0.025_ratio_abundance = quantile(exploit_ratio_abundance, 0.025),
            q0.5_ratio_abundance = quantile(exploit_ratio_abundance, 0.5),
            q0.975_ratio_abundance = quantile(exploit_ratio_abundance, 0.975), 
          
            #8 year moving average
            q0.025_MA8_ratio_abundance = quantile(MA8_exploit_ratio_abundance, 0.025, na.rm = TRUE),
            q0.5_MA8_ratio_abundance = quantile(MA8_exploit_ratio_abundance, 0.5, na.rm = TRUE),
            q0.975_MA8_ratio_abundance = quantile(MA8_exploit_ratio_abundance, 0.975, na.rm = TRUE)
  ) %>% 
  distinct(Year_Quarter, .keep_all = TRUE)

exploit_ratio_GNS

Summary of the exploitation ratio

pre_2000_max_MA8_EUR_q0.5_total_abundance <- exploit_ratio_GNS %>% ungroup()  %>% filter(Year_Quarter < 2000) %>% filter(q0.5_MA8_ratio_abundance == max(q0.5_MA8_ratio_abundance, na.rm = TRUE)); pre_2000_max_MA8_EUR_q0.5_total_abundance

Figure 7: Exploited:Unexploited abundance ratio for the GNS, with 8-year moving average

# plot
exploit_median_ratio_plot <- exploit_ratio_GNS %>% 
  ggplot(aes(x = Year_Quarter, y = q0.5_ratio_abundance)) +
  geom_line(lty = "dashed", linewidth = 0.5, aes(col = "EUR")) + 
  geom_ribbon(aes(x = Year_Quarter, y = q0.5_ratio_abundance, ymin = q0.025_ratio_abundance, ymax = q0.975_ratio_abundance, fill = "EUR"), alpha = 0.2) + 
  geom_line(aes(x = Year_Quarter, y = q0.5_MA8_ratio_abundance, col = "MA8_EUR"), linewidth = 0.5, na.rm = TRUE) +
  geom_ribbon(aes(y = q0.5_MA8_ratio_abundance, ymin = q0.025_MA8_ratio_abundance, ymax = q0.975_MA8_ratio_abundance, fill = "MA8_EUR"), alpha = 0.2) + 
  geom_hline(yintercept = pre_2000_max_MA8_EUR_q0.5_total_abundance$q0.5_MA8_ratio_abundance, lty = "dashed", linewidth = 0.5, col = "limegreen") + 
  scale_x_continuous(minor_breaks = seq(min_data_yr,max_data_yr,1), expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
  scale_y_continuous(limits = c(0.00, 1.43),  breaks = seq(0, 1.5, by = 0.2), expand = expansion(mult=c(0,0))) +
  scale_color_manual(name = "", 
                     values = c(EUR = "lightgrey", MA8_EUR = "limegreen"),
                     labels = c("EUR", "8-year moving average")) +
  scale_fill_manual(name = "",
                    values = c(EUR = "lightgrey", MA8_EUR = "limegreen"), 
                    labels = c("EUR", "8-year moving average")) +
  guides(color = guide_legend(title = "")) +
  xlab("Year") +
  ylab("Abundance ratio") +
  mytheme_ts +
  theme(  legend.box.margin=margin(c(0,0,0,0)))

exploit_median_ratio_plot

# save the Exploited:Unexploited abundance ratio plot (with 8-year moving average) for the total study area
ggsave(plot = exploit_median_ratio_plot, filename = file.path(figures_path, "Fig7_median_exploited_unexploited_ratio_with_MA8_RJC.tiff"), 
       units = "cm", width  = 8.5, height = 6, dpi = 500, compression="lzw")

Figure 4: Roundfish Areas (RFA)

# convert to simple feature 
sim_loc_sf <- sim_data_raw %>% 
  distinct(XkmRounded, YkmRounded) %>% 
  st_as_sf(coords = c("XkmRounded", "YkmRounded")) %>% 
  st_set_crs("+proj=utm +zone=31 +units=km")

# assign SubAreaDiv (Area_27) & RFA to predicted gridcells
sim_loc_SubAreaDiv_RFA_sf <- sim_loc_sf %>% 
  st_intersection(NS_EEC_poly_simpl_UTM_km) %>% 
  select(Area_27) %>% 
  st_intersection(select(RFA_UTM_km, RFA = AreaName))

# convert to data_frame
sim_loc_SubAreaDiv <- st_coordinates(sim_loc_SubAreaDiv_RFA_sf) %>%
  as_tibble() %>%
  rename(XkmRounded = X, YkmRounded = Y) %>%
  bind_cols(tibble(Area_27 = sim_loc_SubAreaDiv_RFA_sf$Area_27, RFA = sim_loc_SubAreaDiv_RFA_sf$RFA))

# join SubAreaDiv to pred_data based on gridcell XY coordinates
sim_by_area <- sim_data_raw %>%
  left_join(sim_loc_SubAreaDiv, by = c("XkmRounded", "YkmRounded"))

# assign North Sea or English Channel based on ICES Area_27
sim_by_area  <- sim_by_area %>%
  mutate(NS_EC = case_when(Area_27 %in% c("4.a", "4.b", "4.c") ~ "NorthSea",
                           Area_27 %in% c("7.d") ~ "EnglishChannel",
                           is.na(Area_27) ~ NA_character_))

Account for gear catchability per Exploit_Maturity class and estimate biomass (g) per km2

# summarise across RFAs
sim_RFA <- sim_by_area %>% filter(!(is.na(RFA))) %>% group_by(Year_Quarter, Exploit_Maturity, fSurvey, RFA) %>% summarise(across(.cols = sim_col_names, ~ sum(.x)))

# add gear efficiency (GOV/BEAM) to the RFA simulation data
sim_RFA_long <- sim_RFA %>% 
  mutate(Gear = case_when(fSurvey %in% c("FRCGFS", "NSIBTS") ~ "GOV",
                          fSurvey %in% c("BTS") ~ "BEAM")) %>%
  group_by(Year_Quarter, Exploit_Maturity, RFA) %>% 
  pivot_longer(cols = sim_col_names, names_to = "sim") %>% 
  left_join(exploit_maturity_length_class_stats, by = c("Exploit_Maturity", "Gear"))

# correct simulated abundances for gear efficiency and Walker et al. (2017) length-group efficiency, then estimate biomass using the Exploit_Maturity mean_weight
sim_RFA_long <- sim_RFA_long %>% ungroup() %>%
  mutate(sim_abundance = value * 1/Efficiency * 1/walker_group_3_efficiency_largest,
         sim_biomass = sim_abundance * mean_weight) %>%
  distinct(Year_Quarter, RFA, Exploit_Maturity, sim, .keep_all = TRUE)

sim_RFA_long

Calculate simulated abundances for the Exploit_Maturity classes, and the total across classes, for all RFAs of interest

final_sim_RFA_long <- sim_RFA_long %>% group_by(Year_Quarter, Exploit_Maturity, RFA) %>% 
  #pivot_longer(cols = sim_col_names, names_to = "sim") %>% 
  mutate(mean_abundance = mean(sim_abundance), 
         q0.025_abundance = quantile(sim_abundance, 0.025), 
         q0.5_abundance = quantile(sim_abundance, 0.5),
         q0.975_abundance = quantile(sim_abundance, 0.975), 
         mean_biomass = mean(sim_biomass), 
         q0.025_biomass = quantile(sim_biomass, 0.025),
         q0.5_biomass = quantile(sim_biomass, 0.5),
         q0.975_biomass = quantile(sim_biomass, 0.975) ) %>% 
  group_by(Year_Quarter, RFA, sim) %>%
  mutate("total_sim_value" = sum(sim_abundance)) %>% 
  group_by(Year_Quarter, RFA) %>%
  mutate(total_sim_mean = mean(total_sim_value), 
         total_sim_q0.5 = quantile(total_sim_value, 0.5), 
         total_sim_q0.025 = quantile(total_sim_value, 0.025), 
         total_sim_q0.975 = quantile(total_sim_value, 0.975)) %>%
  select(-c(sim, value, sim_abundance)) %>%
  distinct(Year_Quarter, Exploit_Maturity, RFA, .keep_all = TRUE)

head(final_sim_RFA_long)

Summaries of estimated abundances per RFA for the Exploit_Maturity size classes

# get pre 2000 max baseline
(pre_2000_max_RFA <- final_sim_RFA_long %>%
   group_by(Exploit_Maturity, RFA) %>%
   filter(Year_Quarter < 2000) %>%
   filter(q0.5_abundance == max(q0.5_abundance)) %>%
   distinct(Year_Quarter, Exploit_Maturity, q0.5_abundance) %>%
   rename(max_abundance = q0.5_abundance) %>%
   arrange(RFA))

# get estimates for 2023 per RFA
(RFA_pred_abundance_2023 <- final_sim_RFA_long %>%
   distinct(Year_Quarter, Exploit_Maturity,
            q0.025_abundance, q0.5_abundance, q0.975_abundance,
            q0.025_biomass, q0.5_biomass, q0.975_biomass) %>%
   filter(Year_Quarter == 2023) %>%
   arrange(RFA, Year_Quarter) %>%
   relocate(Year_Quarter, RFA) %>%
   ungroup())

# check which RFAs have exceeded the pre-2000 baseline
# test <- left_join(final_sim_RFA_long %>% select("Year_Quarter", "RFA", "Exploit_Maturity", "q0.5_abundance"),
#                   pre_2000_max_RFA, by = c("RFA", "Exploit_Maturity")) %>%
#   filter(q0.5_abundance > max_abundance)
# as.data.frame(test[!duplicated(paste0(test$RFA, test$Exploit_Maturity)), ])
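A runnable version of that commented-out check (a sketch; it assumes the final_sim_RFA_long and pre_2000_max_RFA objects built above):

# which RFA / size-class combinations have exceeded their pre-2000 baseline?
exceeded_baseline <- final_sim_RFA_long %>%
  ungroup() %>%
  select(Year_Quarter, RFA, Exploit_Maturity, q0.5_abundance) %>%
  left_join(pre_2000_max_RFA %>% ungroup() %>% select(RFA, Exploit_Maturity, max_abundance),
            by = c("RFA", "Exploit_Maturity")) %>%
  filter(q0.5_abundance > max_abundance) %>%
  distinct(RFA, Exploit_Maturity, .keep_all = TRUE)
exceeded_baseline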

Figure 4: simulated predicted abundances per RFA, plotted as inset panels on top of the RFA map

extend_x <- 1000
extend_y <- 800

RFA_centroid <- as_tibble(cbind("AreaName" = st_centroid(RFA_UTM_km)$AreaName, st_coordinates(st_centroid(RFA_UTM_km)))) # find centroids for RFAs

# move the centroids of RFAs 3 and 5 to the right so that they sit a bit further away from land
RFA_centroid <- RFA_centroid %>% mutate(X = case_when(AreaName %in% c(3,5) ~ X + 25, TRUE ~ X))

basemap <- ggplot() + 
  geom_sf(data = interior_poly, fill = "skyblue2", col = "white") +
  geom_sf(data = RFA_UTM_km, colour = "Black", alpha = 0, lwd = 0.2) +
  geom_sf(data = st_crop(world_sf, st_bbox(RFA_UTM_km)), lwd = 0.2) +
  geom_text(data = RFA_centroid %>% filter(AreaName %in% c(1:7, 10)), aes(x = X, y = Y, label = AreaName), size = 4, colour = "white") + 
  geom_text(data = RFA_centroid %>% filter(AreaName %in% c(8:9)), aes(x = X, y = Y, label = AreaName), size = 4, alpha = 0.3) +
  coord_sf(xlim = st_bbox((RFA_UTM_km))[c(1,3)] + c(-extend_x , extend_x), ylim = st_bbox((RFA_UTM_km))[c(2,4)] + c(-extend_y, extend_y)) +
  theme_void() +
  theme(
    legend.position = "none", 
    plot.margin = margin(0, 0, 0, 0, "cm") 
  )


xy_annotation <- ggplot() + 
  annotate("text",x= st_bbox((RFA_UTM_km))[c(1)] - (extend_x + 200), y = mean(st_bbox((RFA_UTM_km))[c(2,4)]), label = "Abundance", size = 5, hjust = 0.5, family = "dmmono", lineheight = 0.9, angle = 90) +
  annotate("text",x= mean(st_bbox((RFA_UTM_km))[c(1, 3)]), y = st_bbox((RFA_UTM_km))[c(2)] -(extend_y + 200), label = "Year", size = 5, hjust = 0.5, family = "dmmono", lineheight = 0.9) +
  coord_sf(xlim = st_bbox((RFA_UTM_km))[c(1,3)] + c(-(extend_x + 500), extend_x), ylim = st_bbox((RFA_UTM_km))[c(2,4)] + c(-(extend_y + 500), extend_y)) +
  theme_void() +
  theme(
    legend.position = "none", 
    plot.margin = margin(0, 0, 0, 0, "cm") 
  )


##############################
## plot log10 predicted abundances per RFA
log10_RFA_prediction_list <- list()
for(i in c(3,4,5,10,1,2,7,6)){
  RFA_name <- paste("RFA", i, sep = ": ")
  
   rfa_sim <- final_sim_RFA_long %>%
     mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                      Exploit_Maturity == "Exploited_Immature" ~ "2",
                                      Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
    mutate(Exploit_Maturity = factor(Exploit_Maturity, levels = 1:3)) %>%
    filter(RFA == i)
  
  pre_2000_max <- rfa_sim %>%
    group_by(Exploit_Maturity) %>%
    filter(Year_Quarter < 2000) %>%
    summarise(max = max(q0.5_abundance)) # max pre-2000 abundance per size class, used for the dashed baselines
 
  log10_RFA_prediction_list[[RFA_name]] <- rfa_sim %>%
    ggplot(aes(x = Year_Quarter, y = q0.5_abundance)) +
    geom_line(aes(colour = Exploit_Maturity), lwd = 0.3) +  # plot mean
    geom_ribbon(aes(ymin = q0.025_abundance, ymax = q0.975_abundance, fill = Exploit_Maturity), alpha = 0.3) +
    geom_hline(yintercept = pre_2000_max[pre_2000_max$Exploit_Maturity == "1",]$max, lty = "dashed", colour = "red", lwd = 0.3) + # baseline pre2000 for unexploited immature
    geom_hline(yintercept = pre_2000_max[pre_2000_max$Exploit_Maturity == "2",]$max, lty = "dashed", colour = "green", lwd = 0.3) + # baseline pre2000 for exploited Immature
    geom_hline(yintercept = pre_2000_max[pre_2000_max$Exploit_Maturity == "3",]$max, lty = "dashed", colour = "blue", lwd = 0.3) + # baseline pre2000 for  exploited mature
    scale_x_continuous(expand = c(0, 0), limits = c(min_data_yr, max_data_yr)) +
    scale_y_log10(breaks = trans_breaks("log10", function(x) 10^x), labels = trans_format("log10", math_format(10^.x)), limits = c(1e2, 1e8), expand=c(0,0)) + # log transform y axis
    annotation_logticks(sides="l",
                        short = unit(0.05, "cm"),
                        mid = unit(0.1, "cm"),
                        long = unit(0.15, "cm"),
                        size = 0.1) +
    annotate("text", label = RFA_name,  
             x = 1994, y = 4e2, 
             size = 3) +
    theme_classic() +
    theme(
      axis.text = element_text(size = 8),
      axis.title  = element_blank(), # remove axes title
      axis.line = element_line(size = 0.2),
      legend.position = "none",
      panel.grid.major = element_line(size = (0.2), colour = "grey"),
      panel.border = element_rect(colour = "black", fill = NA, linewidth = 0.5),
      plot.margin = margin(0.1, 0.1, 0.1, 0.1, "cm")) + 
   scale_color_discrete(name = "Size class") +
   scale_fill_discrete(name = "Size class") 
}

# define segments pointing from each RFA centroid towards its inset graph (endpoints depend on the patchwork layout below; set manually here)
area_lines <- tibble(RFA = RFA_centroid[match(c(3,4,5,10,1,2,7,6), RFA_centroid$AreaName), ]$AreaName,
                     x = RFA_centroid[match(c(3,4,5,10,1,2,7,6), RFA_centroid$AreaName), ]$X + rep(c(- 25, 25), each =4) , 
                     y = RFA_centroid[match(c(3,4,5,10,1,2,7,6), RFA_centroid$AreaName), ]$Y) %>%
  mutate(xend = case_when(RFA %in% c(4,5) ~ st_bbox((RFA_UTM_km))[c(1)] + extend_x * -0.3,
                          RFA %in% c(2,7) ~ st_bbox((RFA_UTM_km))[c(3)] + extend_x * 0.3,
                          RFA %in% c(3,10) ~ st_bbox((RFA_UTM_km))[c(1)] + extend_x - diff(st_bbox((RFA_UTM_km))[c(1,3)] + c(-extend_x, extend_x)) * 0.35,
                          RFA %in% c(1,6) ~ st_bbox((RFA_UTM_km))[c(3)] + extend_x - diff(st_bbox((RFA_UTM_km))[c(1,3)] + c(-extend_x, extend_x)) * 0.35),
         yend = case_when(RFA %in% c(2,4) ~ st_bbox((RFA_UTM_km))[c(4)] + extend_y - diff(st_bbox((RFA_UTM_km))[c(2,4)] + c(-extend_y, extend_y)) * 0.375,
                          RFA %in% c(5,7) ~ st_bbox((RFA_UTM_km))[c(4)] + extend_y - diff(st_bbox((RFA_UTM_km))[c(2,4)] + c(-extend_y, extend_y)) * 0.625,
                          RFA %in% c(1,3) ~ st_bbox((RFA_UTM_km))[c(4)] + extend_y - diff(st_bbox((RFA_UTM_km))[c(2,4)] + c(-extend_y, extend_y)) * 0.25,
                          RFA %in% c(6,10) ~ st_bbox((RFA_UTM_km))[c(4)] + extend_y - diff(st_bbox((RFA_UTM_km))[c(2,4)] + c(-extend_y, extend_y)) * 0.75)
  )

# extract legend
mylegend <-  ggpubr::get_legend(log10_RFA_prediction_list[[1]] + theme(legend.position = "right",
                                                                       legend.direction="vertical"))

### combine basemap with log10_RFA_abundances  
log10_RFA_map_sim_abundance_plot <- basemap + 
  geom_segment(data = area_lines, aes(x = x, xend = xend, y = y, yend = yend), lty = 2, lwd = 0.7, alpha = 1) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 3"]],  0.2 , 0.75, 0.5, 1) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 4"]],  0,    0.5,  0.3, 0.75) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 5"]],  0,    0.25, 0.3, 0.5) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 10"]], 0.2,  0,    0.5, 0.25) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 1"]],  0.5 , 0.75, 0.8, 1) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 2"]],  0.7,  0.5,  1,   0.75) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 7"]],  0.7,  0.25, 1,   0.5) +
  patchwork::inset_element(log10_RFA_prediction_list[["RFA: 6"]],  0.5,  0,    0.8, 0.25) +
  patchwork::inset_element(mylegend,  0.85,  0.09,    0.95, 0.19) # add legend at bottom right

# add shared y and x axis label to patchwork plot
annotated_RFA_map <- gridExtra::grid.arrange(patchwork::patchworkGrob(log10_RFA_map_sim_abundance_plot), left = "Abundance", bottom = "Year", padding = unit(0.1, "line"))

ggsave(plot = annotated_RFA_map, filename = file.path(figures_path, "Fig4_annotated_log10_RFA_map_median_sim_abundance.tiff"),
       units = "cm", width = 17, height = 17.8, dpi = 500)

Get estimates for RFA 10 in 2023

final_sim_RFA_long %>%
     mutate(Exploit_Maturity = case_when(Exploit_Maturity == "Unexploited_Immature" ~ "1",
                                      Exploit_Maturity == "Exploited_Immature" ~ "2",
                                      Exploit_Maturity == "Exploited_Mature" ~ "3")) %>% # recode exploitation_maturity
    mutate(Exploit_Maturity = factor(Exploit_Maturity, levels = 1:3)) %>%
    filter(RFA == 10 & Year_Quarter == 2023)