Package 'binaryRL', version 0.9.0
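
The examples below use the Mason_2024_Exp2 dataset that ships with the package. The data frame printed next presumably comes from inspecting its first rows, for example:

head(binaryRL::Mason_2024_Exp2)   # first six rows: subject 1, block 1, trials 1-6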
##   Subject Block Trial L_choice R_choice L_reward R_reward Sub_Choose Frame
## 1       1     1     1        A        B       36       40          B  Gain
## 2       1     1     2        A        B       36       40          B  Gain
## 3       1     1     3        C        D      -36      -40          D  Loss
## 4       1     1     4        D        C      -40      -36          D  Loss
## 5       1     1     5        D        C      -40      -36          D  Loss
## 6       1     1     6        A        B       36       40          A  Gain
##   NetWorth   RT
## 1       40 6855
## 2       80 7089
## 3       40 5147
## 4        0 1531
## 5      -40 1859
## 6       -4 1910
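
Each row is one trial: L_choice and R_choice are the options shown on the left and right, L_reward and R_reward their payoffs, Sub_Choose the option the participant chose, Frame whether the trial was framed as a gain or a loss, NetWorth the running payoff total, and RT the response time (presumably in milliseconds).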

TD

The temporal difference (TD) model has two free parameters: a single learning rate (eta) and a soft-max parameter (tau). Here both are fixed to example values and the model is evaluated for subject 1 of Mason_2024_Exp2.

binaryRL.res <- binaryRL::run_m(
  data = binaryRL::Mason_2024_Exp2,  # example dataset bundled with the package
  id = 1,                            # which subject to model
  eta = c(0.123),                    # learning rate
  tau = c(0.789),                    # soft-max parameter
  n_params = 2,                      # number of free parameters
  n_trials = 360,                    # number of trials per subject
  mode = "fit"
)

summary(binaryRL.res)
## Preconditions for this fitting:
##  - Initial value of options: Initial reward received
##  - Random choice threshold: 1
## Results of the Reinforcement Learning Model:
## 
## Free Parameters:
##    α:  NA 
##    β:  NA 
##    γ:  1 
##    η:  0.123 
##    ε:  NA 
##    λ:  NA 
##    π:  0.001 
##    τ:  0.789 
## 
## Model Fit:
##    Accuracy:  63.61 %
##    LogL:  -542.39 
##    AIC:  1088.78 
##    BIC:  1096.55
## [[1]]
##    Parameter Value1
## 1       EV_1     NA
## 2  threshold  1.000
## 3      alpha     NA
## 4       beta     NA
## 5      gamma  1.000
## 6        eta  0.123
## 7    epsilon     NA
## 8     lambda     NA
## 9         pi  0.001
## 10       tau  0.789
## 
## [[2]]
##     Metric   Value
## 1 Accuracy   63.61
## 2     LogL -542.39
## 3      AIC 1088.78
## 4      BIC 1096.55
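
The reported AIC and BIC are consistent with the standard definitions based on the log-likelihood, the number of free parameters, and the number of trials; a quick check (not part of the package output):

logL <- -542.39                 # log-likelihood reported above
k <- 2                          # free parameters (eta, tau)
n <- 360                        # trials
2 * k - 2 * logL                # 1088.78  (AIC)
k * log(n) - 2 * logL           # 1096.55  (BIC)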

RSTD

The risk-sensitive TD (RSTD) model has three free parameters: two learning rates (eta), one for each sign of the prediction error, plus the soft-max parameter (tau).

binaryRL.res <- binaryRL::run_m(
  data = binaryRL::Mason_2024_Exp2,
  id = 1,
  n_params = 3,               # number of free parameters
  n_trials = 360,
  eta = c(0.123, 0.456),      # two learning rates (one per prediction-error sign)
  tau = c(0.789),             # soft-max parameter
  mode = "fit"
)

summary(binaryRL.res)
## Preconditions for this fitting:
##  - Initial value of options: Initial reward received
##  - Random choice threshold: 1
## Results of the Reinforcement Learning Model:
## 
## Free Parameters:
##    α:  NA 
##    β:  NA 
##    γ:  1 
##    η:  0.123 0.456 
##    ε:  NA 
##    λ:  NA 
##    π:  0.001 
##    τ:  0.789 
## 
## Model Fit:
##    Accuracy:  67.22 %
##    LogL:  -535.86 
##    AIC:  1077.72 
##    BIC:  1089.38
## [[1]]
##    Parameter Value1 Value2
## 1       EV_1     NA     NA
## 2  threshold  1.000     NA
## 3      alpha     NA     NA
## 4       beta     NA     NA
## 5      gamma  1.000     NA
## 6        eta  0.123  0.456
## 7    epsilon     NA     NA
## 8     lambda     NA     NA
## 9         pi  0.001     NA
## 10       tau  0.789     NA
## 
## [[2]]
##     Metric   Value
## 1 Accuracy   67.22
## 2     LogL -535.86
## 3      AIC 1077.72
## 4      BIC 1089.38

Utility

The utility model also has three free parameters: a learning rate (eta), a curvature parameter (gamma) that transforms the objective reward into subjective utility, and the soft-max parameter (tau).

binaryRL.res <- binaryRL::run_m(
  data = binaryRL::Mason_2024_Exp2,
  id = 1,
  n_params = 3,               # number of free parameters
  n_trials = 360,
  eta = c(0.123),             # learning rate
  gamma = c(0.456),           # utility curvature parameter
  tau = c(0.789),             # soft-max parameter
  mode = "fit"
)

summary(binaryRL.res)
## Preconditions for this fitting:
##  - Initial value of options: Initial reward received
##  - Random choice threshold: 1
## Results of the Reinforcement Learning Model:
## 
## Free Parameters:
##    α:  NA 
##    β:  NA 
##    γ:  0.456 
##    η:  0.123 
##    ε:  NA 
##    λ:  NA 
##    π:  0.001 
##    τ:  0.789 
## 
## Model Fit:
##    Accuracy:  57.22 %
##    LogL:  -314.16 
##    AIC:  634.32 
##    BIC:  645.98
## [[1]]
##    Parameter Value1
## 1       EV_1     NA
## 2  threshold  1.000
## 3      alpha     NA
## 4       beta     NA
## 5      gamma  0.456
## 6        eta  0.123
## 7    epsilon     NA
## 8     lambda     NA
## 9         pi  0.001
## 10       tau  0.789
## 
## [[2]]
##     Metric   Value
## 1 Accuracy   57.22
## 2     LogL -314.16
## 3      AIC  634.32
## 4      BIC  645.98
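
With the three fits in hand, the reported metrics can be collected into a single comparison table. The values below are simply copied from the summary() output above (lower AIC/BIC indicates a more parsimonious fit; Accuracy is presumably the percentage of trials on which the model's predicted choice matches the participant's at these fixed parameter values):

# Hand-assembled comparison of the three example fits for subject 1
model_comparison <- data.frame(
  Model    = c("TD", "RSTD", "Utility"),
  n_params = c(2, 3, 3),
  Accuracy = c(63.61, 67.22, 57.22),
  LogL     = c(-542.39, -535.86, -314.16),
  AIC      = c(1088.78, 1077.72, 634.32),
  BIC      = c(1096.55, 1089.38, 645.98)
)
model_comparison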