OpenAI Gym is an open-source Python toolkit for developing and comparing reinforcement learning algorithms. This R package is a wrapper for the OpenAI Gym API, and enables access to an ever-growing variety of environments.
You can install:
the latest released version from CRAN:
install.packages("gym")
the latest development version from GitHub:
# Upgrade devtools first if the installed version is too old to support
# install_github() with the `subdir` argument (needed because the package
# lives in the gym/ subdirectory of the repository).
if (packageVersion("devtools") < 1.6) {
  install.packages("devtools")
}
devtools::install_github("paulhendricks/gym-R", subdir = "gym")
If you encounter a clear bug, please file a minimal reproducible example on GitHub.
# Example: run a random agent against CartPole-v0 via the gym HTTP API.
# A gym API server must already be listening at remote_base.
library(gym)
remote_base <- "http://127.0.0.1:5000"
client <- create_GymClient(remote_base)
print(client)

# Create environment
env_id <- "CartPole-v0"
instance_id <- env_create(client, env_id)
print(instance_id)

# List all environments
all_envs <- env_list_all(client)
print(all_envs)

# Set up agent: samples uniformly from a discrete action space of size n.
action_space_info <- env_action_space_info(client, instance_id)
print(action_space_info)
agent <- random_discrete_agent(action_space_info[["n"]])

# Run experiment, with monitor.
# force = TRUE clears any previous results already in outdir.
outdir <- "/tmp/random-agent-results"
env_monitor_start(client, instance_id, outdir, force = TRUE, resume = FALSE)

episode_count <- 100
max_steps <- 200

for (episode in seq_len(episode_count)) {
  ob <- env_reset(client, instance_id)
  for (step in seq_len(max_steps)) {
    action <- env_action_space_sample(client, instance_id)
    results <- env_step(client, instance_id, action, render = TRUE)
    # End the episode as soon as the environment reports a terminal state.
    if (results[["done"]]) break
  }
}

# Dump result info to disk
env_monitor_close(client, instance_id)
The original author of gym is Paul Hendricks. The lead maintainer of gym is Paul Hendricks.