```gdscript
extends Node

class_name AIController

# ------------------ Godot RL Agents Logic ------------------------------------#
var reset_after := 1000

var heuristic := "human"
var done := false
var reward := 0.0
var n_steps := 0
var needs_reset := false

func _ready():
    add_to_group("AGENT")

#-- Methods that need implementing using the "extend script" option in Godot --#
func get_obs() -> Dictionary:
    assert(false, "the get_obs method is not implemented when extending from ai_controller")
    return {"obs": []}

func get_reward() -> float:
    assert(false, "the get_reward method is not implemented when extending from ai_controller")
    return 0.0

func get_action_space() -> Dictionary:
    assert(false, "the get_action_space method is not implemented when extending from ai_controller")
    return {
        "example_actions_continous": {
            "size": 2,
            "action_type": "continuous"
        },
        "example_actions_discrete": {
            "size": 2,
            "action_type": "discrete"
        },
    }

func set_action(action) -> void:
    assert(false, "the set_action method is not implemented when extending from ai_controller")
# -----------------------------------------------------------------------------#

func _physics_process(delta):
    n_steps += 1
    if n_steps > reset_after:
        needs_reset = true

func get_obs_space():
    # may need overriding if the obs space is complex
    var obs = get_obs()
    return {
        "obs": {
            "size": [len(obs["obs"])],
            "space": "box"
        },
    }

func reset():
    n_steps = 0
    needs_reset = false

func reset_if_done():
    if done:
        reset()

func set_heuristic(h):
    # sets the heuristic to "human" or "model"; nothing to change here
    heuristic = h

func get_done():
    return done

func set_done_false():
    done = false

func zero_reward():
    reward = 0.0
```